Merge "MTP: CPU usage high while under MTP mode."
diff --git a/apex/Android.bp b/apex/Android.bp
index 51e4c23..9455290 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -12,36 +12,58 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-apex {
- name: "com.android.media",
- manifest: "manifest.json",
- native_shared_libs: [
- // Extractor plugins
- "libaacextractor",
- "libamrextractor",
- "libflacextractor",
- "libmidiextractor",
- "libmkvextractor",
- "libmp3extractor",
- "libmp4extractor",
- "libmpeg2extractor",
- "liboggextractor",
- "libwavextractor",
- // MediaPlayer2
- "libmediaplayer2_jni",
- ],
+apex_defaults {
+ name: "com.android.media-defaults",
+ java_libs: ["updatable-media"],
+ compile_multilib: "both",
+ multilib: {
+ first: {
+ // Extractor process runs only with the primary ABI.
+ native_shared_libs: [
+ // Extractor plugins
+ "libaacextractor",
+ "libamrextractor",
+ "libflacextractor",
+ "libmidiextractor",
+ "libmkvextractor",
+ "libmp3extractor",
+ "libmp4extractor",
+ "libmpeg2extractor",
+ "liboggextractor",
+ "libwavextractor",
+ ],
+ },
+ both: {
+ native_shared_libs: [
+ // MediaPlayer2
+ "libmedia2_jni",
+ ],
+ },
+ },
key: "com.android.media.key",
+ certificate: ":com.android.media.certificate",
}
apex {
- name: "com.android.media.swcodec",
- compile_multilib: "32",
- manifest: "manifest_codec.json",
+ name: "com.android.media",
+ manifest: "manifest.json",
+ defaults: ["com.android.media-defaults"],
+}
+
+apex_defaults {
+ name: "com.android.media.swcodec-defaults",
native_shared_libs: [
"libmedia_codecserviceregistrant",
],
use_vendor: true,
key: "com.android.media.swcodec.key",
+ certificate: ":com.android.media.swcodec.certificate",
+}
+
+apex {
+ name: "com.android.media.swcodec",
+ manifest: "manifest_codec.json",
+ defaults: ["com.android.media.swcodec-defaults"],
}
apex_key {
@@ -55,3 +77,13 @@
public_key: "com.android.media.swcodec.avbpubkey",
private_key: "com.android.media.swcodec.pem",
}
+
+android_app_certificate {
+ name: "com.android.media.certificate",
+ certificate: "com.android.media",
+}
+
+android_app_certificate {
+ name: "com.android.media.swcodec.certificate",
+ certificate: "com.android.media.swcodec",
+}
diff --git a/apex/com.android.media.pk8 b/apex/com.android.media.pk8
new file mode 100644
index 0000000..6df741e
--- /dev/null
+++ b/apex/com.android.media.pk8
Binary files differ
diff --git a/apex/com.android.media.swcodec.pk8 b/apex/com.android.media.swcodec.pk8
new file mode 100644
index 0000000..05a4216
--- /dev/null
+++ b/apex/com.android.media.swcodec.pk8
Binary files differ
diff --git a/apex/com.android.media.swcodec.x509.pem b/apex/com.android.media.swcodec.x509.pem
new file mode 100644
index 0000000..67b9b4f
--- /dev/null
+++ b/apex/com.android.media.swcodec.x509.pem
@@ -0,0 +1,34 @@
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIJAIM72JpD4v6XMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
+VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEiMCAGA1UE
+AwwZY29tLmFuZHJvaWQubWVkaWEuc3djb2RlYzAgFw0xOTAyMTEwMjExMTFaGA80
+NzU3MDEwNzAyMTExMVowgYIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9y
+bmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAw
+DgYDVQQLDAdBbmRyb2lkMSIwIAYDVQQDDBljb20uYW5kcm9pZC5tZWRpYS5zd2Nv
+ZGVjMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsqXE0AIWpLW9Tgq2
+nQGph7KZ6L2Q9oxviqCVHxIaPqfhM2SwTbycADIQeqrrlRxhddVkjLuMUkJa7mev
+fERmgpiOfnPIlGK6PTs2gljCkskZhF3bgfeyuHt0tsYO+UaN8MVoZD7/QdiE46w2
+OMDClG1UqgiqOBhLTEN/cHXObnUiiVXUYqN8aYZf6L6Fs3yQi2ZZgfbxTVFewqdv
+aLLOqCYnVYXZH+ZxbXESA0M+WXKgRKsYTj2GYs3eko1rFi4Y6uHVLx45yaoT5u/i
+SxPEkocyMCKvGJWu4XlSOd3EjSOMaqCOYVyGLxdlnQWQU7PZDqBSJ0SysWgpFHpB
+I15c2jhRdXOCfQ9ZtDfPZkE0a2A8kJDAoF1mzTp6IvBAWUsl5nHPw5CWkFpNad/h
+tqqGCScWbiKZuvrQ4/RQNm3f1K+mxX9TrjFigpqNO6d4pGAo1fa6sHR3xWPw/myq
+h5ZJjVnXU5Yq64S4xWOssfjpOg7RfNuvzuk3ok3MYs1mbx3vhZOj5km1f3qrgX9c
+mXjYnyXD0jJBm4uAJWXLdK9PlZvlXbztMCzYj832Io4pFLCtSxkzX75t1em36Nv0
+mNp6NtSSy6SFSq8l7IsXV2FNyUiyHWxS/UQm8pYg5Q5dWHvEEF78P6lV0wRa6FQl
+BBSgpqTAI092KIjDDtB7GQCgV5ECAwEAAaNTMFEwHQYDVR0OBBYEFAFIdFTDEDft
+ewSSAS7Fa3OZ5TXzMB8GA1UdIwQYMBaAFAFIdFTDEDftewSSAS7Fa3OZ5TXzMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAC5e3zXythJCGmz1FmAV
+8Y/UI+Glg6G0x/k04WaRG0DPLLjlJ1F0LM1/IReBSgXcYAL0CAgPycf/rGPOgMFm
+tQxYyjBUxKdjpIqU5DJoV1feanGveIRpto1YRKNgHuzG9rZGR4AgPnt6X4Yxlq04
+lI7QpWadXe1myARJhj3niSNY9+2wEInkx4ZuCO1LtIGqnbdc8jQ8YoVqIE5N4kuM
+ccyPYgsdABtopbjN92rueu8sfF8R6ROy+tNgb6OjpAAevtnBfZ2LXqfObKirHCK+
+k6w4WSB1UUoZ3Xgz8sJtXgokvYeInkN8tHuTagHYU2VQTcA0rdBGMN/1OljJpWlN
+0UUq4fAYU6cN4lHxr2LM9If4WvAzdLAWvaIZrDqaU4i/zYT9l6rR4lC2KW3EHWov
+nPXfgEJJ8AP1iRGibvew3i3SB6XTWFQYTUIBeJfDz/KDXQabP+yzXWISdZCUMUpx
+f+Raqsb5MoKaJdVgnSL0mBunjCyJDzzg34J7oGx6/BnwoiOrwLN4Qaz5U8jbrPSx
+p9LfleCcO7ZdeE8GKqx0X1T4d7tradtmxOS8Iwr4niskkHGRkzozvVvuyGKmoN2k
+162Vfjq+ddj7qEpSh3BS6hHU+vlMbC9L0trGxPxFEAHDrwu0KwGNduTkiu/3jvfB
+JTgH8P9mD1loYxRdo+vet8eQ
+-----END CERTIFICATE-----
diff --git a/apex/com.android.media.x509.pem b/apex/com.android.media.x509.pem
new file mode 100644
index 0000000..e7908fa
--- /dev/null
+++ b/apex/com.android.media.x509.pem
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFzDCCA7SgAwIBAgIJAO05DBBusaaLMA0GCSqGSIb3DQEBCwUAMHoxCzAJBgNV
+BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBW
+aWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRowGAYDVQQD
+DBFjb20uYW5kcm9pZC5tZWRpYTAgFw0xOTAxMjUxNzE3MTdaGA80NzU2MTIyMTE3
+MTcxN1owejELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNV
+BAcMDU1vdW50YWluIFZpZXcxEDAOBgNVBAoMB0FuZHJvaWQxEDAOBgNVBAsMB0Fu
+ZHJvaWQxGjAYBgNVBAMMEWNvbS5hbmRyb2lkLm1lZGlhMIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEAmNkVxUbp/bLbeGbvKqYXzwBycSDpmOhh///lNGYQ
+/AMUD0q6EaZzU2bd4aL0rOGqfoYlhKd0kMVmMUmfdE9ODAfKxleEeEaRl2GJS8a9
+ABi770l3GHbB2xMI2sEWeOD9xsPFF6+ByPZmoUuNhMr4pUbXsDpE3h8ljrgXHtIg
+bh7ofbvddruwBV0lS1k9OZ9jPVGhEKkJnhgQa67cwgdjizAMbI0Dcz9gtMMawsDj
+Z2aQd1r+vxgh1/XkI/NMmXCnG2ERytXcJeC5S4gEtHfTTPoP0FuVgSB6y6dalMuZ
+F0NBZw8Mvgdy3QJip0uNa36J63CMZKTJWbTdlFpPL2hk0PgaYvje8C5Xtk5282wT
+dMocc8n2zIXbzbnSXGvjcNZib3Pfu55YUnX6eTqZ1BxlJ0FHZAsC4quFFWXxYBYD
+LCRoNNFEtIDQpuvuHF2DuHNDULpAQjy2y6+7eot0KEsVoDmZ4H8BpuAVVu2SxYNb
+gYflR9SmM0tmYeAcRT48q3xrocGyEHMqvgQRUpPfvct/8l8xVcDzOI/sJVDqmYzM
+u0Cj3fkSypGDJOMF/esFSmVvoI01tS7kaNS5vvtKYib//xqKRC9f0dCsGfFLnuUK
+o4KYbYWYwMyJqEd/5/ZvXyKIPAEeJL174L9+wTkc3cQpoBwJN4t+2E5MnhOEq6do
+5L0CAwEAAaNTMFEwHQYDVR0OBBYEFHjNK/GZko1RdZp+8iavWXL5xz9wMB8GA1Ud
+IwQYMBaAFHjNK/GZko1RdZp+8iavWXL5xz9wMA8GA1UdEwEB/wQFMAMBAf8wDQYJ
+KoZIhvcNAQELBQADggIBACmPQMksuLrNV1vbI44S1f70I0FHdBxchFGB39zuLbcn
+SsYom/LPtYJiD0Dl4bB4eb+ZnxkQP2XeP6pycmUH2j1EWexFwvdUvlfe8Qz+wAec
+ap4AxiX4Z2Ke2ivYotIZFUHdZOLkX20js8Wex1mzY43MLQn5APl9gK1VZTxDggeR
+EObH1S+JVjGwQqYZj2e6gNZH34Q25NQ698RL85GDkYtSISAifJtaJsU/B3vKm82I
+k9xMiCooCH6bRdGHG1jze4SRpidjxEm8cxkiaQagfcuXeCLziXJr3qAMKYiEY6bp
+0+bAqCt3S8OrrN3RQZfQrnlwitsM1jJJ/+C+WoDg4eY5AFrXDLvNeKh1qO/f8xv+
+fCXkQPcVVphLfRH9oxNrSgOWBP5/qIDH4s1YUL9luGT6H+08dlue3RkbzDbBqsQu
+7fQ/BbrIG/GuVKgyEM+a7C9gv7zc86YlueVYJEyxKidnn7RxOqyDBqyyfXA3zvme
+Rro7xIrMHPL7Nu3AWjwjXzbp/w0z+tEFPsfVB+OOHKsWPcUG0HUTJGkyeO/uHRjN
+qPEkkf7BHHUO4V2gjOIdCsELxKwHf7vsZTOk40EV751fZ7FDHMr1eddQkgH4eqAb
+DB79uP+SLfUo+42n4q6eMmoqw8d76bBXRoUhIo/Ms4sebhV0sRtAS67OQioc9UUg
+-----END CERTIFICATE-----
diff --git a/apex/testing/Android.bp b/apex/testing/Android.bp
new file mode 100644
index 0000000..701ced7
--- /dev/null
+++ b/apex/testing/Android.bp
@@ -0,0 +1,29 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+apex {
+ name: "test_com.android.media",
+ manifest: "test_manifest.json",
+ file_contexts: "com.android.media",
+ defaults: ["com.android.media-defaults"],
+ installable: false,
+}
+
+apex {
+ name: "test_com.android.media.swcodec",
+ manifest: "test_manifest_codec.json",
+ file_contexts: "com.android.media.swcodec",
+ defaults: ["com.android.media.swcodec-defaults"],
+ installable: false,
+}
diff --git a/apex/testing/test_manifest.json b/apex/testing/test_manifest.json
new file mode 100644
index 0000000..9f81f9f
--- /dev/null
+++ b/apex/testing/test_manifest.json
@@ -0,0 +1,4 @@
+{
+ "name": "com.android.media",
+ "version": 2
+}
diff --git a/apex/testing/test_manifest_codec.json b/apex/testing/test_manifest_codec.json
new file mode 100644
index 0000000..c956454
--- /dev/null
+++ b/apex/testing/test_manifest_codec.json
@@ -0,0 +1,4 @@
+{
+ "name": "com.android.media.swcodec",
+ "version": 2
+}
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index c038314..0e969c7 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -162,6 +162,28 @@
* Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
*/
const int EVENT_NONE = 0;
- const int EVENT_USER_SWITCHED = 1;
+ const int EVENT_USER_SWITCHED = 1; // The argument is the set of new foreground user IDs.
oneway void notifySystemEvent(int eventId, in int[] args);
+
+ /**
+ * Notify the camera service of a device physical status change. May only be called from
+ * a privileged process.
+ *
+ * newState is a bitfield consisting of DEVICE_STATE_* values combined together. Valid state
+ * combinations are device-specific. At device startup, the camera service will assume the device
+ * state is NORMAL until otherwise notified.
+ *
+ * Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
+ */
+ oneway void notifyDeviceStateChange(long newState);
+
+ // Bitfield constants for notifyDeviceStateChange
+ // All bits >= 32 are for custom vendor states
+ // Written as ints since AIDL does not support long constants.
+ const int DEVICE_STATE_NORMAL = 0;
+ const int DEVICE_STATE_BACK_COVERED = 1;
+ const int DEVICE_STATE_FRONT_COVERED = 2;
+ const int DEVICE_STATE_FOLDED = 4;
+ const int DEVICE_STATE_LAST_FRAMEWORK_BIT = 0x80000000; // 1 << 31;
+
}
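
For illustration only (not part of this change): a privileged caller holding android.permission.CAMERA_SEND_SYSTEM_EVENTS could combine the DEVICE_STATE_* bits and forward a fold event roughly as sketched below. The header path and the way the ICameraService proxy is obtained are assumptions about the generated C++ binding, not something this patch defines.

    // Hypothetical caller sketch: OR framework-defined DEVICE_STATE_* bits together and
    // send them through the oneway notifyDeviceStateChange() added above.
    #include <android/hardware/ICameraService.h>   // assumed generated AIDL header

    using android::sp;
    using android::hardware::ICameraService;

    void notifyFoldedAndBackCovered(const sp<ICameraService>& cameraService) {
        // Bits 0..31 are framework states; bits >= 32 stay free for vendor states,
        // which is why the parameter is a long even though the constants are ints.
        int64_t newState = ICameraService::DEVICE_STATE_FOLDED |
                           ICameraService::DEVICE_STATE_BACK_COVERED;
        cameraService->notifyDeviceStateChange(newState);   // oneway: does not block
    }
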
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index b88a2c5..92b06c2 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -27,6 +27,7 @@
"libhidltransport",
"android.hardware.camera.common@1.0",
"android.hardware.camera.provider@2.4",
+ "android.hardware.camera.provider@2.5",
"android.hardware.camera.device@1.0",
"android.hardware.camera.device@3.2",
"android.hardware.camera.device@3.4",
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index c661233..359eaed 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -36,6 +36,10 @@
filterDurations(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS);
filterDurations(ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS);
filterDurations(ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS);
+ filterDurations(ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS);
+ filterDurations(ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS);
+ filterDurations(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS);
+ filterDurations(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS);
}
// TODO: filter request/result keys
}
@@ -104,7 +108,8 @@
for (size_t i = 0; i < entry.count; ++i) {
if (ids[i] == '\0') {
if (start != i) {
- mStaticPhysicalCameraIds.push_back((const char*)ids+start);
+ mStaticPhysicalCameraIdValues.push_back(String8((const char *)ids+start));
+ mStaticPhysicalCameraIds.push_back(mStaticPhysicalCameraIdValues.back().string());
}
start = i+1;
}
@@ -173,6 +178,26 @@
filteredDurations.push_back(duration);
}
break;
+ case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS:
+ case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS:
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ format = AIMAGE_FORMAT_HEIC;
+ filteredDurations.push_back(format);
+ filteredDurations.push_back(width);
+ filteredDurations.push_back(height);
+ filteredDurations.push_back(duration);
+ }
+ break;
+ case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS:
+ case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS:
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ format = AIMAGE_FORMAT_DEPTH_JPEG;
+ filteredDurations.push_back(format);
+ filteredDurations.push_back(width);
+ filteredDurations.push_back(height);
+ filteredDurations.push_back(duration);
+ }
+ break;
default:
// Should not reach here
ALOGE("%s: Unkown tag 0x%x", __FUNCTION__, tag);
@@ -246,6 +271,57 @@
filteredDepthStreamConfigs.push_back(isInput);
}
mData.update(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, filteredDepthStreamConfigs);
+
+ entry = mData.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS);
+ Vector<int32_t> filteredHeicStreamConfigs;
+ filteredHeicStreamConfigs.setCapacity(entry.count);
+
+ for (size_t i=0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+ int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ if (isInput == ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_INPUT) {
+ // Hide input streams
+ continue;
+ }
+ // Translate HAL formats to NDK format
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ format = AIMAGE_FORMAT_HEIC;
+ }
+
+ filteredHeicStreamConfigs.push_back(format);
+ filteredHeicStreamConfigs.push_back(width);
+ filteredHeicStreamConfigs.push_back(height);
+ filteredHeicStreamConfigs.push_back(isInput);
+ }
+ mData.update(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, filteredHeicStreamConfigs);
+
+ entry = mData.find(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS);
+ Vector<int32_t> filteredDynamicDepthStreamConfigs;
+ filteredDynamicDepthStreamConfigs.setCapacity(entry.count);
+
+ for (size_t i = 0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+ int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ if (isInput == ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_INPUT) {
+ // Hide input streams
+ continue;
+ }
+ // Translate HAL formats to NDK format
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ format = AIMAGE_FORMAT_DEPTH_JPEG;
+ }
+
+ filteredDynamicDepthStreamConfigs.push_back(format);
+ filteredDynamicDepthStreamConfigs.push_back(width);
+ filteredDynamicDepthStreamConfigs.push_back(height);
+ filteredDynamicDepthStreamConfigs.push_back(isInput);
+ }
+ mData.update(ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS,
+ filteredDynamicDepthStreamConfigs);
}
bool
@@ -484,6 +560,8 @@
ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION,
ANDROID_DEPTH_MAX_DEPTH_SAMPLES,
+ ANDROID_HEIC_INFO_SUPPORTED,
+ ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT,
});
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/camera/ndk/impl/ACameraMetadata.h b/camera/ndk/impl/ACameraMetadata.h
index 7049c4b..3d895cb 100644
--- a/camera/ndk/impl/ACameraMetadata.h
+++ b/camera/ndk/impl/ACameraMetadata.h
@@ -117,6 +117,7 @@
static std::unordered_set<uint32_t> sSystemTags;
std::vector<const char*> mStaticPhysicalCameraIds;
+ std::vector<String8> mStaticPhysicalCameraIdValues;
};
#endif // _ACAMERA_METADATA_H
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 4bb74cb..8c19e1d 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -71,6 +71,8 @@
ACAMERA_DEPTH,
ACAMERA_LOGICAL_MULTI_CAMERA,
ACAMERA_DISTORTION_CORRECTION,
+ ACAMERA_HEIC,
+ ACAMERA_HEIC_INFO,
ACAMERA_SECTION_COUNT,
ACAMERA_VENDOR = 0x8000
@@ -112,6 +114,8 @@
ACAMERA_DISTORTION_CORRECTION_START
= ACAMERA_DISTORTION_CORRECTION
<< 16,
+ ACAMERA_HEIC_START = ACAMERA_HEIC << 16,
+ ACAMERA_HEIC_INFO_START = ACAMERA_HEIC_INFO << 16,
ACAMERA_VENDOR_START = ACAMERA_VENDOR << 16
} acamera_metadata_section_start_t;
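
For illustration only (not part of this change): every tag value is the section start plus a small offset, so the new HEIC section decomposes as below (constants from this header).

    // Worked example of the tag numbering scheme:
    //   ACAMERA_HEIC_START == ACAMERA_HEIC << 16
    //   ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS == ACAMERA_HEIC_START + 0
    //   ACAMERA_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS   == ACAMERA_HEIC_START + 1
    //   ACAMERA_HEIC_AVAILABLE_HEIC_STALL_DURATIONS       == ACAMERA_HEIC_START + 2
    static_assert(ACAMERA_HEIC_START == (ACAMERA_HEIC << 16),
                  "section start is the section index shifted into the upper 16 bits");
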
@@ -1912,6 +1916,7 @@
* <li>ACaptureRequest</li>
* </ul></p>
*
+ * <p>This tag is also used for HEIC image capture.</p>
*/
ACAMERA_JPEG_GPS_COORDINATES = // double[3]
ACAMERA_JPEG_START,
@@ -1927,6 +1932,7 @@
* <li>ACaptureRequest</li>
* </ul></p>
*
+ * <p>This tag is also used for HEIC image capture.</p>
*/
ACAMERA_JPEG_GPS_PROCESSING_METHOD = // byte
ACAMERA_JPEG_START + 1,
@@ -1942,6 +1948,7 @@
* <li>ACaptureRequest</li>
* </ul></p>
*
+ * <p>This tag is also used for HEIC image capture.</p>
*/
ACAMERA_JPEG_GPS_TIMESTAMP = // int64
ACAMERA_JPEG_START + 2,
@@ -1986,6 +1993,10 @@
* </code></pre>
* <p>For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
* also be set to EXTERNAL. The above code is not relevant in such case.</p>
+ * <p>This tag is also used to describe the orientation of the HEIC image capture, in which
+ * case the rotation is reflected by
+ * <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>, and not by
+ * rotating the image data itself.</p>
*
* @see ACAMERA_SENSOR_ORIENTATION
*/
@@ -2003,7 +2014,8 @@
* <li>ACaptureRequest</li>
* </ul></p>
*
- * <p>85-95 is typical usage range.</p>
+ * <p>85-95 is typical usage range. This tag is also used to describe the quality
+ * of the HEIC image capture.</p>
*/
ACAMERA_JPEG_QUALITY = // byte
ACAMERA_JPEG_START + 4,
@@ -2019,6 +2031,7 @@
* <li>ACaptureRequest</li>
* </ul></p>
*
+ * <p>This tag is also used to describe the quality of the HEIC image capture.</p>
*/
ACAMERA_JPEG_THUMBNAIL_QUALITY = // byte
ACAMERA_JPEG_START + 5,
@@ -2055,6 +2068,10 @@
* orientation is requested. LEGACY device will always report unrotated thumbnail
* size.</li>
* </ul>
+ * <p>The tag is also used as thumbnail size for HEIC image format capture, in which case the
+ * the thumbnail rotation is reflected by
+ * <a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>, and not by
+ * rotating the thumbnail data itself.</p>
*
* @see ACAMERA_JPEG_ORIENTATION
*/
@@ -2088,6 +2105,7 @@
* and vice versa.</li>
* <li>All non-<code>(0, 0)</code> sizes will have non-zero widths and heights.</li>
* </ul>
+ * <p>This list is also used as supported thumbnail sizes for HEIC image format capture.</p>
*
* @see ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
*/
@@ -5549,6 +5567,73 @@
ACAMERA_DEPTH_AVAILABLE_RECOMMENDED_DEPTH_STREAM_CONFIGURATIONS =
// int32[n*5]
ACAMERA_DEPTH_START + 5,
+ /**
+ * <p>The available dynamic depth dataspace stream
+ * configurations that this camera device supports
+ * (i.e. format, width, height, output/input stream).</p>
+ *
+ * <p>Type: int32[n*4] (acamera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>These are output stream configurations for use with
+ * dataSpace DYNAMIC_DEPTH. The configurations are
+ * listed as <code>(format, width, height, input?)</code> tuples.</p>
+ * <p>Only devices that support depth output for at least
+ * the HAL_PIXEL_FORMAT_Y16 dense depth map along with
+ * HAL_PIXEL_FORMAT_BLOB of the same size, or of a size with
+ * the same aspect ratio, can have a dynamic depth dataspace
+ * stream configuration. ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE also
+ * needs to be set to FALSE.</p>
+ *
+ * @see ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE
+ */
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS =
+ // int32[n*4] (acamera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations_t)
+ ACAMERA_DEPTH_START + 6,
+ /**
+ * <p>This lists the minimum frame duration for each
+ * format/size combination for dynamic depth output streams.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This should correspond to the frame duration when only that
+ * stream is active, with all processing (typically in android.*.mode)
+ * set to either OFF or FAST.</p>
+ * <p>When multiple streams are used in a request, the minimum frame
+ * duration will be max(individual stream min durations).</p>
+ * <p>The minimum frame duration of a stream (of a particular format, size)
+ * is the same regardless of whether the stream is input or output.</p>
+ */
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS = // int64[4*n]
+ ACAMERA_DEPTH_START + 7,
+ /**
+ * <p>This lists the maximum stall duration for each
+ * output format/size combination for dynamic depth streams.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>A stall duration is how much extra time would get added
+ * to the normal minimum frame duration for a repeating request
+ * that has streams with non-zero stall.</p>
+ * <p>All dynamic depth output streams may have a nonzero stall
+ * duration.</p>
+ */
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS = // int64[4*n]
+ ACAMERA_DEPTH_START + 8,
ACAMERA_DEPTH_END,
/**
@@ -5561,12 +5646,12 @@
* <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
* </ul></p>
*
- * <p>For a logical camera, this is concatenation of all underlying physical camera ids.
- * The null terminator for physical camera id must be preserved so that the whole string
- * can be tokenized using '\0' to generate list of physical camera ids.</p>
- * <p>For example, if the physical camera ids of the logical camera are "2" and "3", the
+ * <p>For a logical camera, this is concatenation of all underlying physical camera IDs.
+ * The null terminator for physical camera ID must be preserved so that the whole string
+ * can be tokenized using '\0' to generate a list of physical camera IDs.</p>
+ * <p>For example, if the physical camera IDs of the logical camera are "2" and "3", the
* value of this tag will be ['2', '\0', '3', '\0'].</p>
- * <p>The number of physical camera ids must be no less than 2.</p>
+ * <p>The number of physical camera IDs must be no less than 2.</p>
*/
ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS = // byte[n]
ACAMERA_LOGICAL_MULTI_CAMERA_START,
@@ -5591,6 +5676,28 @@
*/
ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE = // byte (acamera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t)
ACAMERA_LOGICAL_MULTI_CAMERA_START + 1,
+ /**
+ * <p>String containing the ID of the underlying active physical camera.</p>
+ *
+ * <p>Type: byte</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * </ul></p>
+ *
+ * <p>The ID of the active physical camera that's backing the logical camera. All camera
+ * streams and metadata that are not physical camera specific originate from this
+ * physical camera. This must be one of the valid physical IDs advertised in the physicalIds
+ * static tag.</p>
+ * <p>For a logical camera made up of physical cameras where each camera's lenses have
+ * different characteristics, the camera device may choose to switch between the physical
+ * cameras when the application changes FOCAL_LENGTH or SCALER_CROP_REGION.
+ * At the time of lens switch, this result metadata reflects the new active physical camera
+ * ID.</p>
+ */
+ ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID = // byte
+ ACAMERA_LOGICAL_MULTI_CAMERA_START + 2,
ACAMERA_LOGICAL_MULTI_CAMERA_END,
/**
@@ -5668,6 +5775,80 @@
ACAMERA_DISTORTION_CORRECTION_START + 1,
ACAMERA_DISTORTION_CORRECTION_END,
+ /**
+ * <p>The available HEIC (ISO/IEC 23008-12) stream
+ * configurations that this camera device supports
+ * (i.e. format, width, height, output/input stream).</p>
+ *
+ * <p>Type: int32[n*4] (acamera_metadata_enum_android_heic_available_heic_stream_configurations_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>The configurations are listed as <code>(format, width, height, input?)</code> tuples.</p>
+ * <p>If the camera device supports HEIC image format, it will support identical set of stream
+ * combinations involving HEIC image format, compared to the combinations involving JPEG
+ * image format as required by the device's hardware level and capabilities.</p>
+ * <p>All the static, control, and dynamic metadata tags related to JPEG apply to HEIC formats.
+ * Configuring JPEG and HEIC streams at the same time is not supported.</p>
+ * <p>All the configuration tuples <code>(format, width, height, input?)</code> will contain
+ * AIMAGE_FORMAT_HEIC format as OUTPUT only.</p>
+ */
+ ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS = // int32[n*4] (acamera_metadata_enum_android_heic_available_heic_stream_configurations_t)
+ ACAMERA_HEIC_START,
+ /**
+ * <p>This lists the minimum frame duration for each
+ * format/size combination for HEIC output formats.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This should correspond to the frame duration when only that
+ * stream is active, with all processing (typically in android.*.mode)
+ * set to either OFF or FAST.</p>
+ * <p>When multiple streams are used in a request, the minimum frame
+ * duration will be max(individual stream min durations).</p>
+ * <p>See ACAMERA_SENSOR_FRAME_DURATION and
+ * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for more details about
+ * calculating the max frame rate.</p>
+ *
+ * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+ * @see ACAMERA_SENSOR_FRAME_DURATION
+ */
+ ACAMERA_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS = // int64[4*n]
+ ACAMERA_HEIC_START + 1,
+ /**
+ * <p>This lists the maximum stall duration for each
+ * output format/size combination for HEIC streams.</p>
+ *
+ * <p>Type: int64[4*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>A stall duration is how much extra time would get added
+ * to the normal minimum frame duration for a repeating request
+ * that has streams with non-zero stall.</p>
+ * <p>This functions similarly to
+ * ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS for HEIC
+ * streams.</p>
+ * <p>All HEIC output stream formats may have a nonzero stall
+ * duration.</p>
+ *
+ * @see ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS
+ */
+ ACAMERA_HEIC_AVAILABLE_HEIC_STALL_DURATIONS = // int64[4*n]
+ ACAMERA_HEIC_START + 2,
+ ACAMERA_HEIC_END,
+
} acamera_metadata_tag_t;
/**
@@ -7162,6 +7343,10 @@
* <p>If this is supported, android.scaler.streamConfigurationMap will
* additionally return a min frame duration that is greater than
* zero for each supported size-format combination.</p>
+ * <p>For camera devices with LOGICAL_MULTI_CAMERA capability, when the underlying active
+ * physical camera switches, exposureTime, sensitivity, and lens properties may change
+ * even if AE/AF is locked. However, the overall auto exposure and auto focus experience
+ * for users will be consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.</p>
*
* @see ACAMERA_BLACK_LEVEL_LOCK
* @see ACAMERA_CONTROL_AE_LOCK
@@ -7217,6 +7402,10 @@
* will accurately report the values applied by AWB in the result.</p>
* <p>A given camera device may also support additional post-processing
* controls, but this capability only covers the above list of controls.</p>
+ * <p>For camera devices with LOGICAL_MULTI_CAMERA capability, when underlying active
+ * physical camera switches, tonemap, white balance, and shading map may change even if
+ * awb is locked. However, the overall post-processing experience for users will be
+ * consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.</p>
*
* @see ACAMERA_COLOR_CORRECTION_ABERRATION_MODE
* @see ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES
@@ -7396,7 +7585,7 @@
* </li>
* <li>The SENSOR_INFO_TIMESTAMP_SOURCE of the logical device and physical devices must be
* the same.</li>
- * <li>The logical camera device must be LIMITED or higher device.</li>
+ * <li>The logical camera must be LIMITED or higher device.</li>
* </ul>
* <p>Both the logical camera device and its underlying physical devices support the
* mandatory stream combinations required for their device levels.</p>
@@ -7416,13 +7605,84 @@
* <p>Using physical streams in place of a logical stream of the same size and format will
* not slow down the frame rate of the capture, as long as the minimum frame duration
* of the physical and logical streams are the same.</p>
+ * <p>A logical camera device's dynamic metadata may contain
+ * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID to notify the application of the current
+ * active physical camera ID. An active physical camera is the physical camera from which
+ * the logical camera's main image data outputs (YUV or RAW) and metadata come.
+ * In addition, this serves as an indication of which physical camera is used to output to
+ * a RAW stream, or, in case only physical cameras support RAW, which physical RAW stream
+ * the application should request.</p>
+ * <p>Logical camera's static metadata tags below describe the default active physical
+ * camera. An active physical camera is default if it's used when application directly
+ * uses requests built from a template. All templates will default to the same active
+ * physical camera.</p>
+ * <ul>
+ * <li>ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE</li>
+ * <li>ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT</li>
+ * <li>ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE</li>
+ * <li>ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION</li>
+ * <li>ACAMERA_SENSOR_INFO_PHYSICAL_SIZE</li>
+ * <li>ACAMERA_SENSOR_INFO_WHITE_LEVEL</li>
+ * <li>ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED</li>
+ * <li>ACAMERA_SENSOR_REFERENCE_ILLUMINANT1</li>
+ * <li>ACAMERA_SENSOR_REFERENCE_ILLUMINANT2</li>
+ * <li>ACAMERA_SENSOR_CALIBRATION_TRANSFORM1</li>
+ * <li>ACAMERA_SENSOR_CALIBRATION_TRANSFORM2</li>
+ * <li>ACAMERA_SENSOR_COLOR_TRANSFORM1</li>
+ * <li>ACAMERA_SENSOR_COLOR_TRANSFORM2</li>
+ * <li>ACAMERA_SENSOR_FORWARD_MATRIX1</li>
+ * <li>ACAMERA_SENSOR_FORWARD_MATRIX2</li>
+ * <li>ACAMERA_SENSOR_BLACK_LEVEL_PATTERN</li>
+ * <li>ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY</li>
+ * <li>ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS</li>
+ * <li>ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES</li>
+ * <li>ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE</li>
+ * <li>ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE</li>
+ * <li>ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION</li>
+ * <li>ACAMERA_LENS_POSE_ROTATION</li>
+ * <li>ACAMERA_LENS_POSE_TRANSLATION</li>
+ * <li>ACAMERA_LENS_INTRINSIC_CALIBRATION</li>
+ * <li>ACAMERA_LENS_POSE_REFERENCE</li>
+ * <li>ACAMERA_LENS_DISTORTION</li>
+ * </ul>
+ * <p>To maintain backward compatibility, the capture request and result metadata tags
+ * required for basic camera functionalities will be solely based on the
+ * logical camera capability. Other request and result metadata tags, on the other
+ * hand, will be based on the current active physical camera. For example, the physical
+ * cameras' sensor sensitivity and lens capability could be different from each other.
+ * So when the application manually controls sensor exposure time/gain, or does manual
+ * focus control, it must check the current active physical camera's exposure, gain,
+ * and focus distance range.</p>
*
* @see ACAMERA_LENS_DISTORTION
+ * @see ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
+ * @see ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE
+ * @see ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE
* @see ACAMERA_LENS_INTRINSIC_CALIBRATION
* @see ACAMERA_LENS_POSE_REFERENCE
* @see ACAMERA_LENS_POSE_ROTATION
* @see ACAMERA_LENS_POSE_TRANSLATION
+ * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
* @see ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
+ * @see ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES
+ * @see ACAMERA_SENSOR_BLACK_LEVEL_PATTERN
+ * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM1
+ * @see ACAMERA_SENSOR_CALIBRATION_TRANSFORM2
+ * @see ACAMERA_SENSOR_COLOR_TRANSFORM1
+ * @see ACAMERA_SENSOR_COLOR_TRANSFORM2
+ * @see ACAMERA_SENSOR_FORWARD_MATRIX1
+ * @see ACAMERA_SENSOR_FORWARD_MATRIX2
+ * @see ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
+ * @see ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE
+ * @see ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED
+ * @see ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION
+ * @see ACAMERA_SENSOR_INFO_PHYSICAL_SIZE
+ * @see ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE
+ * @see ACAMERA_SENSOR_INFO_WHITE_LEVEL
+ * @see ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY
+ * @see ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS
+ * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
+ * @see ACAMERA_SENSOR_REFERENCE_ILLUMINANT2
*/
ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA = 11,
@@ -7440,6 +7700,13 @@
*/
ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME = 12,
+ /**
+ * <p>The camera device is capable of writing image data into a region of memory
+ * inaccessible to Android userspace or the Android kernel, and only accessible to
+ * trusted execution environments (TEE).</p>
+ */
+ ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA = 13,
+
} acamera_metadata_enum_android_request_available_capabilities_t;
@@ -8145,6 +8412,16 @@
} acamera_metadata_enum_android_depth_depth_is_exclusive_t;
+// ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_depth_available_dynamic_depth_stream_configurations {
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_OUTPUT
+ = 0,
+
+ ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_INPUT
+ = 1,
+
+} acamera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations_t;
+
// ACAMERA_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
typedef enum acamera_metadata_enum_acamera_logical_multi_camera_sensor_sync_type {
@@ -8188,6 +8465,16 @@
} acamera_metadata_enum_android_distortion_correction_mode_t;
+// ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_heic_available_heic_stream_configurations {
+ ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_OUTPUT = 0,
+
+ ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_INPUT = 1,
+
+} acamera_metadata_enum_android_heic_available_heic_stream_configurations_t;
+
+
+
#endif /* __ANDROID_API__ >= 24 */
__END_DECLS
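
For illustration only (not part of this change): an NDK client might consume the new tags roughly as sketched below. The camera ID handling, callback wiring, and error handling are simplified assumptions; the entry points used (ACameraManager_getCameraCharacteristics, ACameraMetadata_getConstEntry) are existing NDK APIs.

    // Sketch: list HEIC output sizes from the static characteristics, and read the active
    // physical camera ID from a capture result inside a result callback.
    #include <camera/NdkCameraManager.h>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>
    #include <media/NdkImage.h>   // AIMAGE_FORMAT_HEIC

    static void listHeicOutputSizes(ACameraManager* mgr, const char* cameraId) {
        ACameraMetadata* chars = nullptr;
        if (ACameraManager_getCameraCharacteristics(mgr, cameraId, &chars) != ACAMERA_OK) return;
        ACameraMetadata_const_entry entry;
        if (ACameraMetadata_getConstEntry(chars,
                ACAMERA_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, &entry) == ACAMERA_OK) {
            // Entries are (format, width, height, input?) tuples; only OUTPUT entries are listed.
            for (uint32_t i = 0; i + 3 < entry.count; i += 4) {
                if (entry.data.i32[i] == AIMAGE_FORMAT_HEIC) {
                    int32_t w = entry.data.i32[i + 1], h = entry.data.i32[i + 2];
                    (void)w; (void)h;   // a supported HEIC output size
                }
            }
        }
        ACameraMetadata_free(chars);
    }

    static void onCaptureResult(const ACameraMetadata* result) {
        ACameraMetadata_const_entry entry;
        if (ACameraMetadata_getConstEntry(result,
                ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry) == ACAMERA_OK) {
            // A NUL-terminated string naming the physical camera currently backing the logical one.
            const char* activeId = reinterpret_cast<const char*>(entry.data.u8);
            (void)activeId;
        }
    }
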
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index f7863a5..9aafcd3 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -289,7 +289,7 @@
}
camera_status_t
-CameraDevice::allocateCaptureRequest(
+CameraDevice::allocateCaptureRequestLocked(
const ACaptureRequest* request, /*out*/sp<CaptureRequest> &outReq) {
sp<CaptureRequest> req(new CaptureRequest());
req->mCaptureRequest.physicalCameraSettings.resize(1);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index c63b97f..d571585 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -169,7 +169,12 @@
camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
- camera_status_t allocateCaptureRequest(
+ // Since this writes to ICameraDeviceUser's fmq, clients must take care that:
+ // a) This function is called serially.
+ // b) This function is called in accordance with ICameraDeviceUser.submitRequestList,
+ // otherwise, the wrong capture request might have the wrong settings
+ // metadata associated with it.
+ camera_status_t allocateCaptureRequestLocked(
const ACaptureRequest* request, sp<CaptureRequest>& outReq);
static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req);
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
index 7d2304e..8bd5a52 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
+++ b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
@@ -73,7 +73,7 @@
requestsV.setCapacity(numRequests);
for (int i = 0; i < numRequests; i++) {
sp<CaptureRequest> req;
- ret = allocateCaptureRequest(requests[i], req);
+ ret = allocateCaptureRequestLocked(requests[i], req);
// We need to call this method since after submitRequestList is called,
// the request metadata queue might have removed the capture request
// metadata. Therefore we simply add the metadata to its wrapper class,
diff --git a/camera/ndk/ndk_vendor/impl/utils.cpp b/camera/ndk/ndk_vendor/impl/utils.cpp
index 7193006..5d2d47c 100644
--- a/camera/ndk/ndk_vendor/impl/utils.cpp
+++ b/camera/ndk/ndk_vendor/impl/utils.cpp
@@ -70,7 +70,6 @@
return;
}
size_t size = get_camera_metadata_size(src);
- ALOGE("Converting metadata size: %d", (int)size);
dst->setToExternal((uint8_t *) src, size);
return;
}
diff --git a/cmds/screenrecord/Android.bp b/cmds/screenrecord/Android.bp
new file mode 100644
index 0000000..86476cd
--- /dev/null
+++ b/cmds/screenrecord/Android.bp
@@ -0,0 +1,55 @@
+// Copyright 2013 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_binary {
+ name: "screenrecord",
+
+ srcs: [
+ "screenrecord.cpp",
+ "EglWindow.cpp",
+ "FrameOutput.cpp",
+ "TextRenderer.cpp",
+ "Overlay.cpp",
+ "Program.cpp",
+ ],
+
+ shared_libs: [
+ "libstagefright",
+ "libmedia",
+ "libmedia_omx",
+ "libutils",
+ "libbinder",
+ "libstagefright_foundation",
+ "libjpeg",
+ "libui",
+ "libgui",
+ "libcutils",
+ "liblog",
+ "libEGL",
+ "libGLESv2",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright",
+ "frameworks/av/media/libstagefright/include",
+ "frameworks/native/include/media/openmax",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-Wno-multichar",
+ //"-UNDEBUG",
+ ]
+}
diff --git a/cmds/screenrecord/Android.mk b/cmds/screenrecord/Android.mk
deleted file mode 100644
index 5e83ed6..0000000
--- a/cmds/screenrecord/Android.mk
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- screenrecord.cpp \
- EglWindow.cpp \
- FrameOutput.cpp \
- TextRenderer.cpp \
- Overlay.cpp \
- Program.cpp
-
-LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia libmedia_omx libutils libbinder libstagefright_foundation \
- libjpeg libui libgui libcutils liblog libEGL libGLESv2
-
-LOCAL_C_INCLUDES := \
- frameworks/av/media/libstagefright \
- frameworks/av/media/libstagefright/include \
- frameworks/native/include/media/openmax \
- external/jpeg
-
-LOCAL_CFLAGS := -Werror -Wall
-LOCAL_CFLAGS += -Wno-multichar
-#LOCAL_CFLAGS += -UNDEBUG
-
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_MODULE:= screenrecord
-
-include $(BUILD_EXECUTABLE)
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 7803ccc..c361690 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -86,6 +86,7 @@
using android::INFO_FORMAT_CHANGED;
using android::INFO_OUTPUT_BUFFERS_CHANGED;
using android::INVALID_OPERATION;
+using android::NAME_NOT_FOUND;
using android::NO_ERROR;
using android::UNKNOWN_ERROR;
@@ -585,8 +586,12 @@
self->startThreadPool();
// Get main display parameters.
- sp<IBinder> mainDpy = SurfaceComposerClient::getBuiltInDisplay(
- ISurfaceComposer::eDisplayIdMain);
+ const sp<IBinder> mainDpy = SurfaceComposerClient::getInternalDisplayToken();
+ if (mainDpy == nullptr) {
+ fprintf(stderr, "ERROR: no display\n");
+ return NAME_NOT_FOUND;
+ }
+
DisplayInfo mainDpyInfo;
err = SurfaceComposerClient::getDisplayInfo(mainDpy, &mainDpyInfo);
if (err != NO_ERROR) {
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index a463ec5..e5a4337 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -411,10 +411,12 @@
composerClient = new SurfaceComposerClient;
CHECK_EQ(composerClient->initCheck(), (status_t)OK);
- sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
- ISurfaceComposer::eDisplayIdMain));
+ const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+ CHECK(display != nullptr);
+
DisplayInfo info;
- SurfaceComposerClient::getDisplayInfo(display, &info);
+ CHECK_EQ(SurfaceComposerClient::getDisplayInfo(display, &info), NO_ERROR);
+
ssize_t displayWidth = info.w;
ssize_t displayHeight = info.h;
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index f0ee0e1..2cf6955 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -748,10 +748,12 @@
composerClient = new SurfaceComposerClient;
CHECK_EQ((status_t)OK, composerClient->initCheck());
- android::sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
- ISurfaceComposer::eDisplayIdMain));
+ const android::sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+ CHECK(display != nullptr);
+
DisplayInfo info;
- SurfaceComposerClient::getDisplayInfo(display, &info);
+ CHECK_EQ(SurfaceComposerClient::getDisplayInfo(display, &info), NO_ERROR);
+
ssize_t displayWidth = info.w;
ssize_t displayHeight = info.h;
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 34a9a40..bf36be0 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -46,6 +46,7 @@
#include <media/stagefright/JPEGSource.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
@@ -628,7 +629,7 @@
fprintf(stderr, " -l(ist) components\n");
fprintf(stderr, " -m max-number-of-frames-to-decode in each pass\n");
fprintf(stderr, " -b bug to reproduce\n");
- fprintf(stderr, " -p(rofiles) dump decoder profiles supported\n");
+ fprintf(stderr, " -i(nfo) dump codec info (profiles and color formats supported, details)\n");
fprintf(stderr, " -t(humbnail) extract video thumbnail or album art\n");
fprintf(stderr, " -s(oftware) prefer software codec\n");
fprintf(stderr, " -r(hardware) force to use hardware codec\n");
@@ -646,55 +647,131 @@
fprintf(stderr, " -v be more verbose\n");
}
-static void dumpCodecProfiles(bool queryDecoders) {
- const char *kMimeTypes[] = {
- MEDIA_MIMETYPE_VIDEO_AVC, MEDIA_MIMETYPE_VIDEO_MPEG4,
- MEDIA_MIMETYPE_VIDEO_H263, MEDIA_MIMETYPE_AUDIO_AAC,
- MEDIA_MIMETYPE_AUDIO_AMR_NB, MEDIA_MIMETYPE_AUDIO_AMR_WB,
- MEDIA_MIMETYPE_AUDIO_MPEG, MEDIA_MIMETYPE_AUDIO_G711_MLAW,
- MEDIA_MIMETYPE_AUDIO_G711_ALAW, MEDIA_MIMETYPE_AUDIO_VORBIS,
- MEDIA_MIMETYPE_VIDEO_VP8, MEDIA_MIMETYPE_VIDEO_VP9,
- MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, MEDIA_MIMETYPE_VIDEO_HEVC,
- MEDIA_MIMETYPE_AUDIO_EAC3, MEDIA_MIMETYPE_AUDIO_AC4,
- MEDIA_MIMETYPE_VIDEO_AV1
- };
-
- const char *codecType = queryDecoders? "decoder" : "encoder";
- printf("%s profiles:\n", codecType);
+static void dumpCodecDetails(bool queryDecoders) {
+ const char *codecType = queryDecoders? "Decoder" : "Encoder";
+ printf("\n%s infos by media types:\n"
+ "=============================\n", codecType);
sp<IMediaCodecList> list = MediaCodecList::getInstance();
size_t numCodecs = list->countCodecs();
- for (size_t k = 0; k < sizeof(kMimeTypes) / sizeof(kMimeTypes[0]); ++k) {
- printf("type '%s':\n", kMimeTypes[k]);
+ // gather all media types supported by codec class, and link to codecs that support them
+ KeyedVector<AString, Vector<sp<MediaCodecInfo>>> allMediaTypes;
+ for (size_t codec_ix = 0; codec_ix < numCodecs; ++codec_ix) {
+ sp<MediaCodecInfo> info = list->getCodecInfo(codec_ix);
+ if (info->isEncoder() == !queryDecoders) {
+ Vector<AString> supportedMediaTypes;
+ info->getSupportedMediaTypes(&supportedMediaTypes);
+ if (!supportedMediaTypes.size()) {
+ printf("warning: %s does not support any media types\n",
+ info->getCodecName());
+ } else {
+ for (const AString &mediaType : supportedMediaTypes) {
+ if (allMediaTypes.indexOfKey(mediaType) < 0) {
+ allMediaTypes.add(mediaType, Vector<sp<MediaCodecInfo>>());
+ }
+ allMediaTypes.editValueFor(mediaType).add(info);
+ }
+ }
+ }
+ }
- for (size_t index = 0; index < numCodecs; ++index) {
- sp<MediaCodecInfo> info = list->getCodecInfo(index);
- if (info == NULL || info->isEncoder() != !queryDecoders) {
- continue;
- }
- sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor(kMimeTypes[k]);
+ KeyedVector<AString, bool> visitedCodecs;
+ for (size_t type_ix = 0; type_ix < allMediaTypes.size(); ++type_ix) {
+ const AString &mediaType = allMediaTypes.keyAt(type_ix);
+ printf("\nMedia type '%s':\n", mediaType.c_str());
+
+ for (const sp<MediaCodecInfo> &info : allMediaTypes.valueAt(type_ix)) {
+ sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor(mediaType.c_str());
if (caps == NULL) {
+ printf("warning: %s does not have capabilities for type %s\n",
+ info->getCodecName(), mediaType.c_str());
continue;
}
- printf(" %s '%s' supports ",
+ printf(" %s \"%s\" supports\n",
codecType, info->getCodecName());
- Vector<MediaCodecInfo::ProfileLevel> profileLevels;
- caps->getSupportedProfileLevels(&profileLevels);
- if (profileLevels.size() == 0) {
- printf("NOTHING.\n");
- continue;
+ auto printList = [](const char *type, const Vector<AString> &values){
+ printf(" %s: [", type);
+ for (size_t j = 0; j < values.size(); ++j) {
+ printf("\n %s%s", values[j].c_str(),
+ j == values.size() - 1 ? " " : ",");
+ }
+ printf("]\n");
+ };
+
+ if (visitedCodecs.indexOfKey(info->getCodecName()) < 0) {
+ visitedCodecs.add(info->getCodecName(), true);
+ {
+ Vector<AString> aliases;
+ info->getAliases(&aliases);
+ // quote alias
+ for (AString &alias : aliases) {
+ alias.insert("\"", 1, 0);
+ alias.append('"');
+ }
+ printList("aliases", aliases);
+ }
+ {
+ uint32_t attrs = info->getAttributes();
+ Vector<AString> list;
+ list.add(AStringPrintf("encoder: %d", !!(attrs & MediaCodecInfo::kFlagIsEncoder)));
+ list.add(AStringPrintf("vendor: %d", !!(attrs & MediaCodecInfo::kFlagIsVendor)));
+ list.add(AStringPrintf("software-only: %d", !!(attrs & MediaCodecInfo::kFlagIsSoftwareOnly)));
+ list.add(AStringPrintf("hw-accelerated: %d", !!(attrs & MediaCodecInfo::kFlagIsHardwareAccelerated)));
+ printList(AStringPrintf("attributes: %#x", attrs).c_str(), list);
+ }
+
+ printf(" owner: \"%s\"\n", info->getOwnerName());
+ printf(" rank: %u\n", info->getRank());
+ } else {
+ printf(" aliases, attributes, owner, rank: see above\n");
}
- for (size_t j = 0; j < profileLevels.size(); ++j) {
- const MediaCodecInfo::ProfileLevel &profileLevel = profileLevels[j];
+ {
+ Vector<AString> list;
+ Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+ caps->getSupportedProfileLevels(&profileLevels);
+ for (const MediaCodecInfo::ProfileLevel &pl : profileLevels) {
+ const char *niceProfile =
+ mediaType.equalsIgnoreCase(MIMETYPE_AUDIO_AAC) ? asString_AACObject(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG2) ? asString_MPEG2Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_H263) ? asString_H263Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG4) ? asString_MPEG4Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AVC) ? asString_AVCProfile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP8) ? asString_VP8Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_HEVC) ? asString_HEVCProfile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP9) ? asString_VP9Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AV1) ? asString_AV1Profile(pl.mProfile) :"??";
+ const char *niceLevel =
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG2) ? asString_MPEG2Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_H263) ? asString_H263Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG4) ? asString_MPEG4Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AVC) ? asString_AVCLevel(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP8) ? asString_VP8Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_HEVC) ? asString_HEVCTierLevel(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP9) ? asString_VP9Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AV1) ? asString_AV1Level(pl.mLevel) :
+ "??";
- printf("%s%u/%u", j > 0 ? ", " : "",
- profileLevel.mProfile, profileLevel.mLevel);
+ list.add(AStringPrintf("% 5u/% 5u (%s/%s)",
+ pl.mProfile, pl.mLevel, niceProfile, niceLevel));
+ }
+ printList("profile/levels", list);
}
- printf("\n");
+ {
+ Vector<AString> list;
+ Vector<uint32_t> colors;
+ caps->getSupportedColorFormats(&colors);
+ for (uint32_t color : colors) {
+ list.add(AStringPrintf("%#x (%s)", color,
+ asString_ColorFormat((int32_t)color)));
+ }
+ printList("colors", list);
+ }
+
+ printf(" details: %s\n", caps->getDetails()->debugString(6).c_str());
}
}
}
@@ -704,7 +781,7 @@
bool audioOnly = false;
bool listComponents = false;
- bool dumpProfiles = false;
+ bool dumpCodecInfo = false;
bool extractThumbnail = false;
bool seekTest = false;
bool useSurfaceAlloc = false;
@@ -724,7 +801,7 @@
sp<android::ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "vhaqn:lm:b:ptsrow:kN:xSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "vhaqn:lm:b:itsrow:kN:xSTd:D:")) >= 0) {
switch (res) {
case 'a':
{
@@ -794,9 +871,9 @@
break;
}
- case 'p':
+ case 'i':
{
- dumpProfiles = true;
+ dumpCodecInfo = true;
break;
}
@@ -937,9 +1014,9 @@
return 0;
}
- if (dumpProfiles) {
- dumpCodecProfiles(true /* queryDecoders */);
- dumpCodecProfiles(false /* queryDecoders */);
+ if (dumpCodecInfo) {
+ dumpCodecDetails(true /* queryDecoders */);
+ dumpCodecDetails(false /* queryDecoders */);
}
if (listComponents) {
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index b2f39dc..35bdbc0 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -318,10 +318,12 @@
sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
CHECK_EQ(composerClient->initCheck(), (status_t)OK);
- sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
- ISurfaceComposer::eDisplayIdMain));
+ const sp<IBinder> display = SurfaceComposerClient::getInternalDisplayToken();
+ CHECK(display != nullptr);
+
DisplayInfo info;
- SurfaceComposerClient::getDisplayInfo(display, &info);
+ CHECK_EQ(SurfaceComposerClient::getDisplayInfo(display, &info), NO_ERROR);
+
ssize_t displayWidth = info.w;
ssize_t displayHeight = info.h;
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index b72348f..5888af0 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -145,13 +145,30 @@
}
}
+static SecurityLevel toHidlSecurityLevel(DrmPlugin::SecurityLevel level) {
+ switch(level) {
+ case DrmPlugin::kSecurityLevelSwSecureCrypto:
+ return SecurityLevel::SW_SECURE_CRYPTO;
+ case DrmPlugin::kSecurityLevelSwSecureDecode:
+ return SecurityLevel::SW_SECURE_DECODE;
+ case DrmPlugin::kSecurityLevelHwSecureCrypto:
+ return SecurityLevel::HW_SECURE_CRYPTO;
+ case DrmPlugin::kSecurityLevelHwSecureDecode:
+ return SecurityLevel::HW_SECURE_DECODE;
+ case DrmPlugin::kSecurityLevelHwSecureAll:
+ return SecurityLevel::HW_SECURE_ALL;
+ default:
+ return SecurityLevel::UNKNOWN;
+ }
+}
+
static DrmPlugin::OfflineLicenseState toOfflineLicenseState(
OfflineLicenseState licenseState) {
switch(licenseState) {
case OfflineLicenseState::USABLE:
return DrmPlugin::kOfflineLicenseStateUsable;
case OfflineLicenseState::INACTIVE:
- return DrmPlugin::kOfflineLicenseStateInactive;
+ return DrmPlugin::kOfflineLicenseStateReleased;
default:
return DrmPlugin::kOfflineLicenseStateUnknown;
}
@@ -569,28 +586,57 @@
return Void();
}
-bool DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
- Mutex::Autolock autoLock(mLock);
+status_t DrmHal::matchMimeTypeAndSecurityLevel(const sp<IDrmFactory> &factory,
+ const uint8_t uuid[16],
+ const String8 &mimeType,
+ DrmPlugin::SecurityLevel level,
+ bool *isSupported) {
+ *isSupported = false;
- for (size_t i = 0; i < mFactories.size(); i++) {
+ // handle default value cases
+ if (level == DrmPlugin::kSecurityLevelUnknown) {
+ if (mimeType == "") {
+ // isCryptoSchemeSupported(uuid)
+ *isSupported = true;
+ } else {
+ // isCryptoSchemeSupported(uuid, mimeType)
+ *isSupported = factory->isContentTypeSupported(mimeType.string());
+ }
+ return OK;
+ } else if (mimeType == "") {
+ return BAD_VALUE;
+ }
+
+ sp<drm::V1_2::IDrmFactory> factoryV1_2 = drm::V1_2::IDrmFactory::castFrom(factory);
+ if (factoryV1_2 == NULL) {
+ return ERROR_UNSUPPORTED;
+ } else {
+ *isSupported = factoryV1_2->isCryptoSchemeSupported_1_2(uuid,
+ mimeType.string(), toHidlSecurityLevel(level));
+ return OK;
+ }
+}
+
+status_t DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16],
+ const String8 &mimeType,
+ DrmPlugin::SecurityLevel level,
+ bool *isSupported) {
+ Mutex::Autolock autoLock(mLock);
+ *isSupported = false;
+ for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
- if (mimeType != "") {
- if (mFactories[i]->isContentTypeSupported(mimeType.string())) {
- return true;
- }
- } else {
- return true;
- }
+ return matchMimeTypeAndSecurityLevel(mFactories[i],
+ uuid, mimeType, level, isSupported);
}
}
- return false;
+ return OK;
}
status_t DrmHal::createPlugin(const uint8_t uuid[16],
const String8& appPackageName) {
Mutex::Autolock autoLock(mLock);
- for (size_t i = mFactories.size() - 1; i >= 0; i--) {
+ for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
auto plugin = makeDrmPlugin(mFactories[i], uuid, appPackageName);
if (plugin != NULL) {
@@ -634,30 +680,15 @@
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- SecurityLevel hSecurityLevel;
+ SecurityLevel hSecurityLevel = toHidlSecurityLevel(level);
bool setSecurityLevel = true;
- switch(level) {
- case DrmPlugin::kSecurityLevelSwSecureCrypto:
- hSecurityLevel = SecurityLevel::SW_SECURE_CRYPTO;
- break;
- case DrmPlugin::kSecurityLevelSwSecureDecode:
- hSecurityLevel = SecurityLevel::SW_SECURE_DECODE;
- break;
- case DrmPlugin::kSecurityLevelHwSecureCrypto:
- hSecurityLevel = SecurityLevel::HW_SECURE_CRYPTO;
- break;
- case DrmPlugin::kSecurityLevelHwSecureDecode:
- hSecurityLevel = SecurityLevel::HW_SECURE_DECODE;
- break;
- case DrmPlugin::kSecurityLevelHwSecureAll:
- hSecurityLevel = SecurityLevel::HW_SECURE_ALL;
- break;
- case DrmPlugin::kSecurityLevelMax:
+ if (level == DrmPlugin::kSecurityLevelMax) {
setSecurityLevel = false;
- break;
- default:
- return ERROR_DRM_CANNOT_HANDLE;
+ } else {
+ if (hSecurityLevel == SecurityLevel::UNKNOWN) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
}
status_t err = UNKNOWN_ERROR;
@@ -1188,7 +1219,7 @@
}
if (mPluginV1_2 == NULL) {
- return ERROR_DRM_CANNOT_HANDLE;
+ return ERROR_UNSUPPORTED;
}
status_t err = UNKNOWN_ERROR;
@@ -1213,7 +1244,7 @@
}
if (mPluginV1_2 == NULL) {
- return ERROR_DRM_CANNOT_HANDLE;
+ return ERROR_UNSUPPORTED;
}
Return<Status> status = mPluginV1_2->removeOfflineLicense(toHidlVec(keySetId));
@@ -1229,7 +1260,7 @@
}
if (mPluginV1_2 == NULL) {
- return ERROR_DRM_CANNOT_HANDLE;
+ return ERROR_UNSUPPORTED;
}
*licenseState = DrmPlugin::kOfflineLicenseStateUnknown;
@@ -1525,22 +1556,22 @@
void DrmHal::reportFrameworkMetrics() const
{
- MediaAnalyticsItem item("mediadrm");
- item.generateSessionID();
- item.setPkgName(mMetrics.GetAppPackageName().c_str());
+ std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("mediadrm"));
+ item->generateSessionID();
+ item->setPkgName(mMetrics.GetAppPackageName().c_str());
String8 vendor;
String8 description;
status_t result = getPropertyStringInternal(String8("vendor"), vendor);
if (result != OK) {
ALOGE("Failed to get vendor from drm plugin: %d", result);
} else {
- item.setCString("vendor", vendor.c_str());
+ item->setCString("vendor", vendor.c_str());
}
result = getPropertyStringInternal(String8("description"), description);
if (result != OK) {
ALOGE("Failed to get description from drm plugin: %d", result);
} else {
- item.setCString("description", description.c_str());
+ item->setCString("description", description.c_str());
}
std::string serializedMetrics;
@@ -1551,9 +1582,9 @@
std::string b64EncodedMetrics = toBase64StringNoPad(serializedMetrics.data(),
serializedMetrics.size());
if (!b64EncodedMetrics.empty()) {
- item.setCString("serialized_metrics", b64EncodedMetrics.c_str());
+ item->setCString("serialized_metrics", b64EncodedMetrics.c_str());
}
- if (!item.selfrecord()) {
+ if (!item->selfrecord()) {
ALOGE("Failed to self record framework metrics");
}
}
diff --git a/drm/libmediadrm/IDrm.cpp b/drm/libmediadrm/IDrm.cpp
index 8c26317..51274d1 100644
--- a/drm/libmediadrm/IDrm.cpp
+++ b/drm/libmediadrm/IDrm.cpp
@@ -83,18 +83,22 @@
return reply.readInt32();
}
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) {
+ virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType,
+ DrmPlugin::SecurityLevel level, bool *isSupported) {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
data.write(uuid, 16);
data.writeString8(mimeType);
+ data.writeInt32(level);
+
status_t status = remote()->transact(IS_CRYPTO_SUPPORTED, data, &reply);
if (status != OK) {
ALOGE("isCryptoSchemeSupported: binder call failed: %d", status);
- return false;
+ return status;
}
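+        // The reply carries the supported flag first, then the call status
+        // (matching the writes in BnDrm::onTransact).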
+ *isSupported = static_cast<bool>(reply.readInt32());
- return reply.readInt32() != 0;
+ return reply.readInt32();
}
virtual status_t createPlugin(const uint8_t uuid[16],
@@ -123,11 +127,11 @@
return reply.readInt32();
}
- virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+ virtual status_t openSession(DrmPlugin::SecurityLevel level,
Vector<uint8_t> &sessionId) {
Parcel data, reply;
data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
- data.writeInt32(securityLevel);
+ data.writeInt32(level);
status_t status = remote()->transact(OPEN_SESSION, data, &reply);
if (status != OK) {
@@ -768,7 +772,12 @@
uint8_t uuid[16];
data.read(uuid, sizeof(uuid));
String8 mimeType = data.readString8();
- reply->writeInt32(isCryptoSchemeSupported(uuid, mimeType));
+ DrmPlugin::SecurityLevel level =
+ static_cast<DrmPlugin::SecurityLevel>(data.readInt32());
+ bool isSupported = false;
+ status_t result = isCryptoSchemeSupported(uuid, mimeType, level, &isSupported);
+ reply->writeInt32(isSupported);
+ reply->writeInt32(result);
return OK;
}
diff --git a/drm/libmediadrm/PluginMetricsReporting.cpp b/drm/libmediadrm/PluginMetricsReporting.cpp
index 5cb48bf..8cd6f96 100644
--- a/drm/libmediadrm/PluginMetricsReporting.cpp
+++ b/drm/libmediadrm/PluginMetricsReporting.cpp
@@ -34,17 +34,17 @@
status_t reportVendorMetrics(const std::string& metrics,
const String8& name,
const String8& appPackageName) {
- MediaAnalyticsItem analyticsItem(name.c_str());
- analyticsItem.generateSessionID();
+ std::unique_ptr<MediaAnalyticsItem> analyticsItem(MediaAnalyticsItem::create(name.c_str()));
+ analyticsItem->generateSessionID();
std::string app_package_name(appPackageName.c_str(), appPackageName.size());
- analyticsItem.setPkgName(app_package_name);
+ analyticsItem->setPkgName(app_package_name);
if (metrics.size() > 0) {
- analyticsItem.setCString(kSerializedMetricsField, metrics.c_str());
+ analyticsItem->setCString(kSerializedMetricsField, metrics.c_str());
}
- if (!analyticsItem.selfrecord()) {
- ALOGE("selfrecord() returned false. sessioId %" PRId64, analyticsItem.getSessionID());
+ if (!analyticsItem->selfrecord()) {
+            ALOGE("selfrecord() returned false. sessionId %" PRId64, analyticsItem->getSessionID());
}
return OK;
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index 1558e8b..bf35224 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -65,7 +65,20 @@
*plugin = new ClearKeyCasPlugin(appData, callback);
return OK;
}
-///////////////////////////////////////////////////////////////////////////////
+
+status_t ClearKeyCasFactory::createPlugin(
+ int32_t CA_system_id,
+ void *appData,
+ CasPluginCallbackExt callback,
+ CasPlugin **plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new ClearKeyCasPlugin(appData, callback);
+ return OK;
+}
+////////////////////////////////////////////////////////////////////////////////
bool ClearKeyDescramblerFactory::isSystemIdSupported(
int32_t CA_system_id) const {
return CA_system_id == sClearKeySystemId;
@@ -84,7 +97,13 @@
///////////////////////////////////////////////////////////////////////////////
ClearKeyCasPlugin::ClearKeyCasPlugin(
void *appData, CasPluginCallback callback)
- : mCallback(callback), mAppData(appData) {
+ : mCallback(callback), mCallbackExt(NULL), mAppData(appData) {
+ ALOGV("CTOR");
+}
+
+ClearKeyCasPlugin::ClearKeyCasPlugin(
+ void *appData, CasPluginCallbackExt callback)
+ : mCallback(NULL), mCallbackExt(callback), mAppData(appData) {
ALOGV("CTOR");
}
@@ -167,11 +186,30 @@
// Echo the received event to the callback.
// Clear key plugin doesn't use any event, echo'ing for testing only.
if (mCallback != NULL) {
- mCallback((void*)mAppData, event, arg, (uint8_t*)eventData.data(), eventData.size());
+ mCallback((void*)mAppData, event, arg, (uint8_t*)eventData.data(),
+ eventData.size());
+ } else if (mCallbackExt != NULL) {
+ mCallbackExt((void*)mAppData, event, arg, (uint8_t*)eventData.data(),
+ eventData.size(), NULL);
}
return OK;
}
+status_t ClearKeyCasPlugin::sendSessionEvent(
+ const CasSessionId &sessionId, int32_t event,
+ int arg, const CasData &eventData) {
+ ALOGV("sendSessionEvent: sessionId=%s, event=%d, arg=%d",
+ sessionIdToString(sessionId).string(), event, arg);
+ // Echo the received event to the callback.
+ // Clear key plugin doesn't use any event, echo'ing for testing only.
+ if (mCallbackExt != NULL) {
+ mCallbackExt((void*)mAppData, event, arg, (uint8_t*)eventData.data(),
+ eventData.size(), &sessionId);
+ }
+
+ return OK;
+}
+
status_t ClearKeyCasPlugin::provision(const String8 &str) {
ALOGV("provision: provisionString=%s", str.string());
Mutex::Autolock lock(mKeyFetcherLock);
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
index 389e172..f48d5b1 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -47,6 +47,11 @@
void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id,
+ void *appData,
+ CasPluginCallbackExt callback,
+ CasPlugin **plugin) override;
};
class ClearKeyDescramblerFactory : public DescramblerFactory {
@@ -63,6 +68,7 @@
class ClearKeyCasPlugin : public CasPlugin {
public:
ClearKeyCasPlugin(void *appData, CasPluginCallback callback);
+ ClearKeyCasPlugin(void *appData, CasPluginCallbackExt callback);
virtual ~ClearKeyCasPlugin();
virtual status_t setPrivateData(
@@ -85,6 +91,10 @@
virtual status_t sendEvent(
int32_t event, int32_t arg, const CasData &eventData) override;
+ virtual status_t sendSessionEvent(
+ const CasSessionId &sessionId,
+ int32_t event, int32_t arg, const CasData &eventData) override;
+
virtual status_t provision(const String8 &str) override;
virtual status_t refreshEntitlements(
@@ -94,6 +104,7 @@
Mutex mKeyFetcherLock;
std::unique_ptr<KeyFetcher> mKeyFetcher;
CasPluginCallback mCallback;
+ CasPluginCallbackExt mCallbackExt;
void* mAppData;
};
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
index 8404a83..2964791 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.cpp
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -60,6 +60,19 @@
return OK;
}
+status_t MockCasFactory::createPlugin(
+ int32_t CA_system_id,
+ void* /*appData*/,
+ CasPluginCallbackExt /*callback*/,
+ CasPlugin **plugin) {
+ if (!isSystemIdSupported(CA_system_id)) {
+ return BAD_VALUE;
+ }
+
+ *plugin = new MockCasPlugin();
+ return OK;
+}
+
///////////////////////////////////////////////////////////////////////////////
bool MockDescramblerFactory::isSystemIdSupported(int32_t CA_system_id) const {
@@ -170,6 +183,16 @@
return OK;
}
+status_t MockCasPlugin::sendSessionEvent(
+ const CasSessionId &sessionId, int32_t event,
+ int /*arg*/, const CasData& /*eventData*/) {
+ ALOGV("sendSessionEvent: sessionId=%s, event=%d",
+ arrayToString(sessionId).string(), event);
+ Mutex::Autolock lock(mLock);
+
+ return OK;
+}
+
status_t MockCasPlugin::provision(const String8 &str) {
ALOGV("provision: provisionString=%s", str.string());
Mutex::Autolock lock(mLock);
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
index 8106990..74b540c 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.h
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -42,6 +42,11 @@
void *appData,
CasPluginCallback callback,
CasPlugin **plugin) override;
+ virtual status_t createPlugin(
+ int32_t CA_system_id,
+ void *appData,
+ CasPluginCallbackExt callback,
+ CasPlugin **plugin) override;
};
class MockDescramblerFactory : public DescramblerFactory {
@@ -80,7 +85,11 @@
virtual status_t sendEvent(
int32_t event, int32_t arg, const CasData &eventData) override;
- virtual status_t provision(const String8 &str) override;
+ virtual status_t sendSessionEvent(
+ const CasSessionId &sessionId,
+ int32_t event, int32_t arg, const CasData &eventData) override;
+
+ virtual status_t provision(const String8 &str) override;
virtual status_t refreshEntitlements(
int32_t refreshType, const CasData &refreshData) override;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index b44a6c7..e91e918 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -14,8 +14,8 @@
// limitations under the License.
//
-cc_binary {
- name: "android.hardware.drm@1.2-service.clearkey",
+cc_defaults {
+ name: "clearkey_service_defaults",
vendor: true,
srcs: [
@@ -33,13 +33,11 @@
"MemoryFileSystem.cpp",
"Session.cpp",
"SessionLibrary.cpp",
- "service.cpp",
],
relative_install_path: "hw",
cflags: ["-Wall", "-Werror"],
- init_rc: ["android.hardware.drm@1.2-service.clearkey.rc"],
shared_libs: [
"android.hardware.drm@1.0",
@@ -80,3 +78,16 @@
},
srcs: ["protos/DeviceFiles.proto"],
}
+cc_binary {
+ name: "android.hardware.drm@1.2-service.clearkey",
+ defaults: ["clearkey_service_defaults"],
+ srcs: ["service.cpp"],
+ init_rc: ["android.hardware.drm@1.2-service.clearkey.rc"],
+}
+cc_binary {
+ name: "android.hardware.drm@1.2-service-lazy.clearkey",
+ overrides: ["android.hardware.drm@1.2-service.clearkey"],
+ defaults: ["clearkey_service_defaults"],
+ srcs: ["serviceLazy.cpp"],
+ init_rc: ["android.hardware.drm@1.2-service-lazy.clearkey.rc"],
+}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
index 9d040a8..9fb5bbe 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmFactory.cpp
@@ -34,6 +34,7 @@
namespace clearkey {
using ::android::hardware::drm::V1_0::Status;
+using ::android::hardware::drm::V1_1::SecurityLevel;
using ::android::hardware::Void;
Return<bool> DrmFactory::isCryptoSchemeSupported(
@@ -41,6 +42,13 @@
return clearkeydrm::isClearKeyUUID(uuid.data());
}
+Return<bool> DrmFactory::isCryptoSchemeSupported_1_2(const hidl_array<uint8_t, 16>& uuid,
+ const hidl_string &mimeType,
+ SecurityLevel level) {
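+    // ClearKey is a software-only implementation, so only SW_SECURE_CRYPTO is supported.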
+ return isCryptoSchemeSupported(uuid) && isContentTypeSupported(mimeType) &&
+ level == SecurityLevel::SW_SECURE_CRYPTO;
+}
+
Return<bool> DrmFactory::isContentTypeSupported(const hidl_string &mimeType) {
// This should match the mimeTypes handed by InitDataParser.
return mimeType == kIsoBmffVideoMimeType ||
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
new file mode 100644
index 0000000..9afd3d7
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service-lazy.clearkey.rc
@@ -0,0 +1,14 @@
+service vendor.drm-clearkey-hal-1-2 /vendor/bin/hw/android.hardware.drm@1.2-service-lazy.clearkey
+ interface android.hardware.drm@1.0::ICryptoFactory clearkey
+ interface android.hardware.drm@1.0::IDrmFactory clearkey
+ interface android.hardware.drm@1.1::ICryptoFactory clearkey
+ interface android.hardware.drm@1.1::IDrmFactory clearkey
+ interface android.hardware.drm@1.2::ICryptoFactory clearkey
+ interface android.hardware.drm@1.2::IDrmFactory clearkey
+ disabled
+ oneshot
+ class hal
+ user media
+ group media mediadrm
+ ioprio rt 4
+ writepid /dev/cpuset/foreground/tasks
diff --git a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
index ac184f7..5ba669d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
+++ b/drm/mediadrm/plugins/clearkey/hidl/android.hardware.drm@1.2-service.clearkey.rc
@@ -1,4 +1,10 @@
service vendor.drm-clearkey-hal-1-2 /vendor/bin/hw/android.hardware.drm@1.2-service.clearkey
+ interface android.hardware.drm@1.0::ICryptoFactory clearkey
+ interface android.hardware.drm@1.0::IDrmFactory clearkey
+ interface android.hardware.drm@1.1::ICryptoFactory clearkey
+ interface android.hardware.drm@1.1::IDrmFactory clearkey
+ interface android.hardware.drm@1.2::ICryptoFactory clearkey
+ interface android.hardware.drm@1.2::IDrmFactory clearkey
class hal
user media
group media mediadrm
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
index 2dafa36..03c434e 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyTypes.h
@@ -28,6 +28,7 @@
namespace clearkey {
using ::android::hardware::drm::V1_0::KeyValue;
+using ::android::hardware::drm::V1_1::SecurityLevel;
using ::android::hardware::hidl_vec;
const uint8_t kBlockSize = 16; //AES_BLOCK_SIZE;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
index ff715ea..4ca856d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmFactory.h
@@ -39,6 +39,10 @@
Return<bool> isCryptoSchemeSupported(const hidl_array<uint8_t, 16>& uuid)
override;
+ Return<bool> isCryptoSchemeSupported_1_2(const hidl_array<uint8_t, 16>& uuid,
+ const hidl_string& mimeType,
+ SecurityLevel level) override;
+
Return<bool> isContentTypeSupported(const hidl_string &mimeType)
override;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/service.cpp b/drm/mediadrm/plugins/clearkey/hidl/service.cpp
index 4ca31f3..b39ea01 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/service.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/service.cpp
@@ -13,13 +13,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#define LOG_TAG "android.hardware.drm@1.2-service.clearkey"
-
#include <CryptoFactory.h>
#include <DrmFactory.h>
#include <android-base/logging.h>
#include <binder/ProcessState.h>
+#include <hidl/HidlLazyUtils.h>
#include <hidl/HidlTransportSupport.h>
using ::android::hardware::configureRpcThreadpool;
@@ -31,14 +30,7 @@
using android::hardware::drm::V1_2::clearkey::CryptoFactory;
using android::hardware::drm::V1_2::clearkey::DrmFactory;
-
int main(int /* argc */, char** /* argv */) {
- ALOGD("android.hardware.drm@1.2-service.clearkey starting...");
-
- // The DRM HAL may communicate to other vendor components via
- // /dev/vndbinder
- android::ProcessState::initWithDriver("/dev/vndbinder");
-
sp<IDrmFactory> drmFactory = new DrmFactory;
sp<ICryptoFactory> cryptoFactory = new CryptoFactory;
diff --git a/drm/mediadrm/plugins/clearkey/hidl/serviceLazy.cpp b/drm/mediadrm/plugins/clearkey/hidl/serviceLazy.cpp
new file mode 100644
index 0000000..99fd883
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/hidl/serviceLazy.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <CryptoFactory.h>
+#include <DrmFactory.h>
+
+#include <android-base/logging.h>
+#include <binder/ProcessState.h>
+#include <hidl/HidlLazyUtils.h>
+#include <hidl/HidlTransportSupport.h>
+
+using ::android::hardware::configureRpcThreadpool;
+using ::android::hardware::joinRpcThreadpool;
+using ::android::sp;
+
+using android::hardware::drm::V1_2::ICryptoFactory;
+using android::hardware::drm::V1_2::IDrmFactory;
+using android::hardware::drm::V1_2::clearkey::CryptoFactory;
+using android::hardware::drm::V1_2::clearkey::DrmFactory;
+using android::hardware::LazyServiceRegistrar;
+
+int main(int /* argc */, char** /* argv */) {
+ sp<IDrmFactory> drmFactory = new DrmFactory;
+ sp<ICryptoFactory> cryptoFactory = new CryptoFactory;
+
+ configureRpcThreadpool(8, true /* callerWillJoin */);
+
+ // Setup hwbinder service
+ LazyServiceRegistrar serviceRegistrar;
+
+    // Register the ClearKey factories with the lazy service registrar.
+ CHECK_EQ(serviceRegistrar.registerService(drmFactory, "clearkey"), android::NO_ERROR)
+ << "Failed to register Clearkey Factory HAL";
+ CHECK_EQ(serviceRegistrar.registerService(cryptoFactory, "clearkey"), android::NO_ERROR)
+ << "Failed to register Clearkey Crypto HAL";
+
+ joinRpcThreadpool();
+}
diff --git a/include/media/AudioAttributes.h b/include/media/AudioAttributes.h
new file mode 120000
index 0000000..27ba471
--- /dev/null
+++ b/include/media/AudioAttributes.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioAttributes.h
\ No newline at end of file
diff --git a/include/media/AudioCommonTypes.h b/include/media/AudioCommonTypes.h
new file mode 120000
index 0000000..ae7c99a
--- /dev/null
+++ b/include/media/AudioCommonTypes.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioCommonTypes.h
\ No newline at end of file
diff --git a/include/media/AudioPolicyHelper.h b/include/media/AudioPolicyHelper.h
deleted file mode 120000
index 558657e..0000000
--- a/include/media/AudioPolicyHelper.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioPolicyHelper.h
\ No newline at end of file
diff --git a/include/media/AudioProductStrategy.h b/include/media/AudioProductStrategy.h
new file mode 120000
index 0000000..6bfaf11
--- /dev/null
+++ b/include/media/AudioProductStrategy.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioProductStrategy.h
\ No newline at end of file
diff --git a/include/media/AudioVolumeGroup.h b/include/media/AudioVolumeGroup.h
new file mode 120000
index 0000000..d6f1c99
--- /dev/null
+++ b/include/media/AudioVolumeGroup.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioVolumeGroup.h
\ No newline at end of file
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index f05c84b..10d8b13 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -19,7 +19,8 @@
libnbaio \
libnblog \
libsoundtriggerservice \
- libutils
+ libutils \
+ libvibrator
# TODO oboeservice is the old folder name for aaudioservice. It will be changed.
LOCAL_C_INCLUDES := \
diff --git a/media/codec2/components/aac/C2SoftAacDec.cpp b/media/codec2/components/aac/C2SoftAacDec.cpp
index c7c8442..4d00d35 100644
--- a/media/codec2/components/aac/C2SoftAacDec.cpp
+++ b/media/codec2/components/aac/C2SoftAacDec.cpp
@@ -52,37 +52,30 @@
namespace android {
-class C2SoftAacDec::IntfImpl : public C2InterfaceHelper {
+constexpr char COMPONENT_NAME[] = "c2.android.aac.decoder";
+
+class C2SoftAacDec::IntfImpl : public SimpleInterface<void>::BaseParams {
public:
explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
- : C2InterfaceHelper(helper) {
-
- setDerivedInstance(this);
+ : SimpleInterface<void>::BaseParams(
+ helper,
+ COMPONENT_NAME,
+ C2Component::KIND_DECODER,
+ C2Component::DOMAIN_AUDIO,
+ MEDIA_MIMETYPE_AUDIO_AAC) {
+ noPrivateBuffers();
+ noInputReferences();
+ noOutputReferences();
+ noInputLatency();
+ noTimeStretch();
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+ .withConstValue(new C2PortActualDelayTuning::output(2u))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
- .build());
-
- addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
- MEDIA_MIMETYPE_AUDIO_AAC))
- .build());
-
- addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
- MEDIA_MIMETYPE_AUDIO_RAW))
- .build());
-
- addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -91,15 +84,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -110,10 +103,10 @@
.build());
addParameter(
- DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
- .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+ DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+ .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
.withFields({C2F(mAacFormat, value).oneOf({
- C2AacStreamFormatRaw, C2AacStreamFormatAdts
+ C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
})})
.withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
.build());
@@ -198,7 +191,7 @@
.build());
}
- bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+ bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
(void)mayBlock;
(void)me; // TODO: validate
@@ -212,13 +205,13 @@
int32_t getDrcEffectType() const { return mDrcEffectType->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
@@ -231,8 +224,6 @@
// TODO Add : C2StreamAacSbrModeTuning
};
-constexpr char COMPONENT_NAME[] = "c2.android.aac.decoder";
-
C2SoftAacDec::C2SoftAacDec(
const char *name,
c2_node_id_t id,
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index 87730ae..137e775 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -37,29 +37,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AAC))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -68,15 +68,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 6)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -125,13 +125,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
};
@@ -151,6 +151,7 @@
mNumBytesPerInputFrame(0u),
mOutBufferSize(0u),
mSentCodecSpecificData(false),
+ mInputTimeSet(false),
mInputSize(0),
mInputTimeUs(-1ll),
mSignalledError(false),
@@ -176,6 +177,7 @@
c2_status_t C2SoftAacEnc::onStop() {
mSentCodecSpecificData = false;
+ mInputTimeSet = false;
mInputSize = 0u;
mInputTimeUs = -1ll;
mSignalledError = false;
@@ -193,6 +195,7 @@
c2_status_t C2SoftAacEnc::onFlush_sm() {
mSentCodecSpecificData = false;
+ mInputTimeSet = false;
mInputSize = 0u;
return C2_OK;
}
@@ -320,8 +323,8 @@
return;
}
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(encInfo.confSize, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(encInfo.confSize, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -337,7 +340,6 @@
mOutBufferSize = encInfo.maxOutBufBytes;
mNumBytesPerInputFrame = encInfo.frameLength * channelCount * sizeof(int16_t);
- mInputTimeUs = work->input.ordinal.timestamp;
mSentCodecSpecificData = true;
}
@@ -351,6 +353,10 @@
data = view.data();
capacity = view.capacity();
}
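+    // Latch the anchor timestamp from the first work item that actually carries
+    // input data, rather than from the codec-config-only work item.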
+ if (!mInputTimeSet && capacity > 0) {
+ mInputTimeUs = work->input.ordinal.timestamp;
+ mInputTimeSet = true;
+ }
size_t numFrames = (capacity + mInputSize + (eos ? mNumBytesPerInputFrame - 1 : 0))
/ mNumBytesPerInputFrame;
@@ -550,6 +556,7 @@
(void)pool;
mSentCodecSpecificData = false;
+ mInputTimeSet = false;
mInputSize = 0u;
// TODO: we don't have any pending work at this time to drain.
diff --git a/media/codec2/components/aac/C2SoftAacEnc.h b/media/codec2/components/aac/C2SoftAacEnc.h
index 82fb438..779365b 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.h
+++ b/media/codec2/components/aac/C2SoftAacEnc.h
@@ -57,6 +57,7 @@
UINT mOutBufferSize;
bool mSentCodecSpecificData;
+ bool mInputTimeSet;
size_t mInputSize;
c2_cntr64_t mInputTimeUs;
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
index c591e21..edad75a 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
@@ -47,18 +47,18 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
#ifdef AMRNB
MEDIA_MIMETYPE_AUDIO_AMR_NB
#else
@@ -67,13 +67,13 @@
)).build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
#ifdef AMRNB
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
@@ -85,19 +85,19 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
#ifdef AMRNB
- .withDefault(new C2BitrateTuning::input(0u, 4750))
+ .withDefault(new C2StreamBitrateInfo::input(0u, 4750))
.withFields({C2F(mBitrate, value).inRange(4750, 12200)})
#else
- .withDefault(new C2BitrateTuning::input(0u, 6600))
+ .withDefault(new C2StreamBitrateInfo::input(0u, 6600))
.withFields({C2F(mBitrate, value).inRange(6600, 23850)})
#endif
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
@@ -110,13 +110,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
index ca21480..3d3aa7d 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
@@ -36,38 +36,38 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AMR_NB))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
.withSetter(
@@ -75,8 +75,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 4750))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 4750))
.withFields({C2F(mBitrate, value).inRange(4750, 12200)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -92,13 +92,13 @@
uint32_t getBitrate() const { return mBitrate->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -271,7 +271,7 @@
mFilledLen = 0;
}
ALOGV("causal sample size %d", mFilledLen);
- if (mIsFirst) {
+ if (mIsFirst && outPos != 0) {
mIsFirst = false;
mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
}
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
index be3892f..379cb32 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
@@ -38,38 +38,38 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AMR_WB))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 16000))
.withFields({C2F(mSampleRate, value).equalTo(16000)})
.withSetter(
@@ -77,8 +77,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 6600))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 6600))
.withFields({C2F(mBitrate, value).inRange(6600, 23850)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
uint32_t getBitrate() const { return mBitrate->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -347,7 +347,7 @@
mFilledLen = 0;
}
ALOGV("causal sample size %d", mFilledLen);
- if (mIsFirst) {
+ if (mIsFirst && outPos != 0) {
mIsFirst = false;
mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
}
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 6be1807..4bcc2c6 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -141,7 +141,7 @@
static C2R SizeSetter(bool mayBlock,
const C2P<C2StreamPictureSizeInfo::output>& oldMe,
- C2P<C2VideoSizeStreamInfo::output>& me) {
+ C2P<C2StreamPictureSizeInfo::output>& me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -586,7 +586,7 @@
mWidth = img->d_w;
mHeight = img->d_h;
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == C2_OK) {
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 3e62744..9290d74 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -51,6 +51,12 @@
noInputLatency();
noTimeStretch();
+ // TODO: Proper support for reorder depth.
+ addParameter(
+ DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+ .withConstValue(new C2PortActualDelayTuning::output(8u))
+ .build());
+
// TODO: output latency and reordering
addParameter(
@@ -192,7 +198,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -839,7 +845,7 @@
mHeight = s_decode_op.u4_pic_ht;
CHECK_EQ(0u, s_decode_op.u4_output_present);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
@@ -877,6 +883,8 @@
} else if (!hasPicture) {
fillEmptyWork(work);
}
+
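+    // The input has been consumed at this point; release the buffers so the
+    // underlying blocks can be recycled.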
+ work->input.buffers.clear();
}
c2_status_t C2SoftAvcDec::drainInternal(
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index cf06623..b851908 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -45,36 +45,36 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_VIDEO_AVC))
.build());
addParameter(
- DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
C2F(mSize, width).inRange(2, 2560, 2),
C2F(mSize, height).inRange(2, 2560, 2),
@@ -83,7 +83,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -91,8 +91,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
.withSetter(BitrateSetter)
.build());
@@ -182,9 +182,9 @@
static C2R ProfileLevelSetter(
bool mayBlock,
C2P<C2StreamProfileLevelInfo::output> &me,
- const C2P<C2VideoSizeStreamTuning::input> &size,
+ const C2P<C2StreamPictureSizeInfo::input> &size,
const C2P<C2StreamFrameRateInfo::output> &frameRate,
- const C2P<C2BitrateTuning::output> &bitrate) {
+ const C2P<C2StreamBitrateInfo::output> &bitrate) {
(void)mayBlock;
if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
me.set().profile = PROFILE_AVC_CONSTRAINED_BASELINE;
@@ -216,7 +216,7 @@
};
uint64_t mbs = uint64_t((size.v.width + 15) / 16) * ((size.v.height + 15) / 16);
- float mbsPerSec = float(mbs) / frameRate.v.value;
+ float mbsPerSec = float(mbs) * frameRate.v.value;
// Check if the supplied level meets the MB / bitrate requirements. If
// not, update the level with the lowest level meeting the requirements.
@@ -325,16 +325,16 @@
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const { return mRequestSync; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
@@ -1332,8 +1332,8 @@
mSpsPpsHeaderReceived = true;
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -1492,7 +1492,7 @@
if (IV_IDR_FRAME == s_encode_op.u4_encoded_frame_type) {
ALOGV("IDR frame produced");
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
}
diff --git a/media/codec2/components/base/Android.bp b/media/codec2/components/base/Android.bp
index d02f541..78a444b 100644
--- a/media/codec2/components/base/Android.bp
+++ b/media/codec2/components/base/Android.bp
@@ -74,9 +74,6 @@
"signed-integer-overflow",
],
cfi: true,
- diag: {
- cfi: true,
- },
},
}
@@ -90,9 +87,6 @@
"signed-integer-overflow",
],
cfi: true,
- diag: {
- cfi: true,
- },
},
}
@@ -128,9 +122,6 @@
"signed-integer-overflow",
],
cfi: true,
- diag: {
- cfi: true,
- },
},
ldflags: ["-Wl,-Bsymbolic"],
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index b8baec8..44f1fe0 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -151,7 +151,7 @@
c2_status_t status;
do {
status = mBase->fetchLinearBlock(capacity, usage, block);
- } while (status == C2_TIMED_OUT);
+ } while (status == C2_BLOCKING);
return status;
}
@@ -162,7 +162,7 @@
c2_status_t status;
do {
status = mBase->fetchCircularBlock(capacity, usage, block);
- } while (status == C2_TIMED_OUT);
+ } while (status == C2_BLOCKING);
return status;
}
@@ -174,7 +174,7 @@
do {
status = mBase->fetchGraphicBlock(width, height, format, usage,
block);
- } while (status == C2_TIMED_OUT);
+ } while (status == C2_BLOCKING);
return status;
}
@@ -473,7 +473,7 @@
if (!mOutputBlockPool) {
c2_status_t err = [this] {
// TODO: don't use query_vb
- C2StreamFormatConfig::output outputFormat(0u);
+ C2StreamBufferTypeSetting::output outputFormat(0u);
std::vector<std::unique_ptr<C2Param>> params;
c2_status_t err = intf()->query_vb(
{ &outputFormat },
@@ -485,7 +485,7 @@
return err;
}
C2BlockPool::local_id_t poolId =
- outputFormat.value == C2FormatVideo
+ outputFormat.value == C2BufferData::GRAPHIC
? C2BlockPool::BASIC_GRAPHIC
: C2BlockPool::BASIC_LINEAR;
if (params.size()) {
diff --git a/media/codec2/components/flac/C2SoftFlacDec.cpp b/media/codec2/components/flac/C2SoftFlacDec.cpp
index 86b16e8..10b14ce 100644
--- a/media/codec2/components/flac/C2SoftFlacDec.cpp
+++ b/media/codec2/components/flac/C2SoftFlacDec.cpp
@@ -37,44 +37,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_FLAC))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(1, 655350)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 768000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 768000))
.withFields({C2F(mBitrate, value).inRange(1, 21000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -99,13 +99,13 @@
int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
};
diff --git a/media/codec2/components/flac/C2SoftFlacEnc.cpp b/media/codec2/components/flac/C2SoftFlacEnc.cpp
index 4ea35c2..0ce2543 100644
--- a/media/codec2/components/flac/C2SoftFlacEnc.cpp
+++ b/media/codec2/components/flac/C2SoftFlacEnc.cpp
@@ -34,38 +34,38 @@
: C2InterfaceHelper(helper) {
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_FLAC))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(1, 655350)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 2)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 768000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 768000))
.withFields({C2F(mBitrate, value).inRange(1, 21000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -92,13 +92,13 @@
int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::input> mPcmEncodingInfo;
};
@@ -223,8 +223,8 @@
}
if (!mWroteHeader) {
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(mHeaderOffset, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(mHeaderOffset, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
diff --git a/media/codec2/components/g711/C2SoftG711Dec.cpp b/media/codec2/components/g711/C2SoftG711Dec.cpp
index 1c71d45..504ca78 100644
--- a/media/codec2/components/g711/C2SoftG711Dec.cpp
+++ b/media/codec2/components/g711/C2SoftG711Dec.cpp
@@ -41,18 +41,18 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
#ifdef ALAW
MEDIA_MIMETYPE_AUDIO_G711_ALAW
#else
@@ -61,28 +61,28 @@
)).build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).inRange(8000, 48000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).equalTo(64000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/gsm/C2SoftGsmDec.cpp b/media/codec2/components/gsm/C2SoftGsmDec.cpp
index 7101c79..69d4885 100644
--- a/media/codec2/components/gsm/C2SoftGsmDec.cpp
+++ b/media/codec2/components/gsm/C2SoftGsmDec.cpp
@@ -36,44 +36,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_MSGSM))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 13200))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 13200))
.withFields({C2F(mBitrate, value).equalTo(13200)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -85,13 +85,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/hevc/Android.bp b/media/codec2/components/hevc/Android.bp
index 2a045e1..369bd78 100644
--- a/media/codec2/components/hevc/Android.bp
+++ b/media/codec2/components/hevc/Android.bp
@@ -9,8 +9,17 @@
static_libs: ["libhevcdec"],
- include_dirs: [
- "external/libhevc/decoder",
- "external/libhevc/common",
+}
+
+cc_library_shared {
+ name: "libcodec2_soft_hevcenc",
+ defaults: [
+ "libcodec2_soft-defaults",
+ "libcodec2_soft_sanitize_signed-defaults",
],
+
+ srcs: ["C2SoftHevcEnc.cpp"],
+
+ static_libs: ["libhevcenc"],
+
}
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index 99892ce..bb8dda0 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -51,7 +51,11 @@
noInputLatency();
noTimeStretch();
- // TODO: output latency and reordering
+ // TODO: Proper support for reorder depth.
+ addParameter(
+ DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+ .withConstValue(new C2PortActualDelayTuning::output(8u))
+ .build());
addParameter(
DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
@@ -188,7 +192,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -835,7 +839,7 @@
mHeight = s_decode_op.u4_pic_ht;
CHECK_EQ(0u, s_decode_op.u4_output_present);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
new file mode 100644
index 0000000..2c0a7a0
--- /dev/null
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -0,0 +1,802 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2SoftHevcEnc"
+#include <log/log.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <Codec2BufferUtils.h>
+#include <SimpleC2Interface.h>
+#include <util/C2InterfaceHelper.h>
+
+#include "ihevc_typedefs.h"
+#include "itt_video_api.h"
+#include "ihevce_api.h"
+#include "ihevce_plugin.h"
+#include "C2SoftHevcEnc.h"
+
+namespace android {
+
+class C2SoftHevcEnc::IntfImpl : public C2InterfaceHelper {
+ public:
+ explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper>& helper)
+ : C2InterfaceHelper(helper) {
+ setDerivedInstance(this);
+
+ addParameter(
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
+ .build());
+
+ addParameter(
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
+ .build());
+
+ addParameter(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
+ MEDIA_MIMETYPE_VIDEO_RAW))
+ .build());
+
+ addParameter(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+ MEDIA_MIMETYPE_VIDEO_HEVC))
+ .build());
+
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+ .withConstValue(new C2StreamUsageTuning::input(
+ 0u, (uint64_t)C2MemoryUsage::CPU_READ))
+ .build());
+
+ addParameter(
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+ .withFields({
+ C2F(mSize, width).inRange(320, 1920, 2),
+ C2F(mSize, height).inRange(128, 1088, 2),
+ })
+ .withSetter(SizeSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
+ .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+ .withFields({C2F(mFrameRate, value).greaterThan(0.)})
+ .withSetter(
+ Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
+ .withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
+ .withSetter(BitrateSetter)
+ .build());
+
+ addParameter(
+ DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::output(
+ 0u, PROFILE_HEVC_MAIN, LEVEL_HEVC_MAIN_1))
+ .withFields({
+ C2F(mProfileLevel, profile)
+ .oneOf({C2Config::PROFILE_HEVC_MAIN,
+ C2Config::PROFILE_HEVC_MAIN_STILL}),
+ C2F(mProfileLevel, level)
+ .oneOf({LEVEL_HEVC_MAIN_1, LEVEL_HEVC_MAIN_2,
+ LEVEL_HEVC_MAIN_2_1, LEVEL_HEVC_MAIN_3,
+ LEVEL_HEVC_MAIN_3_1, LEVEL_HEVC_MAIN_4,
+ LEVEL_HEVC_MAIN_4_1, LEVEL_HEVC_MAIN_5,
+ LEVEL_HEVC_MAIN_5_1, LEVEL_HEVC_MAIN_5_2}),
+ })
+ .withSetter(ProfileLevelSetter, mSize, mFrameRate, mBitrate)
+ .build());
+
+ addParameter(
+ DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
+ .withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
+ .withFields({C2F(mRequestSync, value).oneOf({ C2_FALSE, C2_TRUE }) })
+ .withSetter(Setter<decltype(*mRequestSync)>::NonStrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mSyncFramePeriod, C2_PARAMKEY_SYNC_FRAME_INTERVAL)
+ .withDefault(
+ new C2StreamSyncFrameIntervalTuning::output(0u, 1000000))
+ .withFields({C2F(mSyncFramePeriod, value).any()})
+ .withSetter(
+ Setter<decltype(*mSyncFramePeriod)>::StrictValueWithNoDeps)
+ .build());
+ }
+
+ static C2R BitrateSetter(bool mayBlock,
+ C2P<C2StreamBitrateInfo::output>& me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
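+ // Clamp requests below the supported minimum (4096 bps) rather than failing.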
+ if (me.v.value <= 4096) {
+ me.set().value = 4096;
+ }
+ return res;
+ }
+
+ static C2R SizeSetter(bool mayBlock,
+ const C2P<C2StreamPictureSizeInfo::input>& oldMe,
+ C2P<C2StreamPictureSizeInfo::input>& me) {
+ (void)mayBlock;
+ C2R res = C2R::Ok();
+ if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+ me.set().width = oldMe.v.width;
+ }
+ if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
+ res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+ me.set().height = oldMe.v.height;
+ }
+ return res;
+ }
+
+ static C2R ProfileLevelSetter(
+ bool mayBlock,
+ C2P<C2StreamProfileLevelInfo::output> &me,
+ const C2P<C2StreamPictureSizeInfo::input> &size,
+ const C2P<C2StreamFrameRateInfo::output> &frameRate,
+ const C2P<C2StreamBitrateInfo::output> &bitrate) {
+ (void)mayBlock;
+ if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
+ me.set().profile = PROFILE_HEVC_MAIN;
+ }
+
+ struct LevelLimits {
+ C2Config::level_t level;
+ uint64_t samplesPerSec;
+ uint64_t samples;
+ uint32_t bitrate;
+ };
+
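+ // Per-level caps on luma sample rate, picture size (in samples) and bitrate, Main tier.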
+ constexpr LevelLimits kLimits[] = {
+ { LEVEL_HEVC_MAIN_1, 552960, 36864, 128000 },
+ { LEVEL_HEVC_MAIN_2, 3686400, 122880, 1500000 },
+ { LEVEL_HEVC_MAIN_2_1, 7372800, 245760, 3000000 },
+ { LEVEL_HEVC_MAIN_3, 16588800, 552960, 6000000 },
+ { LEVEL_HEVC_MAIN_3_1, 33177600, 983040, 10000000 },
+ { LEVEL_HEVC_MAIN_4, 66846720, 2228224, 12000000 },
+ { LEVEL_HEVC_MAIN_4_1, 133693440, 2228224, 20000000 },
+ { LEVEL_HEVC_MAIN_5, 267386880, 8912896, 25000000 },
+ { LEVEL_HEVC_MAIN_5_1, 534773760, 8912896, 40000000 },
+ { LEVEL_HEVC_MAIN_5_2, 1069547520, 8912896, 60000000 },
+ { LEVEL_HEVC_MAIN_6, 1069547520, 35651584, 60000000 },
+ { LEVEL_HEVC_MAIN_6_1, 2139095040, 35651584, 120000000 },
+ { LEVEL_HEVC_MAIN_6_2, 4278190080, 35651584, 240000000 },
+ };
+
+ uint64_t samples = size.v.width * size.v.height;
+ uint64_t samplesPerSec = samples * frameRate.v.value;
+
+ // Check if the supplied level meets the MB / bitrate requirements. If
+ // not, update the level with the lowest level meeting the requirements.
+
+ bool found = false;
+ // needsUpdate stays false as long as the supplied level already meets
+ // the requirements.
+ bool needsUpdate = false;
+ for (const LevelLimits &limit : kLimits) {
+ if (samples <= limit.samples && samplesPerSec <= limit.samplesPerSec &&
+ bitrate.v.value <= limit.bitrate) {
+ // This is the lowest level that meets the requirements, and if
+ // we haven't seen the supplied level yet, that means we don't
+ // need the update.
+ if (needsUpdate) {
+ ALOGD("Given level %x does not cover current configuration: "
+ "adjusting to %x", me.v.level, limit.level);
+ me.set().level = limit.level;
+ }
+ found = true;
+ break;
+ }
+ if (me.v.level == limit.level) {
+ // We break out of the loop when the lowest feasible level is
+ // found. The fact that we're here means that our level doesn't
+ // meet the requirement and needs to be updated.
+ needsUpdate = true;
+ }
+ }
+ if (!found) {
+ // We set to the highest supported level.
+ me.set().level = LEVEL_HEVC_MAIN_5_2;
+ }
+ return C2R::Ok();
+ }
+
+ UWORD32 getProfile_l() const {
+ switch (mProfileLevel->profile) {
+ case PROFILE_HEVC_MAIN: [[fallthrough]];
+ case PROFILE_HEVC_MAIN_STILL: return 1;
+ default:
+ ALOGD("Unrecognized profile: %x", mProfileLevel->profile);
+ return 1;
+ }
+ }
+
+ UWORD32 getLevel_l() const {
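+ // Map the Codec2 level to the encoder's numeric level code (level * 30, e.g. 5.1 -> 153).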
+ struct Level {
+ C2Config::level_t c2Level;
+ UWORD32 hevcLevel;
+ };
+ constexpr Level levels[] = {
+ { LEVEL_HEVC_MAIN_1, 30 },
+ { LEVEL_HEVC_MAIN_2, 60 },
+ { LEVEL_HEVC_MAIN_2_1, 63 },
+ { LEVEL_HEVC_MAIN_3, 90 },
+ { LEVEL_HEVC_MAIN_3_1, 93 },
+ { LEVEL_HEVC_MAIN_4, 120 },
+ { LEVEL_HEVC_MAIN_4_1, 123 },
+ { LEVEL_HEVC_MAIN_5, 150 },
+ { LEVEL_HEVC_MAIN_5_1, 153 },
+ { LEVEL_HEVC_MAIN_5_2, 156 },
+ { LEVEL_HEVC_MAIN_6, 180 },
+ { LEVEL_HEVC_MAIN_6_1, 183 },
+ { LEVEL_HEVC_MAIN_6_2, 186 },
+ };
+ for (const Level &level : levels) {
+ if (mProfileLevel->level == level.c2Level) {
+ return level.hevcLevel;
+ }
+ }
+ ALOGD("Unrecognized level: %x", mProfileLevel->level);
+ return 156;
+ }
+ uint32_t getSyncFramePeriod_l() const {
+ if (mSyncFramePeriod->value < 0 ||
+ mSyncFramePeriod->value == INT64_MAX) {
+ return 0;
+ }
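+ // Convert the sync-frame interval (microseconds) into a period expressed in frames.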
+ double period = mSyncFramePeriod->value / 1e6 * mFrameRate->value;
+ return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
+ }
+
+ std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const {
+ return mSize;
+ }
+ std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const {
+ return mFrameRate;
+ }
+ std::shared_ptr<C2StreamBitrateInfo::output> getBitrate_l() const {
+ return mBitrate;
+ }
+ std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const {
+ return mRequestSync;
+ }
+
+ private:
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+ std::shared_ptr<C2StreamUsageTuning::input> mUsage;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
+ std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
+ std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+ std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
+ std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
+};
+constexpr char COMPONENT_NAME[] = "c2.android.hevc.encoder";
+
+static size_t GetCPUCoreCount() {
+ long cpuCoreCount = 1;
+#if defined(_SC_NPROCESSORS_ONLN)
+ cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+ // _SC_NPROC_ONLN must be defined...
+ cpuCoreCount = sysconf(_SC_NPROC_ONLN);
+#endif
+ CHECK(cpuCoreCount >= 1);
+ ALOGV("Number of CPU cores: %ld", cpuCoreCount);
+ return (size_t)cpuCoreCount;
+}
+
+C2SoftHevcEnc::C2SoftHevcEnc(const char* name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl>& intfImpl)
+ : SimpleC2Component(
+ std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+ mIntf(intfImpl),
+ mIvVideoColorFormat(IV_YUV_420P),
+ mHevcEncProfile(1),
+ mHevcEncLevel(30),
+ mStarted(false),
+ mSpsPpsHeaderReceived(false),
+ mSignalledEos(false),
+ mSignalledError(false),
+ mCodecCtx(nullptr) {
+ // If dump is enabled, then create an empty file
+ GENERATE_FILE_NAMES();
+ CREATE_DUMP_FILE(mInFile);
+ CREATE_DUMP_FILE(mOutFile);
+
+ gettimeofday(&mTimeStart, nullptr);
+ gettimeofday(&mTimeEnd, nullptr);
+}
+
+C2SoftHevcEnc::~C2SoftHevcEnc() {
+ releaseEncoder();
+}
+
+c2_status_t C2SoftHevcEnc::onInit() {
+ return initEncoder();
+}
+
+c2_status_t C2SoftHevcEnc::onStop() {
+ if (!mStarted) {
+ return C2_OK;
+ }
+ return releaseEncoder();
+}
+
+void C2SoftHevcEnc::onReset() {
+ onStop();
+ initEncoder();
+}
+
+void C2SoftHevcEnc::onRelease() {
+ onStop();
+}
+
+c2_status_t C2SoftHevcEnc::onFlush_sm() {
+ return C2_OK;
+}
+
+static void fillEmptyWork(const std::unique_ptr<C2Work>& work) {
+ uint32_t flags = 0;
+ if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+ flags |= C2FrameData::FLAG_END_OF_STREAM;
+ ALOGV("Signalling EOS");
+ }
+ work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.ordinal = work->input.ordinal;
+ work->workletsProcessed = 1u;
+}
+
+c2_status_t C2SoftHevcEnc::initEncParams() {
+ mCodecCtx = nullptr;
+ mNumCores = MIN(GetCPUCoreCount(), CODEC_MAX_CORES);
+ memset(&mEncParams, 0, sizeof(ihevce_static_cfg_params_t));
+
+ // default configuration
+ IHEVCE_PLUGIN_STATUS_T err = ihevce_set_def_params(&mEncParams);
+ if (IHEVCE_EOK != err) {
+ ALOGE("HEVC default init failed : 0x%x", err);
+ return C2_CORRUPTED;
+ }
+
+ // update configuration
+ mEncParams.s_src_prms.i4_width = mSize->width;
+ mEncParams.s_src_prms.i4_height = mSize->height;
+ mEncParams.s_src_prms.i4_frm_rate_denom = 1000;
+ mEncParams.s_src_prms.i4_frm_rate_num = mFrameRate->value * mEncParams.s_src_prms.i4_frm_rate_denom;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P5;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0] =
+ mBitrate->value;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_peak_bitrate[0] =
+ mBitrate->value << 1;
+ mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_codec_level = mHevcEncLevel;
+ mEncParams.s_coding_tools_prms.i4_max_i_open_gop_period = mIDRInterval;
+ mEncParams.s_coding_tools_prms.i4_max_cra_open_gop_period = mIDRInterval;
+ mIvVideoColorFormat = IV_YUV_420P;
+ mEncParams.s_multi_thrd_prms.i4_max_num_cores = mNumCores;
+ mEncParams.s_out_strm_prms.i4_codec_profile = mHevcEncProfile;
+ mEncParams.s_config_prms.i4_rate_control_mode = 2;
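+ // No rate-control look-ahead pictures, which keeps encoding delay to a minimum.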
+ mEncParams.s_lap_prms.i4_rc_look_ahead_pics = 0;
+
+ return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::releaseEncoder() {
+ mSpsPpsHeaderReceived = false;
+ mSignalledEos = false;
+ mSignalledError = false;
+ mStarted = false;
+
+ if (mCodecCtx) {
+ IHEVCE_PLUGIN_STATUS_T err = ihevce_close(mCodecCtx);
+ if (IHEVCE_EOK != err) return C2_CORRUPTED;
+ mCodecCtx = nullptr;
+ }
+ return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::drain(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool>& pool) {
+ (void)drainMode;
+ (void)pool;
+ return C2_OK;
+}
+c2_status_t C2SoftHevcEnc::initEncoder() {
+ CHECK(!mCodecCtx);
+ {
+ IntfImpl::Lock lock = mIntf->lock();
+ mSize = mIntf->getSize_l();
+ mBitrate = mIntf->getBitrate_l();
+ mFrameRate = mIntf->getFrameRate_l();
+ mHevcEncProfile = mIntf->getProfile_l();
+ mHevcEncLevel = mIntf->getLevel_l();
+ mIDRInterval = mIntf->getSyncFramePeriod_l();
+ }
+
+ c2_status_t status = initEncParams();
+
+ if (C2_OK != status) {
+ ALOGE("Failed to initialize encoder params : 0x%x", status);
+ mSignalledError = true;
+ return status;
+ }
+
+ IHEVCE_PLUGIN_STATUS_T err = IHEVCE_EOK;
+ err = ihevce_init(&mEncParams, &mCodecCtx);
+ if (IHEVCE_EOK != err) {
+ ALOGE("HEVC encoder init failed : 0x%x", err);
+ return C2_CORRUPTED;
+ }
+
+ mStarted = true;
+ return C2_OK;
+}
+
+c2_status_t C2SoftHevcEnc::setEncodeArgs(ihevce_inp_buf_t* ps_encode_ip,
+ const C2GraphicView* const input,
+ uint64_t timestamp) {
+ ihevce_static_cfg_params_t* params = &mEncParams;
+ memset(ps_encode_ip, 0, sizeof(ihevce_inp_buf_t));
+
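+ // No input frame attached (e.g. an EOS-only work item): leave the zero-initialized args as-is.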
+ if (!input) {
+ return C2_OK;
+ }
+
+ if (input->width() < mSize->width ||
+ input->height() < mSize->height) {
+ /* Expect width height to be configured */
+ ALOGW("unexpected Capacity Aspect %d(%d) x %d(%d)", input->width(),
+ mSize->width, input->height(), mSize->height);
+ return C2_BAD_VALUE;
+ }
+
+ const C2PlanarLayout& layout = input->layout();
+ uint8_t* yPlane =
+ const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_Y]);
+ uint8_t* uPlane =
+ const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_U]);
+ uint8_t* vPlane =
+ const_cast<uint8_t *>(input->data()[C2PlanarLayout::PLANE_V]);
+ int32_t yStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
+ int32_t uStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ int32_t vStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
+
+ uint32_t width = mSize->width;
+ uint32_t height = mSize->height;
+
+ // width and height are always even
+ CHECK_EQ((width & 1u), 0u);
+ CHECK_EQ((height & 1u), 0u);
+
+ size_t yPlaneSize = width * height;
+
+ switch (layout.type) {
+ case C2PlanarLayout::TYPE_RGB:
+ [[fallthrough]];
+ case C2PlanarLayout::TYPE_RGBA: {
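+ // Interleaved RGB(A) input is converted to planar I420 in a scratch buffer before encoding.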
+ MemoryBlock conversionBuffer =
+ mConversionBuffers.fetch(yPlaneSize * 3 / 2);
+ mConversionBuffersInUse.emplace(conversionBuffer.data(),
+ conversionBuffer);
+ yPlane = conversionBuffer.data();
+ uPlane = yPlane + yPlaneSize;
+ vPlane = uPlane + yPlaneSize / 4;
+ yStride = width;
+ uStride = vStride = yStride / 2;
+ ConvertRGBToPlanarYUV(yPlane, yStride, height,
+ conversionBuffer.size(), *input);
+ break;
+ }
+ case C2PlanarLayout::TYPE_YUV: {
+ if (!IsYUV420(*input)) {
+ ALOGE("input is not YUV420");
+ return C2_BAD_VALUE;
+ }
+
+ if (layout.planes[layout.PLANE_Y].colInc == 1 &&
+ layout.planes[layout.PLANE_U].colInc == 1 &&
+ layout.planes[layout.PLANE_V].colInc == 1 &&
+ uStride == vStride && yStride == 2 * vStride) {
+ // I420 compatible - already set up above
+ break;
+ }
+
+ // copy to I420
+ yStride = width;
+ uStride = vStride = yStride / 2;
+ MemoryBlock conversionBuffer =
+ mConversionBuffers.fetch(yPlaneSize * 3 / 2);
+ mConversionBuffersInUse.emplace(conversionBuffer.data(),
+ conversionBuffer);
+ MediaImage2 img =
+ CreateYUV420PlanarMediaImage2(width, height, yStride, height);
+ status_t err = ImageCopy(conversionBuffer.data(), &img, *input);
+ if (err != OK) {
+ ALOGE("Buffer conversion failed: %d", err);
+ return C2_BAD_VALUE;
+ }
+ yPlane = conversionBuffer.data();
+ uPlane = yPlane + yPlaneSize;
+ vPlane = uPlane + yPlaneSize / 4;
+ break;
+ }
+
+ case C2PlanarLayout::TYPE_YUVA:
+ ALOGE("YUVA plane type is not supported");
+ return C2_BAD_VALUE;
+
+ default:
+ ALOGE("Unrecognized plane type: %d", layout.type);
+ return C2_BAD_VALUE;
+ }
+
+ switch (mIvVideoColorFormat) {
+ case IV_YUV_420P: {
+ // input buffer is supposed to be const but Ittiam API wants bare
+ // pointer.
+ ps_encode_ip->apv_inp_planes[0] = yPlane;
+ ps_encode_ip->apv_inp_planes[1] = uPlane;
+ ps_encode_ip->apv_inp_planes[2] = vPlane;
+
+ ps_encode_ip->ai4_inp_strd[0] = yStride;
+ ps_encode_ip->ai4_inp_strd[1] = uStride;
+ ps_encode_ip->ai4_inp_strd[2] = vStride;
+
+ ps_encode_ip->ai4_inp_size[0] = yStride * height;
+ ps_encode_ip->ai4_inp_size[1] = uStride * height >> 1;
+ ps_encode_ip->ai4_inp_size[2] = vStride * height >> 1;
+ break;
+ }
+
+ case IV_YUV_422ILE: {
+ // TODO
+ break;
+ }
+
+ case IV_YUV_420SP_UV:
+ case IV_YUV_420SP_VU:
+ default: {
+ ps_encode_ip->apv_inp_planes[0] = yPlane;
+ ps_encode_ip->apv_inp_planes[1] = uPlane;
+ ps_encode_ip->apv_inp_planes[2] = nullptr;
+
+ ps_encode_ip->ai4_inp_strd[0] = yStride;
+ ps_encode_ip->ai4_inp_strd[1] = uStride;
+ ps_encode_ip->ai4_inp_strd[2] = 0;
+
+ ps_encode_ip->ai4_inp_size[0] = yStride * height;
+ ps_encode_ip->ai4_inp_size[1] = uStride * height >> 1;
+ ps_encode_ip->ai4_inp_size[2] = 0;
+ break;
+ }
+ }
+
+ ps_encode_ip->i4_curr_bitrate =
+ params->s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0];
+ ps_encode_ip->i4_curr_peak_bitrate =
+ params->s_tgt_lyr_prms.as_tgt_params[0].ai4_peak_bitrate[0];
+ ps_encode_ip->i4_curr_rate_factor = params->s_config_prms.i4_rate_factor;
+ ps_encode_ip->u8_pts = timestamp;
+ return C2_OK;
+}
+
+void C2SoftHevcEnc::process(const std::unique_ptr<C2Work>& work,
+ const std::shared_ptr<C2BlockPool>& pool) {
+ // Initialize output work
+ work->result = C2_OK;
+ work->workletsProcessed = 1u;
+ work->worklets.front()->output.flags = work->input.flags;
+
+ if (mSignalledError || mSignalledEos) {
+ work->result = C2_BAD_VALUE;
+ ALOGD("Signalled Error / Signalled Eos");
+ return;
+ }
+ c2_status_t status = C2_OK;
+
+ // Initialize encoder if not already initialized
+ if (!mStarted) {
+ status = initEncoder();
+ if (C2_OK != status) {
+ ALOGE("Failed to initialize encoder : 0x%x", status);
+ mSignalledError = true;
+ work->result = status;
+ return;
+ }
+ }
+
+ std::shared_ptr<const C2GraphicView> view;
+ std::shared_ptr<C2Buffer> inputBuffer = nullptr;
+ bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+ if (!work->input.buffers.empty()) {
+ inputBuffer = work->input.buffers[0];
+ view = std::make_shared<const C2GraphicView>(
+ inputBuffer->data().graphicBlocks().front().map().get());
+ if (view->error() != C2_OK) {
+ ALOGE("graphic view map err = %d", view->error());
+ mSignalledError = true;
+ return;
+ }
+ }
+
+ IHEVCE_PLUGIN_STATUS_T err = IHEVCE_EOK;
+
+ fillEmptyWork(work);
+ if (!mSpsPpsHeaderReceived) {
+ ihevce_out_buf_t s_header_op{};
+ err = ihevce_encode_header(mCodecCtx, &s_header_op);
+ if (err == IHEVCE_EOK && s_header_op.i4_bytes_generated) {
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(
+ s_header_op.i4_bytes_generated, 0u);
+ if (!csd) {
+ ALOGE("CSD allocation failed");
+ mSignalledError = true;
+ work->result = C2_NO_MEMORY;
+ return;
+ }
+ memcpy(csd->m.value, s_header_op.pu1_output_buf,
+ s_header_op.i4_bytes_generated);
+ DUMP_TO_FILE(mOutFile, csd->m.value, csd->flexCount());
+ work->worklets.front()->output.configUpdate.push_back(
+ std::move(csd));
+ mSpsPpsHeaderReceived = true;
+ }
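+ // Nothing further to encode for this work item; only the CSD (if any) was produced.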
+ if (!inputBuffer) {
+ return;
+ }
+ }
+ ihevce_inp_buf_t s_encode_ip{};
+ ihevce_out_buf_t s_encode_op{};
+ uint64_t timestamp = work->input.ordinal.timestamp.peekull();
+
+ status = setEncodeArgs(&s_encode_ip, view.get(), timestamp);
+ if (C2_OK != status) {
+ mSignalledError = true;
+ ALOGE("setEncodeArgs failed : 0x%x", status);
+ work->result = status;
+ return;
+ }
+
+ uint64_t timeDelay = 0;
+ uint64_t timeTaken = 0;
+ GETTIME(&mTimeStart, nullptr);
+ TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
+
+ ihevce_inp_buf_t* ps_encode_ip = (inputBuffer) ? &s_encode_ip : nullptr;
+
+ err = ihevce_encode(mCodecCtx, ps_encode_ip, &s_encode_op);
+ if (IHEVCE_EOK != err) {
+ ALOGE("Encode Frame failed : 0x%x", err);
+ mSignalledError = true;
+ work->result = C2_CORRUPTED;
+ return;
+ }
+
+ GETTIME(&mTimeEnd, nullptr);
+ /* Compute time taken for encode() */
+ TIME_DIFF(mTimeStart, mTimeEnd, timeTaken);
+
+ ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", (int)timeTaken,
+ (int)timeDelay, s_encode_op.i4_bytes_generated);
+
+ if (s_encode_op.i4_bytes_generated) {
+ std::shared_ptr<C2LinearBlock> block;
+ C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+ status = pool->fetchLinearBlock(s_encode_op.i4_bytes_generated, usage, &block);
+ if (C2_OK != status) {
+ ALOGE("fetchLinearBlock for Output failed with status 0x%x", status);
+ work->result = C2_NO_MEMORY;
+ mSignalledError = true;
+ return;
+ }
+ C2WriteView wView = block->map().get();
+ if (C2_OK != wView.error()) {
+ ALOGE("write view map failed with status 0x%x", wView.error());
+ work->result = wView.error();
+ mSignalledError = true;
+ return;
+ }
+ memcpy(wView.data(), s_encode_op.pu1_output_buf,
+ s_encode_op.i4_bytes_generated);
+
+ std::shared_ptr<C2Buffer> buffer =
+ createLinearBuffer(block, 0, s_encode_op.i4_bytes_generated);
+
+ DUMP_TO_FILE(mOutFile, s_encode_op.pu1_output_buf,
+ s_encode_op.i4_bytes_generated);
+
+ work->worklets.front()->output.ordinal.timestamp = s_encode_op.u8_pts;
+ if (s_encode_op.i4_is_key_frame) {
+ ALOGV("IDR frame produced");
+ buffer->setInfo(
+ std::make_shared<C2StreamPictureTypeMaskInfo::output>(
+ 0u /* stream id */, C2Config::SYNC_FRAME));
+ }
+ work->worklets.front()->output.buffers.push_back(buffer);
+ }
+ if (eos) {
+ mSignalledEos = true;
+ }
+}
+
+class C2SoftHevcEncFactory : public C2ComponentFactory {
+ public:
+ C2SoftHevcEncFactory()
+ : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+ GetCodec2PlatformComponentStore()->getParamReflector())) {}
+
+ virtual c2_status_t createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ std::function<void(C2Component*)> deleter) override {
+ *component = std::shared_ptr<C2Component>(
+ new C2SoftHevcEnc(
+ COMPONENT_NAME, id,
+ std::make_shared<C2SoftHevcEnc::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual c2_status_t createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ std::function<void(C2ComponentInterface*)> deleter) override {
+ *interface = std::shared_ptr<C2ComponentInterface>(
+ new SimpleInterface<C2SoftHevcEnc::IntfImpl>(
+ COMPONENT_NAME, id,
+ std::make_shared<C2SoftHevcEnc::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual ~C2SoftHevcEncFactory() override = default;
+
+ private:
+ std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+} // namespace android
+
+extern "C" ::C2ComponentFactory* CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
+ return new ::android::C2SoftHevcEncFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
+ ALOGV("in %s", __func__);
+ delete factory;
+}
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
new file mode 100644
index 0000000..c22fea2
--- /dev/null
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_C2_SOFT_HEVC_ENC_H_
+#define ANDROID_C2_SOFT_HEVC_ENC_H_
+
+#include <map>
+#include <utils/Vector.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <SimpleC2Component.h>
+
+#include "ihevc_typedefs.h"
+
+namespace android {
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+/** Get time */
+#define GETTIME(a, b) gettimeofday(a, b);
+
+/** Compute difference between start and end */
+#define TIME_DIFF(start, end, diff) \
+ diff = (((end).tv_sec - (start).tv_sec) * 1000000) + \
+ ((end).tv_usec - (start).tv_usec);
+
+#define CODEC_MAX_CORES 4
+
+struct C2SoftHevcEnc : public SimpleC2Component {
+ class IntfImpl;
+
+ C2SoftHevcEnc(const char* name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl>& intfImpl);
+
+ // From SimpleC2Component
+ c2_status_t onInit() override;
+ c2_status_t onStop() override;
+ void onReset() override;
+ void onRelease() override;
+ c2_status_t onFlush_sm() override;
+ void process(const std::unique_ptr<C2Work>& work,
+ const std::shared_ptr<C2BlockPool>& pool) override;
+ c2_status_t drain(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool>& pool) override;
+
+ protected:
+ virtual ~C2SoftHevcEnc();
+
+ private:
+ std::shared_ptr<IntfImpl> mIntf;
+ ihevce_static_cfg_params_t mEncParams;
+ size_t mNumCores;
+ UWORD32 mIDRInterval;
+ IV_COLOR_FORMAT_T mIvVideoColorFormat;
+ UWORD32 mHevcEncProfile;
+ UWORD32 mHevcEncLevel;
+ bool mStarted;
+ bool mSpsPpsHeaderReceived;
+ bool mSignalledEos;
+ bool mSignalledError;
+ void* mCodecCtx;
+ MemoryBlockPool mConversionBuffers;
+ std::map<void*, MemoryBlock> mConversionBuffersInUse;
+ // configurations used by component in process
+ // (TODO: keep this in intf but make them internal only)
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
+ std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+
+#ifdef FILE_DUMP_ENABLE
+ char mInFile[200];
+ char mOutFile[200];
+#endif /* FILE_DUMP_ENABLE */
+
+ // profile
+ struct timeval mTimeStart;
+ struct timeval mTimeEnd;
+
+ c2_status_t initEncParams();
+ c2_status_t initEncoder();
+ c2_status_t releaseEncoder();
+ c2_status_t setEncodeArgs(ihevce_inp_buf_t* ps_encode_ip,
+ const C2GraphicView* const input,
+ uint64_t timestamp);
+ C2_DO_NOT_COPY(C2SoftHevcEnc);
+};
+
+#ifdef FILE_DUMP_ENABLE
+
+#define INPUT_DUMP_PATH "/data/local/tmp/hevc"
+#define INPUT_DUMP_EXT "yuv"
+#define OUTPUT_DUMP_PATH "/data/local/tmp/hevc"
+#define OUTPUT_DUMP_EXT "h265"
+#define GENERATE_FILE_NAMES() \
+{ \
+ GETTIME(&mTimeStart, NULL); \
+ strcpy(mInFile, ""); \
+ ALOGD("GENERATE_FILE_NAMES"); \
+ sprintf(mInFile, "%s_%ld.%ld.%s", INPUT_DUMP_PATH, mTimeStart.tv_sec, \
+ mTimeStart.tv_usec, INPUT_DUMP_EXT); \
+ strcpy(mOutFile, ""); \
+ sprintf(mOutFile, "%s_%ld.%ld.%s", OUTPUT_DUMP_PATH, \
+ mTimeStart.tv_sec, mTimeStart.tv_usec, OUTPUT_DUMP_EXT); \
+}
+
+#define CREATE_DUMP_FILE(m_filename) \
+{ \
+ FILE* fp = fopen(m_filename, "wb"); \
+ if (fp != NULL) { \
+ ALOGD("Opened file %s", m_filename); \
+ fclose(fp); \
+ } else { \
+ ALOGD("Could not open file %s", m_filename); \
+ } \
+}
+#define DUMP_TO_FILE(m_filename, m_buf, m_size) \
+{ \
+ FILE* fp = fopen(m_filename, "ab"); \
+ if (fp != NULL && m_buf != NULL) { \
+ int i; \
+ ALOGD("Dump to file!"); \
+ i = fwrite(m_buf, 1, m_size, fp); \
+ if (i != (int)m_size) { \
+ ALOGD("Error in fwrite, returned %d", i); \
+ perror("Error in write to file"); \
+ } \
+ fclose(fp); \
+ } else { \
+ ALOGD("Could not write to file %s", m_filename); \
+ if (fp != NULL) fclose(fp); \
+ } \
+}
+#else /* FILE_DUMP_ENABLE */
+#define INPUT_DUMP_PATH
+#define INPUT_DUMP_EXT
+#define OUTPUT_DUMP_PATH
+#define OUTPUT_DUMP_EXT
+#define GENERATE_FILE_NAMES()
+#define CREATE_DUMP_FILE(m_filename)
+#define DUMP_TO_FILE(m_filename, m_buf, m_size)
+#endif /* FILE_DUMP_ENABLE */
+
+} // namespace android
+
+#endif // ANDROID_C2_SOFT_HEVC_ENC_H_
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.cpp b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
index c8b8397..9db6d8f 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.cpp
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
@@ -40,29 +40,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_MPEG))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({8000, 11025, 12000, 16000,
22050, 24000, 32000, 44100, 48000})})
@@ -70,15 +70,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 2))
.withFields({C2F(mChannelCount, value).inRange(1, 2)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 320000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -90,13 +90,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -555,4 +555,3 @@
ALOGV("in %s", __func__);
delete factory;
}
-
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index da32ec0..290677e 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -180,7 +180,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -892,7 +892,7 @@
ALOGI("Configuring decoder: mWidth %d , mHeight %d ",
mWidth, mHeight);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
@@ -931,7 +931,7 @@
ALOGI("Configuring decoder out: mWidth %d , mHeight %d ",
mWidth, mHeight);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 901f5ed..3d4a733 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -60,7 +60,11 @@
noInputLatency();
noTimeStretch();
- // TODO: output latency and reordering
+ // TODO: Proper support for reorder depth.
+ addParameter(
+ DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+ .withConstValue(new C2PortActualDelayTuning::output(1u))
+ .build());
addParameter(
DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
@@ -182,7 +186,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -570,7 +574,7 @@
PVSetPostProcType(mDecHandle, 0);
if (handleResChange(work)) {
ALOGI("Setting width and height");
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
@@ -642,7 +646,7 @@
return;
} else if (resChange) {
ALOGI("Setting width and height");
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
index c8796f3..89fa59d 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
@@ -52,26 +52,26 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
#ifdef MPEG4
MEDIA_MIMETYPE_VIDEO_MPEG4
#else
@@ -80,14 +80,14 @@
))
.build());
- addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 176, 144))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 176, 144))
.withFields({
#ifdef MPEG4
C2F(mSize, width).inRange(16, 176, 16),
@@ -101,7 +101,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 17.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -110,8 +110,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
.withSetter(BitrateSetter)
.build());
@@ -217,14 +217,14 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
@@ -446,8 +446,8 @@
}
++mNumInputFrames;
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(outputSize, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(outputSize, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -595,7 +595,7 @@
work->worklets.front()->output.ordinal.timestamp = inputTimeStamp;
if (hintTrack.CodeType == 0) {
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
}
diff --git a/media/codec2/components/opus/Android.bp b/media/codec2/components/opus/Android.bp
index 240cdb9..0ed141b 100644
--- a/media/codec2/components/opus/Android.bp
+++ b/media/codec2/components/opus/Android.bp
@@ -9,3 +9,14 @@
shared_libs: ["libopus"],
}
+cc_library_shared {
+ name: "libcodec2_soft_opusenc",
+ defaults: [
+ "libcodec2_soft-defaults",
+ "libcodec2_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["C2SoftOpusEnc.cpp"],
+
+ shared_libs: ["libopus"],
+}
diff --git a/media/codec2/components/opus/C2SoftOpusDec.cpp b/media/codec2/components/opus/C2SoftOpusDec.cpp
index 2439c3c..680712e 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.cpp
+++ b/media/codec2/components/opus/C2SoftOpusDec.cpp
@@ -19,10 +19,9 @@
#include <log/log.h>
#include <media/stagefright/foundation/MediaDefs.h>
-
+#include <media/stagefright/foundation/OpusHeader.h>
#include <C2PlatformSupport.h>
#include <SimpleC2Interface.h>
-
#include "C2SoftOpusDec.h"
extern "C" {
@@ -41,44 +40,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_OPUS))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
.withFields({C2F(mSampleRate, value).equalTo(48000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 6000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 6000))
.withFields({C2F(mBitrate, value).inRange(6000, 510000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -90,13 +89,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -188,16 +187,6 @@
work->workletsProcessed = 1u;
}
-static uint16_t ReadLE16(const uint8_t *data, size_t data_size,
- uint32_t read_offset) {
- if (read_offset + 1 > data_size)
- return 0;
- uint16_t val;
- val = data[read_offset];
- val |= data[read_offset + 1] << 8;
- return val;
-}
-
static const int kRate = 48000;
// Opus uses Vorbis channel mapping, and Vorbis channel mapping specifies
@@ -216,81 +205,6 @@
static const int kMaxChannelsWithDefaultLayout = 2;
static const uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = { 0, 1 };
-// Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
-static bool ParseOpusHeader(const uint8_t *data, size_t data_size,
- OpusHeader* header) {
- // Size of the Opus header excluding optional mapping information.
- const size_t kOpusHeaderSize = 19;
-
- // Offset to the channel count byte in the Opus header.
- const size_t kOpusHeaderChannelsOffset = 9;
-
- // Offset to the pre-skip value in the Opus header.
- const size_t kOpusHeaderSkipSamplesOffset = 10;
-
- // Offset to the gain value in the Opus header.
- const size_t kOpusHeaderGainOffset = 16;
-
- // Offset to the channel mapping byte in the Opus header.
- const size_t kOpusHeaderChannelMappingOffset = 18;
-
- // Opus Header contains a stream map. The mapping values are in the header
- // beyond the always present |kOpusHeaderSize| bytes of data. The mapping
- // data contains stream count, coupling information, and per channel mapping
- // values:
- // - Byte 0: Number of streams.
- // - Byte 1: Number coupled.
- // - Byte 2: Starting at byte 2 are |header->channels| uint8 mapping
- // values.
- const size_t kOpusHeaderNumStreamsOffset = kOpusHeaderSize;
- const size_t kOpusHeaderNumCoupledOffset = kOpusHeaderNumStreamsOffset + 1;
- const size_t kOpusHeaderStreamMapOffset = kOpusHeaderNumStreamsOffset + 2;
-
- if (data_size < kOpusHeaderSize) {
- ALOGE("Header size is too small.");
- return false;
- }
- header->channels = *(data + kOpusHeaderChannelsOffset);
- if (header->channels <= 0 || header->channels > kMaxChannels) {
- ALOGE("Invalid Header, wrong channel count: %d", header->channels);
- return false;
- }
-
- header->skip_samples = ReadLE16(data,
- data_size,
- kOpusHeaderSkipSamplesOffset);
-
- header->gain_db = static_cast<int16_t>(ReadLE16(data,
- data_size,
- kOpusHeaderGainOffset));
-
- header->channel_mapping = *(data + kOpusHeaderChannelMappingOffset);
- if (!header->channel_mapping) {
- if (header->channels > kMaxChannelsWithDefaultLayout) {
- ALOGE("Invalid Header, missing stream map.");
- return false;
- }
- header->num_streams = 1;
- header->num_coupled = header->channels > 1;
- header->stream_map[0] = 0;
- header->stream_map[1] = 1;
- return true;
- }
- if (data_size < kOpusHeaderStreamMapOffset + header->channels) {
- ALOGE("Invalid stream map; insufficient data for current channel "
- "count: %d", header->channels);
- return false;
- }
- header->num_streams = *(data + kOpusHeaderNumStreamsOffset);
- header->num_coupled = *(data + kOpusHeaderNumCoupledOffset);
- if (header->num_streams + header->num_coupled != header->channels) {
- ALOGE("Inconsistent channel mapping.");
- return false;
- }
- for (int i = 0; i < header->channels; ++i)
- header->stream_map[i] = *(data + kOpusHeaderStreamMapOffset + i);
- return true;
-}
// Convert nanoseconds to number of samples.
static uint64_t ns_to_samples(uint64_t ns, int rate) {
@@ -338,7 +252,19 @@
const uint8_t *data = rView.data() + inOffset;
if (mInputBufferCount < 3) {
if (mInputBufferCount == 0) {
- if (!ParseOpusHeader(data, inSize, &mHeader)) {
+ size_t opusHeadSize = inSize;
+ size_t codecDelayBufSize = 0;
+ size_t seekPreRollBufSize = 0;
+ void *opusHeadBuf = (void *)data;
+ void *codecDelayBuf = NULL;
+ void *seekPreRollBuf = NULL;
+
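+ // The first input buffer carries the codec-specific data. It may be either a
+ // bare OpusHead block or the unified Android Opus CSD that additionally packs
+ // 8-byte codec-delay and seek-pre-roll values (in ns); GetOpusHeaderBuffers is
+ // expected to split it into the three buffers below, leaving the latter two
+ // null when only OpusHead is present.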
+ GetOpusHeaderBuffers(data, inSize, &opusHeadBuf,
+ &opusHeadSize, &codecDelayBuf,
+ &codecDelayBufSize, &seekPreRollBuf,
+ &seekPreRollBufSize);
+
+ if (!ParseOpusHeader((uint8_t *)opusHeadBuf, opusHeadSize, &mHeader)) {
ALOGE("Encountered error while Parsing Opus Header.");
mSignalledError = true;
work->result = C2_CORRUPTED;
@@ -377,6 +303,20 @@
work->result = C2_CORRUPTED;
return;
}
+
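+ // Codec delay and seek pre-roll arrive as 8-byte nanosecond values; convert
+ // them to sample counts at 48 kHz so the decoder can trim its output.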
+ if (codecDelayBuf && codecDelayBufSize == 8) {
+ uint64_t value;
+ memcpy(&value, codecDelayBuf, sizeof(uint64_t));
+ mCodecDelay = ns_to_samples(value, kRate);
+ mSamplesToDiscard = mCodecDelay;
+ ++mInputBufferCount;
+ }
+ if (seekPreRollBuf && seekPreRollBufSize == 8) {
+ uint64_t value;
+ memcpy(&value, seekPreRollBuf, sizeof(uint64_t));
+ mSeekPreRoll = ns_to_samples(value, kRate);
+ ++mInputBufferCount;
+ }
} else {
if (inSize < 8) {
ALOGE("Input sample size is too small.");
@@ -392,29 +332,30 @@
}
else {
mSeekPreRoll = samples;
-
- ALOGI("Configuring decoder: %d Hz, %d channels",
- kRate, mHeader.channels);
- C2StreamSampleRateInfo::output sampleRateInfo(0u, kRate);
- C2StreamChannelCountInfo::output channelCountInfo(0u, mHeader.channels);
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- c2_status_t err = mIntf->config(
- { &sampleRateInfo, &channelCountInfo },
- C2_MAY_BLOCK,
- &failures);
- if (err == OK) {
- work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(sampleRateInfo));
- work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(channelCountInfo));
- } else {
- ALOGE("Config Update failed");
- mSignalledError = true;
- work->result = C2_CORRUPTED;
- return;
- }
}
}
++mInputBufferCount;
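+ // Once all three header buffers (OpusHead, codec delay, seek pre-roll) have
+ // been consumed, publish the sample rate and channel count read from the
+ // header to the interface.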
+ if (mInputBufferCount == 3) {
+ ALOGI("Configuring decoder: %d Hz, %d channels",
+ kRate, mHeader.channels);
+ C2StreamSampleRateInfo::output sampleRateInfo(0u, kRate);
+ C2StreamChannelCountInfo::output channelCountInfo(0u, mHeader.channels);
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ c2_status_t err = mIntf->config(
+ { &sampleRateInfo, &channelCountInfo },
+ C2_MAY_BLOCK,
+ &failures);
+ if (err == OK) {
+ work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(sampleRateInfo));
+ work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(channelCountInfo));
+ } else {
+ ALOGE("Config Update failed");
+ mSignalledError = true;
+ work->result = C2_CORRUPTED;
+ return;
+ }
+ }
fillEmptyWork(work);
if (eos) {
mSignalledOutputEos = true;
diff --git a/media/codec2/components/opus/C2SoftOpusDec.h b/media/codec2/components/opus/C2SoftOpusDec.h
index 92b7426..b0715ac 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.h
+++ b/media/codec2/components/opus/C2SoftOpusDec.h
@@ -24,16 +24,6 @@
namespace android {
-struct OpusHeader {
- int channels;
- int skip_samples;
- int channel_mapping;
- int num_streams;
- int num_coupled;
- int16_t gain_db;
- uint8_t stream_map[8];
-};
-
struct C2SoftOpusDec : public SimpleC2Component {
class IntfImpl;
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.cpp b/media/codec2/components/opus/C2SoftOpusEnc.cpp
new file mode 100644
index 0000000..a0b2443
--- /dev/null
+++ b/media/codec2/components/opus/C2SoftOpusEnc.cpp
@@ -0,0 +1,638 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2SoftOpusEnc"
+#include <utils/Log.h>
+
+#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/foundation/OpusHeader.h>
+#include "C2SoftOpusEnc.h"
+
+extern "C" {
+ #include <opus.h>
+ #include <opus_multistream.h>
+}
+
+#define DEFAULT_FRAME_DURATION_MS 20
+namespace android {
+
+constexpr char COMPONENT_NAME[] = "c2.android.opus.encoder";
+
+class C2SoftOpusEnc::IntfImpl : public C2InterfaceHelper {
+public:
+ explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
+ : C2InterfaceHelper(helper) {
+
+ setDerivedInstance(this);
+
+ addParameter(
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
+ .build());
+
+ addParameter(
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
+ .build());
+
+ addParameter(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
+ MEDIA_MIMETYPE_AUDIO_RAW))
+ .build());
+
+ addParameter(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+ MEDIA_MIMETYPE_AUDIO_OPUS))
+ .build());
+
+ addParameter(
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
+ .withDefault(new C2StreamSampleRateInfo::input(0u, 48000))
+ .withFields({C2F(mSampleRate, value).oneOf({
+ 8000, 12000, 16000, 24000, 48000})})
+ .withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
+ .build());
+
+ addParameter(
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
+ .withDefault(new C2StreamChannelCountInfo::input(0u, 1))
+ .withFields({C2F(mChannelCount, value).inRange(1, 8)})
+ .withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
+ .build());
+
+ addParameter(
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 128000))
+ .withFields({C2F(mBitrate, value).inRange(500, 512000)})
+ .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mComplexity, C2_PARAMKEY_COMPLEXITY)
+ .withDefault(new C2StreamComplexityTuning::output(0u, 10))
+ .withFields({C2F(mComplexity, value).inRange(1, 10)})
+ .withSetter(Setter<decltype(*mComplexity)>::NonStrictValueWithNoDeps)
+ .build());
+
+ addParameter(
+ DefineParam(mInputMaxBufSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
+ .withConstValue(new C2StreamMaxBufferSizeInfo::input(0u, 3840))
+ .build());
+ }
+
+ uint32_t getSampleRate() const { return mSampleRate->value; }
+ uint32_t getChannelCount() const { return mChannelCount->value; }
+ uint32_t getBitrate() const { return mBitrate->value; }
+ uint32_t getComplexity() const { return mComplexity->value; }
+
+private:
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+ std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
+ std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+ std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
+ std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
+};
+
+C2SoftOpusEnc::C2SoftOpusEnc(const char* name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl>& intfImpl)
+ : SimpleC2Component(
+ std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+ mIntf(intfImpl),
+ mOutputBlock(nullptr),
+ mEncoder(nullptr),
+ mInputBufferPcm16(nullptr),
+ mOutIndex(0u) {
+}
+
+C2SoftOpusEnc::~C2SoftOpusEnc() {
+ onRelease();
+}
+
+c2_status_t C2SoftOpusEnc::onInit() {
+ return initEncoder();
+}
+
+c2_status_t C2SoftOpusEnc::configureEncoder() {
+ unsigned char mono_mapping[256] = {0};
+ unsigned char stereo_mapping[256] = {0, 1};
+ unsigned char surround_mapping[256] = {0, 1, 255};
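+ // Channel mapping tables for opus_multistream_encoder_create(): entry i names
+ // the input channel feeding output channel i, and 255 marks a silent channel.
+ // Only the mono and stereo tables are fully wired up; the surround table is a
+ // placeholder.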
+ mSampleRate = mIntf->getSampleRate();
+ mChannelCount = mIntf->getChannelCount();
+ uint32_t bitrate = mIntf->getBitrate();
+ int complexity = mIntf->getComplexity();
+ mNumSamplesPerFrame = mSampleRate / (1000 / mFrameDurationMs);
+ mNumPcmBytesPerInputFrame =
+ mChannelCount * mNumSamplesPerFrame * sizeof(int16_t);
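+ // e.g. with the 48 kHz / 20 ms defaults: 48000 / (1000 / 20) = 960 samples per
+ // frame and channel, i.e. 960 * channels * 2 bytes of 16-bit PCM per frame.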
+ int err = C2_OK;
+
+ unsigned char* mapping;
+ if (mChannelCount < 2) {
+ mapping = mono_mapping;
+ } else if (mChannelCount == 2) {
+ mapping = stereo_mapping;
+ } else {
+ mapping = surround_mapping;
+ }
+
+ if (mEncoder != nullptr) {
+ opus_multistream_encoder_destroy(mEncoder);
+ }
+
+ mEncoder = opus_multistream_encoder_create(mSampleRate, mChannelCount,
+ 1, 1, mapping, OPUS_APPLICATION_AUDIO, &err);
+ if (err) {
+ ALOGE("Could not create libopus encoder. Error code: %i", err);
+ return C2_CORRUPTED;
+ }
+
+ // Complexity
+ if (opus_multistream_encoder_ctl(
+ mEncoder, OPUS_SET_COMPLEXITY(complexity)) != OPUS_OK) {
+ ALOGE("failed to set complexity");
+ return C2_BAD_VALUE;
+ }
+
+ // DTX
+ if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_DTX(0)) != OPUS_OK) {
+ ALOGE("failed to set dtx");
+ return C2_BAD_VALUE;
+ }
+
+ // Application
+ if (opus_multistream_encoder_ctl(mEncoder,
+ OPUS_SET_APPLICATION(OPUS_APPLICATION_AUDIO)) != OPUS_OK) {
+ ALOGE("failed to set application");
+ return C2_BAD_VALUE;
+ }
+
+ // Signal type
+ if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_SIGNAL(OPUS_AUTO)) !=
+ OPUS_OK) {
+ ALOGE("failed to set signal");
+ return C2_BAD_VALUE;
+ }
+
+ // Unconstrained VBR
+ if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_VBR(0)) != OPUS_OK) {
+ ALOGE("failed to set vbr type");
+ return C2_BAD_VALUE;
+ }
+ if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_VBR_CONSTRAINT(0)) !=
+ OPUS_OK) {
+ ALOGE("failed to set vbr constraint");
+ return C2_BAD_VALUE;
+ }
+
+ // Bitrate
+ if (opus_multistream_encoder_ctl(mEncoder, OPUS_SET_BITRATE(bitrate)) !=
+ OPUS_OK) {
+ ALOGE("failed to set bitrate");
+ return C2_BAD_VALUE;
+ }
+
+ // Get codecDelay
+ int32_t lookahead;
+ if (opus_multistream_encoder_ctl(mEncoder, OPUS_GET_LOOKAHEAD(&lookahead)) !=
+ OPUS_OK) {
+ ALOGE("failed to get lookahead");
+ return C2_BAD_VALUE;
+ }
+ mCodecDelay = lookahead * 1000000000ll / mSampleRate;
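+ // lookahead is reported in samples at the encoder rate; converted to ns here.
+ // libopus typically reports about 6.5 ms of look-ahead (312 samples at
+ // 48 kHz), giving a codec delay of roughly 6,500,000 ns.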
+
+ // Set seek preroll to 80 ms
+ mSeekPreRoll = 80000000;
+ return C2_OK;
+}
+
+c2_status_t C2SoftOpusEnc::initEncoder() {
+ mSignalledEos = false;
+ mSignalledError = false;
+ mHeaderGenerated = false;
+ mIsFirstFrame = true;
+ mEncoderFlushed = false;
+ mBufferAvailable = false;
+ mAnchorTimeStamp = 0ull;
+ mProcessedSamples = 0;
+ mFilledLen = 0;
+ mFrameDurationMs = DEFAULT_FRAME_DURATION_MS;
+ if (!mInputBufferPcm16) {
+ mInputBufferPcm16 =
+ (int16_t*)malloc(kFrameSize * kMaxNumChannels * sizeof(int16_t));
+ }
+ if (!mInputBufferPcm16) return C2_NO_MEMORY;
+
+ /* Default Configurations */
+ c2_status_t status = configureEncoder();
+ return status;
+}
+
+c2_status_t C2SoftOpusEnc::onStop() {
+ mSignalledEos = false;
+ mSignalledError = false;
+ mIsFirstFrame = true;
+ mEncoderFlushed = false;
+ mBufferAvailable = false;
+ mAnchorTimeStamp = 0ull;
+ mProcessedSamples = 0u;
+ mFilledLen = 0;
+ if (mEncoder) {
+ int status = opus_multistream_encoder_ctl(mEncoder, OPUS_RESET_STATE);
+ if (status != OPUS_OK) {
+ ALOGE("OPUS_RESET_STATE failed status = %s", opus_strerror(status));
+ mSignalledError = true;
+ return C2_CORRUPTED;
+ }
+ }
+ if (mOutputBlock) mOutputBlock.reset();
+ mOutputBlock = nullptr;
+
+ return C2_OK;
+}
+
+void C2SoftOpusEnc::onReset() {
+ (void)onStop();
+}
+
+void C2SoftOpusEnc::onRelease() {
+ (void)onStop();
+ if (mInputBufferPcm16) {
+ free(mInputBufferPcm16);
+ mInputBufferPcm16 = nullptr;
+ }
+ if (mEncoder) {
+ opus_multistream_encoder_destroy(mEncoder);
+ mEncoder = nullptr;
+ }
+}
+
+c2_status_t C2SoftOpusEnc::onFlush_sm() {
+ return onStop();
+}
+
+// Drain the encoder to get last frames (if any)
+int C2SoftOpusEnc::drainEncoder(uint8_t* outPtr) {
+ memset((uint8_t *)mInputBufferPcm16 + mFilledLen, 0,
+ (mNumPcmBytesPerInputFrame - mFilledLen));
+ int encodedBytes = opus_multistream_encode(
+ mEncoder, mInputBufferPcm16, mNumSamplesPerFrame, outPtr, kMaxPayload);
+ if (encodedBytes > mOutputBlock->capacity()) {
+ ALOGE("not enough space left to write encoded data, dropping %d bytes",
+ mBytesEncoded);
+ // a fatal error would stop the encoding
+ return -1;
+ }
+ ALOGV("encoded %i Opus bytes from %zu PCM bytes", encodedBytes,
+ mNumPcmBytesPerInputFrame);
+ mEncoderFlushed = true;
+ mFilledLen = 0;
+ return encodedBytes;
+}
+
+void C2SoftOpusEnc::process(const std::unique_ptr<C2Work>& work,
+ const std::shared_ptr<C2BlockPool>& pool) {
+ // Initialize output work
+ work->result = C2_OK;
+ work->workletsProcessed = 1u;
+ work->worklets.front()->output.flags = work->input.flags;
+
+ if (mSignalledError || mSignalledEos) {
+ work->result = C2_BAD_VALUE;
+ return;
+ }
+
+ bool eos = (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0;
+ C2ReadView rView = mDummyReadView;
+ size_t inOffset = 0u;
+ size_t inSize = 0u;
+ c2_status_t err = C2_OK;
+ if (!work->input.buffers.empty()) {
+ rView =
+ work->input.buffers[0]->data().linearBlocks().front().map().get();
+ inSize = rView.capacity();
+ if (inSize && rView.error()) {
+ ALOGE("read view map failed %d", rView.error());
+ work->result = C2_CORRUPTED;
+ return;
+ }
+ }
+
+ ALOGV("in buffer attr. size %zu timestamp %d frameindex %d, flags %x",
+ inSize, (int)work->input.ordinal.timestamp.peeku(),
+ (int)work->input.ordinal.frameIndex.peeku(), work->input.flags);
+
+ if (!mEncoder) {
+ err = initEncoder();
+ if (err != C2_OK) {
+ ALOGE("initEncoder failed with status %d", err);
+ work->result = err;
+ mSignalledError = true;
+ return;
+ }
+ }
+ if (mIsFirstFrame && inSize > 0) {
+ mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
+ mIsFirstFrame = false;
+ }
+
+ C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+ err = pool->fetchLinearBlock(kMaxPayload, usage, &mOutputBlock);
+ if (err != C2_OK) {
+ ALOGE("fetchLinearBlock for Output failed with status %d", err);
+ work->result = C2_NO_MEMORY;
+ return;
+ }
+
+ C2WriteView wView = mOutputBlock->map().get();
+ if (wView.error()) {
+ ALOGE("write view map failed %d", wView.error());
+ work->result = C2_CORRUPTED;
+ mOutputBlock.reset();
+ return;
+ }
+
+ size_t inPos = 0;
+ size_t processSize = 0;
+ mBytesEncoded = 0;
+ uint64_t outTimeStamp = 0u;
+ std::shared_ptr<C2Buffer> buffer;
+ uint64_t inputIndex = work->input.ordinal.frameIndex.peeku();
+ const uint8_t* inPtr = rView.data() + inOffset;
+
+ class FillWork {
+ public:
+ FillWork(uint32_t flags, C2WorkOrdinalStruct ordinal,
+ const std::shared_ptr<C2Buffer> &buffer)
+ : mFlags(flags), mOrdinal(ordinal), mBuffer(buffer) {
+ }
+ ~FillWork() = default;
+
+ void operator()(const std::unique_ptr<C2Work>& work) {
+ work->worklets.front()->output.flags = (C2FrameData::flags_t)mFlags;
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.ordinal = mOrdinal;
+ work->workletsProcessed = 1u;
+ work->result = C2_OK;
+ if (mBuffer) {
+ work->worklets.front()->output.buffers.push_back(mBuffer);
+ }
+ ALOGV("timestamp = %lld, index = %lld, w/%s buffer",
+ mOrdinal.timestamp.peekll(),
+ mOrdinal.frameIndex.peekll(),
+ mBuffer ? "" : "o");
+ }
+
+ private:
+ const uint32_t mFlags;
+ const C2WorkOrdinalStruct mOrdinal;
+ const std::shared_ptr<C2Buffer> mBuffer;
+ };
+
+ C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+
+ if (!mHeaderGenerated) {
+ uint8_t header[AOPUS_UNIFIED_CSD_MAXSIZE];
+ memset(header, 0, sizeof(header));
+ OpusHeader opusHeader;
+ opusHeader.channels = mChannelCount;
+ opusHeader.num_streams = mChannelCount;
+ opusHeader.num_coupled = 0;
+ opusHeader.channel_mapping = ((mChannelCount > 8) ? 255 : (mChannelCount > 2));
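+ // Mapping family: 0 for mono/stereo, 1 (Vorbis channel order) for 3-8
+ // channels, 255 otherwise; the expression above yields exactly those values.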
+ opusHeader.gain_db = 0;
+ opusHeader.skip_samples = 0;
+ int headerLen = WriteOpusHeaders(opusHeader, mSampleRate, header,
+ sizeof(header), mCodecDelay, mSeekPreRoll);
+
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(headerLen, 0u);
+ if (!csd) {
+ ALOGE("CSD allocation failed");
+ mSignalledError = true;
+ work->result = C2_NO_MEMORY;
+ return;
+ }
+ ALOGV("put csd, %d bytes", headerLen);
+ memcpy(csd->m.value, header, headerLen);
+ work->worklets.front()->output.configUpdate.push_back(std::move(csd));
+ mHeaderGenerated = true;
+ }
+
+ /*
+ * When the input buffer size is not a multiple of mNumPcmBytesPerInputFrame,
+ * the data is accumulated and carried over. Once the staging buffer holds the
+ * expected number of bytes it is sent to the encoder. mFilledLen tracks the
+ * bytes accumulated so far but not yet encoded; the next call fills in the
+ * remaining mNumPcmBytesPerInputFrame - mFilledLen bytes before encoding.
+ */
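+ // For example, with 48 kHz stereo input (mNumPcmBytesPerInputFrame = 3840),
+ // a 4096-byte input yields one full 3840-byte frame for the encoder and
+ // leaves mFilledLen = 256 bytes to be completed by the next input buffer.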
+ while (inPos < inSize) {
+ const uint8_t* pcmBytes = inPtr + inPos;
+ int filledSamples = mFilledLen / sizeof(int16_t);
+ if ((inPos + (mNumPcmBytesPerInputFrame - mFilledLen)) <= inSize) {
+ processSize = mNumPcmBytesPerInputFrame - mFilledLen;
+ mBufferAvailable = true;
+ } else {
+ processSize = inSize - inPos;
+ mBufferAvailable = false;
+ if (eos) {
+ memset(mInputBufferPcm16 + filledSamples, 0,
+ (mNumPcmBytesPerInputFrame - mFilledLen));
+ mBufferAvailable = true;
+ }
+ }
+ const unsigned nInputSamples = processSize / sizeof(int16_t);
+
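+ // Repack the little-endian 16-bit PCM bytes into host int16_t samples; the
+ // ^ 0x8000 / - 0x8000 step sign-extends the 16-bit value within int32_t
+ // before it is narrowed back into the staging buffer.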
+ for (unsigned i = 0; i < nInputSamples; i++) {
+ int32_t data = pcmBytes[2 * i + 1] << 8 | pcmBytes[2 * i];
+ data = ((data & 0xFFFF) ^ 0x8000) - 0x8000;
+ mInputBufferPcm16[i + filledSamples] = data;
+ }
+ inPos += processSize;
+ mFilledLen += processSize;
+ if (!mBufferAvailable) break;
+ uint8_t* outPtr = wView.data() + mBytesEncoded;
+ int encodedBytes =
+ opus_multistream_encode(mEncoder, mInputBufferPcm16,
+ mNumSamplesPerFrame, outPtr, kMaxPayload);
+ ALOGV("encoded %i Opus bytes from %zu PCM bytes", encodedBytes,
+ processSize);
+
+ if (encodedBytes < 0 || encodedBytes > kMaxPayload) {
+ ALOGE("opus_encode failed, encodedBytes : %d", encodedBytes);
+ mSignalledError = true;
+ work->result = C2_CORRUPTED;
+ return;
+ }
+ if (buffer) {
+ outOrdinal.frameIndex = mOutIndex++;
+ outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+ cloneAndSend(
+ inputIndex, work,
+ FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
+ buffer.reset();
+ }
+ if (encodedBytes > 0) {
+ buffer =
+ createLinearBuffer(mOutputBlock, mBytesEncoded, encodedBytes);
+ }
+ mBytesEncoded += encodedBytes;
+ mProcessedSamples += (filledSamples + nInputSamples);
+ outTimeStamp =
+ mProcessedSamples * 1000000ll / mChannelCount / mSampleRate;
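+ // mProcessedSamples counts interleaved samples across all channels, so
+ // dividing by the channel count gives frames; frames over the sample rate,
+ // scaled by 1e6, is the elapsed time in microseconds past mAnchorTimeStamp.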
+ if ((processSize + mFilledLen) < mNumPcmBytesPerInputFrame)
+ mEncoderFlushed = true;
+ mFilledLen = 0;
+ }
+
+ uint32_t flags = 0;
+ if (eos) {
+ ALOGV("signalled eos");
+ mSignalledEos = true;
+ if (!mEncoderFlushed) {
+ if (buffer) {
+ outOrdinal.frameIndex = mOutIndex++;
+ outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+ cloneAndSend(
+ inputIndex, work,
+ FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
+ buffer.reset();
+ }
+ // drain the encoder for last buffer
+ drainInternal(pool, work);
+ }
+ flags = C2FrameData::FLAG_END_OF_STREAM;
+ }
+ if (buffer) {
+ outOrdinal.frameIndex = mOutIndex++;
+ outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+ FillWork((C2FrameData::flags_t)(flags), outOrdinal, buffer)(work);
+ buffer.reset();
+ }
+ mOutputBlock = nullptr;
+}
+
+c2_status_t C2SoftOpusEnc::drainInternal(
+ const std::shared_ptr<C2BlockPool>& pool,
+ const std::unique_ptr<C2Work>& work) {
+ mBytesEncoded = 0;
+ std::shared_ptr<C2Buffer> buffer = nullptr;
+ C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+ bool eos = (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0;
+
+ C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+ c2_status_t err = pool->fetchLinearBlock(kMaxPayload, usage, &mOutputBlock);
+ if (err != C2_OK) {
+ ALOGE("fetchLinearBlock for Output failed with status %d", err);
+ return C2_NO_MEMORY;
+ }
+
+ C2WriteView wView = mOutputBlock->map().get();
+ if (wView.error()) {
+ ALOGE("write view map failed %d", wView.error());
+ mOutputBlock.reset();
+ return C2_CORRUPTED;
+ }
+
+ int encBytes = drainEncoder(wView.data());
+ if (encBytes > 0) mBytesEncoded += encBytes;
+ if (mBytesEncoded > 0) {
+ buffer = createLinearBuffer(mOutputBlock, 0, mBytesEncoded);
+ mOutputBlock.reset();
+ }
+ mProcessedSamples += (mNumPcmBytesPerInputFrame / sizeof(int16_t));
+ uint64_t outTimeStamp =
+ mProcessedSamples * 1000000ll / mChannelCount / mSampleRate;
+ outOrdinal.frameIndex = mOutIndex++;
+ outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
+ work->worklets.front()->output.flags =
+ (C2FrameData::flags_t)(eos ? C2FrameData::FLAG_END_OF_STREAM : 0);
+ work->worklets.front()->output.buffers.clear();
+ work->worklets.front()->output.ordinal = outOrdinal;
+ work->workletsProcessed = 1u;
+ work->result = C2_OK;
+ if (buffer) {
+ work->worklets.front()->output.buffers.push_back(buffer);
+ }
+ mOutputBlock = nullptr;
+ return C2_OK;
+}
+
+c2_status_t C2SoftOpusEnc::drain(uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool>& pool) {
+ if (drainMode == NO_DRAIN) {
+ ALOGW("drain with NO_DRAIN: no-op");
+ return C2_OK;
+ }
+ if (drainMode == DRAIN_CHAIN) {
+ ALOGW("DRAIN_CHAIN not supported");
+ return C2_OMITTED;
+ }
+ mIsFirstFrame = true;
+ mAnchorTimeStamp = 0ull;
+ mProcessedSamples = 0u;
+ return drainInternal(pool, nullptr);
+}
+
+class C2SoftOpusEncFactory : public C2ComponentFactory {
+public:
+ C2SoftOpusEncFactory()
+ : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+ GetCodec2PlatformComponentStore()->getParamReflector())) {}
+
+ virtual c2_status_t createComponent(
+ c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ std::function<void(C2Component*)> deleter) override {
+ *component = std::shared_ptr<C2Component>(
+ new C2SoftOpusEnc(
+ COMPONENT_NAME, id,
+ std::make_shared<C2SoftOpusEnc::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual c2_status_t createInterface(
+ c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ std::function<void(C2ComponentInterface*)> deleter) override {
+ *interface = std::shared_ptr<C2ComponentInterface>(
+ new SimpleInterface<C2SoftOpusEnc::IntfImpl>(
+ COMPONENT_NAME, id,
+ std::make_shared<C2SoftOpusEnc::IntfImpl>(mHelper)),
+ deleter);
+ return C2_OK;
+ }
+
+ virtual ~C2SoftOpusEncFactory() override = default;
+private:
+ std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+} // namespace android
+
+extern "C" ::C2ComponentFactory* CreateCodec2Factory() {
+ ALOGV("in %s", __func__);
+ return new ::android::C2SoftOpusEncFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory* factory) {
+ ALOGV("in %s", __func__);
+ delete factory;
+}
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.h b/media/codec2/components/opus/C2SoftOpusEnc.h
new file mode 100644
index 0000000..69e5240
--- /dev/null
+++ b/media/codec2/components/opus/C2SoftOpusEnc.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_C2_SOFT_OPUS_ENC_H_
+#define ANDROID_C2_SOFT_OPUS_ENC_H_
+
+#include <atomic>
+#include <SimpleC2Component.h>
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+struct OpusMSEncoder;
+
+namespace android {
+
+struct C2SoftOpusEnc : public SimpleC2Component {
+ class IntfImpl;
+
+ C2SoftOpusEnc(const char *name, c2_node_id_t id,
+ const std::shared_ptr<IntfImpl> &intfImpl);
+ virtual ~C2SoftOpusEnc();
+
+ // From SimpleC2Component
+ c2_status_t onInit() override;
+ c2_status_t onStop() override;
+ void onReset() override;
+ void onRelease() override;
+ c2_status_t onFlush_sm() override;
+ void process(
+ const std::unique_ptr<C2Work> &work,
+ const std::shared_ptr<C2BlockPool> &pool) override;
+ c2_status_t drain(
+ uint32_t drainMode,
+ const std::shared_ptr<C2BlockPool> &pool) override;
+private:
+ /* OPUS_FRAMESIZE_20_MS */
+ const int kFrameSize = 960;
+ const int kMaxPayload = 4000;
+ const int kMaxNumChannels = 8;
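+ // 960 samples = 20 ms at 48 kHz; 4000 bytes matches the packet-buffer size
+ // the libopus documentation suggests as sufficient for one frame.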
+
+ std::shared_ptr<IntfImpl> mIntf;
+ std::shared_ptr<C2LinearBlock> mOutputBlock;
+
+ OpusMSEncoder* mEncoder;
+ int16_t* mInputBufferPcm16;
+
+ bool mHeaderGenerated;
+ bool mIsFirstFrame;
+ bool mEncoderFlushed;
+ bool mBufferAvailable;
+ bool mSignalledEos;
+ bool mSignalledError;
+ uint32_t mSampleRate;
+ uint32_t mChannelCount;
+ uint32_t mFrameDurationMs;
+ uint64_t mAnchorTimeStamp;
+ uint64_t mProcessedSamples;
+ // Codec delay in ns
+ uint64_t mCodecDelay;
+ // Seek pre-roll in ns
+ uint64_t mSeekPreRoll;
+ int mNumSamplesPerFrame;
+ int mBytesEncoded;
+ int32_t mFilledLen;
+ size_t mNumPcmBytesPerInputFrame;
+ std::atomic_uint64_t mOutIndex;
+ c2_status_t initEncoder();
+ c2_status_t configureEncoder();
+ int drainEncoder(uint8_t* outPtr);
+ c2_status_t drainInternal(const std::shared_ptr<C2BlockPool>& pool,
+ const std::unique_ptr<C2Work>& work);
+
+ C2_DO_NOT_COPY(C2SoftOpusEnc);
+};
+
+} // namespace android
+
+#endif // ANDROID_C2_SOFT_OPUS_ENC_H_
diff --git a/media/codec2/components/raw/C2SoftRawDec.cpp b/media/codec2/components/raw/C2SoftRawDec.cpp
index 5c83481..802caa4 100644
--- a/media/codec2/components/raw/C2SoftRawDec.cpp
+++ b/media/codec2/components/raw/C2SoftRawDec.cpp
@@ -37,44 +37,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(8000, 192000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 2))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(1, 10000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -98,13 +98,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
};
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index 48825e4..e7393ee 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -45,44 +45,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_VORBIS))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
.withFields({C2F(mSampleRate, value).inRange(8000, 96000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(32000, 500000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 9ba2362..3120f7a 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -215,7 +215,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -700,7 +700,7 @@
mWidth = img->d_w;
mHeight = img->d_h;
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == C2_OK) {
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 155a84f..6509a88 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -633,7 +633,7 @@
std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block);
if (encoded_packet->data.frame.flags & VPX_FRAME_IS_KEY) {
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
work->worklets.front()->output.ordinal = work->input.ordinal;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 87ed1a9..5591a49 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -229,26 +229,26 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
#ifdef VP9
MEDIA_MIMETYPE_VIDEO_VP9
#else
@@ -257,14 +257,14 @@
))
.build());
- addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
C2F(mSize, width).inRange(2, 2048, 2),
C2F(mSize, height).inRange(2, 2048, 2),
@@ -285,7 +285,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -312,8 +312,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
.withSetter(BitrateSetter)
.build());
@@ -416,18 +416,18 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamTemporalLayeringTuning::output> mLayering;
std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
};
diff --git a/media/codec2/components/xaac/C2SoftXaacDec.cpp b/media/codec2/components/xaac/C2SoftXaacDec.cpp
index 1c0e70b..ed730c3 100644
--- a/media/codec2/components/xaac/C2SoftXaacDec.cpp
+++ b/media/codec2/components/xaac/C2SoftXaacDec.cpp
@@ -66,29 +66,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_AAC))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -97,15 +97,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -116,10 +116,10 @@
.build());
addParameter(
- DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
- .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+ DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+ .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
.withFields({C2F(mAacFormat, value).oneOf({
- C2AacStreamFormatRaw, C2AacStreamFormatAdts
+ C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
})})
.withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
.build());
@@ -203,7 +203,7 @@
.build());
}
- bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+ bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
uint32_t getBitrate() const { return mBitrate->value; }
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
(void)mayBlock;
@@ -218,13 +218,13 @@
int32_t getDrcEffectType() const { return mDrcEffectType->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
@@ -1067,6 +1067,8 @@
int i_loud_norm;
int i_target_loudness;
unsigned int i_sbr_mode;
+ uint32_t ui_proc_mem_tabs_size = 0;
+ pVOID pv_alloc_ptr = NULL;
/* Sampling Frequency */
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
@@ -1115,6 +1117,24 @@
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+ /* Get memory info tables size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEMTABS_SIZE, 0,
+ &ui_proc_mem_tabs_size);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+ pv_alloc_ptr = memalign(4, ui_proc_mem_tabs_size);
+ if (pv_alloc_ptr == NULL) {
+ ALOGE(" Cannot create requested memory %d", ui_proc_mem_tabs_size);
+ return IA_FATAL_ERROR;
+ }
+ memset(pv_alloc_ptr, 0, ui_proc_mem_tabs_size);
+ mMemoryVec.push(pv_alloc_ptr);
+
+ /* Set pointer for process memory tables */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
+ pv_alloc_ptr);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, nullptr);
diff --git a/media/codec2/core/include/C2Buffer.h b/media/codec2/core/include/C2Buffer.h
index 2997f6e..3d3587c 100644
--- a/media/codec2/core/include/C2Buffer.h
+++ b/media/codec2/core/include/C2Buffer.h
@@ -888,6 +888,7 @@
* \retval C2_OK the operation was successful
* \retval C2_NO_MEMORY not enough memory to complete any required allocation
* \retval C2_TIMED_OUT the operation timed out
+ * \retval C2_BLOCKING the operation is blocked
* \retval C2_REFUSED no permission to complete any required allocation
* \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
* \retval C2_OMITTED this pool does not support linear blocks
@@ -916,6 +917,7 @@
* \retval C2_OK the operation was successful
* \retval C2_NO_MEMORY not enough memory to complete any required allocation
* \retval C2_TIMED_OUT the operation timed out
+ * \retval C2_BLOCKING the operation is blocked
* \retval C2_REFUSED no permission to complete any required allocation
* \retval C2_BAD_VALUE capacity or usage are not supported (invalid) (caller error)
* \retval C2_OMITTED this pool does not support circular blocks
@@ -946,6 +948,7 @@
* \retval C2_OK the operation was successful
* \retval C2_NO_MEMORY not enough memory to complete any required allocation
* \retval C2_TIMED_OUT the operation timed out
+ * \retval C2_BLOCKING the operation is blocked
* \retval C2_REFUSED no permission to complete any required allocation
* \retval C2_BAD_VALUE width, height, format or usage are not supported (invalid) (caller
* error)
@@ -1991,7 +1994,6 @@
GRAPHIC, ///< the buffer contains a single graphic block
GRAPHIC_CHUNKS, ///< the buffer contains one of more graphic blocks
};
- typedef type_t Type; // deprecated
/**
* Gets the type of this buffer (data).
@@ -2039,23 +2041,6 @@
*/
const C2BufferData data() const;
- /**
- * These will still work if used in onDeathNotify.
- */
-#if 0
- inline std::shared_ptr<C2LinearBuffer> asLinearBuffer() const {
- return mType == LINEAR ? std::shared_ptr::reinterpret_cast<C2LinearBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2GraphicBuffer> asGraphicBuffer() const {
- return mType == GRAPHIC ? std::shared_ptr::reinterpret_cast<C2GraphicBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2CircularBuffer> asCircularBuffer() const {
- return mType == CIRCULAR ? std::shared_ptr::reinterpret_cast<C2CircularBuffer>(this) : nullptr;
- }
-#endif
-
///@name Pre-destroy notification handling
///@{
@@ -2160,8 +2145,6 @@
*/
static std::shared_ptr<C2Buffer> CreateGraphicBuffer(const C2ConstGraphicBlock &block);
-
-
protected:
// no public constructor
explicit C2Buffer(const std::vector<C2ConstLinearBlock> &blocks);
@@ -2170,7 +2153,6 @@
private:
class Impl;
std::shared_ptr<Impl> mImpl;
-// Type _mType;
};
/**
@@ -2197,109 +2179,6 @@
/// @}
-/// \cond INTERNAL
-
-/// \todo These are no longer used
-
-/// \addtogroup linear
-/// @{
-
-/** \deprecated */
-class C2LinearBuffer
- : public C2Buffer, public _C2LinearRangeAspect,
- public std::enable_shared_from_this<C2LinearBuffer> {
-public:
- /** \todo what is this? */
- const C2Handle *handle() const;
-
-protected:
- inline C2LinearBuffer(const C2ConstLinearBlock &block);
-
-private:
- class Impl;
- Impl *mImpl;
-};
-
-class C2ReadCursor;
-
-class C2WriteCursor {
-public:
- uint32_t remaining() const; // remaining data to be read
- void commit(); // commits the current position. discard data before current position
- void reset() const; // resets position to the last committed position
- // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
- // sliced off.
- C2ReadCursor slice(uint32_t size) const;
- // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
- // sliced off.
- C2WriteCursor reserve(uint32_t size);
- // bool read(T&);
- // bool write(T&);
- C2Fence waitForSpace(uint32_t size);
-};
-
-/// @}
-
-/// \addtogroup graphic
-/// @{
-
-struct C2ColorSpace {
-//public:
- enum Standard {
- BT601,
- BT709,
- BT2020,
- // TODO
- };
-
- enum Range {
- LIMITED,
- FULL,
- // TODO
- };
-
- enum TransferFunction {
- BT709Transfer,
- BT2020Transfer,
- HybridLogGamma2,
- HybridLogGamma4,
- // TODO
- };
-};
-
-/** \deprecated */
-class C2GraphicBuffer : public C2Buffer {
-public:
- // constant attributes
- inline uint32_t width() const { return mWidth; }
- inline uint32_t height() const { return mHeight; }
- inline uint32_t format() const { return mFormat; }
- inline const C2MemoryUsage usage() const { return mUsage; }
-
- // modifiable attributes
-
-
- virtual const C2ColorSpace colorSpace() const = 0;
- // best effort
- virtual void setColorSpace_be(const C2ColorSpace &colorSpace) = 0;
- virtual bool setColorSpace(const C2ColorSpace &colorSpace) = 0;
-
- const C2Handle *handle() const;
-
-protected:
- uint32_t mWidth;
- uint32_t mHeight;
- uint32_t mFormat;
- C2MemoryUsage mUsage;
-
- class Impl;
- Impl *mImpl;
-};
-
-/// @}
-
-/// \endcond
-
/// @}
#endif // C2BUFFER_H_
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index cf1f6cf..9545c45 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -180,6 +180,7 @@
kParamIndexPictureTypeMask,
kParamIndexPictureType,
+ kParamIndexHdr10PlusMetadata,
/* ------------------------------------ video components ------------------------------------ */
@@ -194,7 +195,6 @@
kParamIndexLayerIndex,
kParamIndexLayerCount,
kParamIndexIntraRefresh,
- kParamIndexHdr10PlusMetadata,
/* ------------------------------------ image components ------------------------------------ */
@@ -240,19 +240,6 @@
kParamIndexTimestampGapAdjustment, // input-surface, struct
kParamIndexSurfaceAllocator, // u32
-
- // deprecated indices due to renaming
- kParamIndexAacStreamFormat = kParamIndexAacPackaging,
- kParamIndexCsd = kParamIndexInitData,
- kParamIndexMaxVideoSizeHint = kParamIndexMaxPictureSize,
- kParamIndexMime = kParamIndexMediaType,
- kParamIndexRequestedInfos = kParamIndexSubscribedParamIndices,
-
-
- // deprecated indices due to removal
- kParamIndexSupportedParams = 0xDEAD0000,
- kParamIndexReadOnlyParams,
- kParamIndexTemporal,
};
}
@@ -337,14 +324,8 @@
// read-only
typedef C2GlobalParam<C2Setting, C2SimpleValueStruct<C2Component::domain_t>, kParamIndexDomain>
C2ComponentDomainSetting;
-typedef C2ComponentDomainSetting C2ComponentDomainInfo; // deprecated
-typedef C2Component::domain_t C2DomainKind; // deprecated
constexpr char C2_PARAMKEY_COMPONENT_DOMAIN[] = "component.domain";
-constexpr C2Component::domain_t C2DomainAudio = C2Component::DOMAIN_AUDIO; // deprecated
-constexpr C2Component::domain_t C2DomainOther = C2Component::DOMAIN_OTHER; // deprecate
-constexpr C2Component::domain_t C2DomainVideo = C2Component::DOMAIN_VIDEO; // deprecate
-
/**
* Component attributes.
*
@@ -359,9 +340,6 @@
C2ComponentAttributesSetting;
constexpr char C2_PARAMKEY_COMPONENT_ATTRIBUTES[] = "component.attributes";
-// deprecated
-typedef C2ComponentAttributesSetting C2ComponentTemporalInfo;
-
/**
* Time stretching.
*
@@ -597,6 +575,9 @@
LEVEL_AVC_5, ///< AVC (H.264) Level 5
LEVEL_AVC_5_1, ///< AVC (H.264) Level 5.1
LEVEL_AVC_5_2, ///< AVC (H.264) Level 5.2
+ LEVEL_AVC_6, ///< AVC (H.264) Level 6
+ LEVEL_AVC_6_1, ///< AVC (H.264) Level 6.1
+ LEVEL_AVC_6_2, ///< AVC (H.264) Level 6.2
// HEVC (H.265) tiers and levels
LEVEL_HEVC_MAIN_1 = _C2_PL_HEVC_BASE, ///< HEVC (H.265) Main Tier Level 1
@@ -638,7 +619,7 @@
LEVEL_VP9_6_1, ///< VP9 Level 6.1
LEVEL_VP9_6_2, ///< VP9 Level 6.2
- // Dolby Vision level
+ // Dolby Vision levels
LEVEL_DV_MAIN_HD_24 = _C2_PL_DV_BASE, ///< Dolby Vision main tier hd24
LEVEL_DV_MAIN_HD_30, ///< Dolby Vision main tier hd30
LEVEL_DV_MAIN_FHD_24, ///< Dolby Vision main tier fhd24
@@ -659,6 +640,7 @@
LEVEL_DV_HIGH_UHD_48, ///< Dolby Vision high tier uhd48
LEVEL_DV_HIGH_UHD_60, ///< Dolby Vision high tier uhd60
+ // AV1 levels
LEVEL_AV1_2 = _C2_PL_AV1_BASE , ///< AV1 Level 2
LEVEL_AV1_2_1, ///< AV1 Level 2.1
LEVEL_AV1_2_2, ///< AV1 Level 2.2
@@ -703,7 +685,6 @@
typedef C2StreamParam<C2Info, C2ProfileLevelStruct, kParamIndexProfileLevel>
C2StreamProfileLevelInfo;
constexpr char C2_PARAMKEY_PROFILE_LEVEL[] = "coded.pl";
-#define C2_PARAMKEY_STREAM_PROFILE_LEVEL C2_PARAMKEY_PROFILE_LEVEL
/**
* Codec-specific initialization data.
@@ -715,9 +696,7 @@
* TODO: define for other codecs.
*/
typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexInitData> C2StreamInitDataInfo;
-typedef C2StreamInitDataInfo C2StreamCsdInfo; // deprecated
constexpr char C2_PARAMKEY_INIT_DATA[] = "coded.init-data";
-#define C2_PARAMKEY_STREAM_INIT_DATA C2_PARAMKEY_INIT_DATA
/**
* Supplemental Data.
@@ -777,11 +756,8 @@
* port media type.
*/
typedef C2PortParam<C2Setting, C2StringValue, kParamIndexMediaType> C2PortMediaTypeSetting;
-typedef C2PortMediaTypeSetting C2PortMimeConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_MEDIA_TYPE[] = "input.media-type";
constexpr char C2_PARAMKEY_OUTPUT_MEDIA_TYPE[] = "output.media-type";
-#define C2_NAME_INPUT_PORT_MIME_SETTING C2_PARAMKEY_INPUT_MEDIA_TYPE
-#define C2_NAME_OUTPUT_PORT_MIME_SETTING C2_PARAMKEY_OUTPUT_MEDIA_TYPE
typedef C2StreamParam<C2Setting, C2StringValue, kParamIndexMediaType> C2StreamMediaTypeSetting;
@@ -804,24 +780,20 @@
*/
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest> C2PortRequestedDelayTuning;
-typedef C2PortRequestedDelayTuning C2PortRequestedLatencyTuning; // deprecated
constexpr char C2_PARAMKEY_INPUT_DELAY_REQUEST[] = "input.delay.requested";
constexpr char C2_PARAMKEY_OUTPUT_DELAY_REQUEST[] = "output.delay.requested";
typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest>
C2RequestedPipelineDelayTuning;
-typedef C2RequestedPipelineDelayTuning C2ComponentRequestedLatencyTuning; // deprecated
constexpr char C2_PARAMKEY_PIPELINE_DELAY_REQUEST[] = "pipeline-delay.requested";
// read-only
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2PortActualDelayTuning;
-typedef C2PortActualDelayTuning C2PortLatencyInfo; // deprecated
constexpr char C2_PARAMKEY_INPUT_DELAY[] = "input.delay.actual";
constexpr char C2_PARAMKEY_OUTPUT_DELAY[] = "output.delay.actual";
// read-only
typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2ActualPipelineDelayTuning;
-typedef C2ActualPipelineDelayTuning C2ComponentLatencyInfo; // deprecated
constexpr char C2_PARAMKEY_PIPELINE_DELAY[] = "algo.delay.actual";
/**
@@ -871,7 +843,6 @@
*/
// private
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexStreamCount> C2PortStreamCountTuning;
-typedef C2PortStreamCountTuning C2PortStreamCountConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_STREAM_COUNT[] = "input.stream-count";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_COUNT[] = "output.stream-count";
@@ -981,20 +952,9 @@
typedef C2StreamParam<C2Setting, C2SimpleValueStruct<C2EasyEnum<C2BufferData::type_t>>,
kParamIndexBufferType>
C2StreamBufferTypeSetting;
-
-constexpr C2BufferData::type_t C2FormatAudio = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatCompressed = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatVideo = C2BufferData::GRAPHIC; // deprecated
-typedef C2BufferData::type_t C2FormatKind; // deprecated
-
-typedef C2StreamBufferTypeSetting C2StreamFormatConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE[] = "input.buffers.type";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE[] = "output.buffers.type";
-// deprecated
-#define C2_NAME_INPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE
-#define C2_NAME_OUTPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE
-
/**
* Memory usage.
*
@@ -1003,8 +963,6 @@
typedef C2StreamParam<C2Tuning, C2Uint64Value, kParamIndexUsage> C2StreamUsageTuning;
constexpr char C2_PARAMKEY_INPUT_STREAM_USAGE[] = "input.buffers.usage";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_USAGE[] = "output.buffers.usage";
-// deprecated
-#define C2_NAME_INPUT_STREAM_USAGE_SETTING C2_PARAMKEY_INPUT_STREAM_USAGE
/**
* Picture (video or image frame) size.
@@ -1064,8 +1022,6 @@
constexpr char C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE[] = "input.buffers.max-size";
constexpr char C2_PARAMKEY_OUTPUT_MAX_BUFFER_SIZE[] = "output.buffers.max-size";
-#define C2_NAME_STREAM_MAX_BUFFER_SIZE_SETTING C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE
-
/* ---------------------------------------- misc. state ---------------------------------------- */
/**
@@ -1166,9 +1122,7 @@
* Bitrate
*/
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexBitrate> C2StreamBitrateInfo;
-typedef C2StreamBitrateInfo C2BitrateTuning; // deprecated
constexpr char C2_PARAMKEY_BITRATE[] = "coded.bitrate";
-#define C2_NAME_STREAM_BITRATE_SETTING C2_PARAMKEY_BITRATE
/**
* Bitrate mode.
@@ -1257,15 +1211,8 @@
*
* This is used for the output of the video decoder, and the input of the video encoder.
*/
-typedef C2PictureSizeStruct C2VideoSizeStruct; // deprecated
-
typedef C2StreamParam<C2Info, C2PictureSizeStruct, kParamIndexPictureSize> C2StreamPictureSizeInfo;
constexpr char C2_PARAMKEY_PICTURE_SIZE[] = "raw.size";
-#define C2_PARAMKEY_STREAM_PICTURE_SIZE C2_PARAMKEY_PICTURE_SIZE
-#define C2_NAME_STREAM_VIDEO_SIZE_INFO C2_PARAMKEY_PICTURE_SIZE
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamInfo; // deprecated
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamTuning; // deprecated
-#define C2_NAME_STREAM_VIDEO_SIZE_SETTING C2_PARAMKEY_PICTURE_SIZE
/**
* Crop rectangle.
@@ -1340,12 +1287,10 @@
kParamIndexScalingMethod>
C2StreamScalingMethodTuning;
constexpr char C2_PARAMKEY_SCALING_MODE[] = "raw.scaling-method";
-#define C2_PARAMKEY_STREAM_SCALING_MODE C2_PARAMKEY_SCALING_MODE
typedef C2StreamParam<C2Tuning, C2PictureSizeStruct, kParamIndexScaledPictureSize>
C2StreamScaledPictureSizeTuning;
constexpr char C2_PARAMKEY_SCALED_PICTURE_SIZE[] = "raw.scaled-size";
-#define C2_PARAMKEY_STREAM_SCALED_PICTURE_SIZE C2_PARAMKEY_SCALED_PICTURE_SIZE
typedef C2StreamParam<C2Tuning, C2RectStruct, kParamIndexScaledCropRect>
C2StreamScaledCropRectTuning;
@@ -1500,15 +1445,8 @@
MATRIX_BT2020_CONSTANT, ///< Rec.ITU-R BT.2020 constant luminance
MATRIX_VENDOR_START = 0x80, ///< vendor-specific matrix coefficient values start here
MATRIX_OTHER = 0xff, ///< max value, reserved for undefined values
-
- MATRIX_SMPTE240M = MATRIX_240M, // deprecated
- MATRIX_BT2020CONSTANT = MATRIX_BT2020_CONSTANT, // deprecated
)
-constexpr C2Color::matrix_t MATRIX_BT470_6M = MATRIX_FCC47_73_682; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT709_5 = MATRIX_BT709; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT601_6 = MATRIX_BT601; // deprecated
-
struct C2ColorAspectsStruct {
C2Color::range_t range;
C2Color::primaries_t primaries;
@@ -1631,7 +1569,6 @@
*/
typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2StreamFrameRateInfo;
constexpr char C2_PARAMKEY_FRAME_RATE[] = "coded.frame-rate";
-#define C2_NAME_STREAM_FRAME_RATE_SETTING C2_PARAMKEY_FRAME_RATE
typedef C2PortParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2PortFrameRateInfo;
constexpr char C2_PARAMKEY_INPUT_FRAME_RATE[] = "input.frame-rate";
@@ -1664,9 +1601,6 @@
B_FRAME = (1 << 3), ///< backward predicted (out-of-order) frame
)
-typedef C2Config::picture_type_t C2PictureTypeMask; // deprecated
-constexpr C2Config::picture_type_t C2PictureTypeKeyFrame = C2Config::SYNC_FRAME; // deprecated
-
/**
* Allowed picture types.
*/
@@ -1746,8 +1680,6 @@
typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexSyncFrameInterval>
C2StreamSyncFrameIntervalTuning;
constexpr char C2_PARAMKEY_SYNC_FRAME_INTERVAL[] = "coding.sync-frame-interval";
-// deprecated
-#define C2_PARAMKEY_SYNC_FRAME_PERIOD C2_PARAMKEY_SYNC_FRAME_INTERVAL
/**
* Temporal layering
@@ -1881,8 +1813,6 @@
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexSampleRate> C2StreamSampleRateInfo;
constexpr char C2_PARAMKEY_SAMPLE_RATE[] = "raw.sample-rate";
constexpr char C2_PARAMKEY_CODED_SAMPLE_RATE[] = "coded.sample-rate";
-// deprecated
-#define C2_NAME_STREAM_SAMPLE_RATE_SETTING C2_PARAMKEY_SAMPLE_RATE
/**
* Channel count.
@@ -1890,8 +1820,6 @@
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexChannelCount> C2StreamChannelCountInfo;
constexpr char C2_PARAMKEY_CHANNEL_COUNT[] = "raw.channel-count";
constexpr char C2_PARAMKEY_CODED_CHANNEL_COUNT[] = "coded.channel-count";
-// deprecated
-#define C2_NAME_STREAM_CHANNEL_COUNT_SETTING C2_PARAMKEY_CHANNEL_COUNT
/**
* Max channel count. Used to limit the number of coded or decoded channels.
@@ -2001,16 +1929,10 @@
AAC_PACKAGING_ADTS
)
-typedef C2Config::aac_packaging_t C2AacStreamFormatKind; // deprecated
-// deprecated
-constexpr C2Config::aac_packaging_t C2AacStreamFormatRaw = C2Config::AAC_PACKAGING_RAW;
-constexpr C2Config::aac_packaging_t C2AacStreamFormatAdts = C2Config::AAC_PACKAGING_ADTS;
-
typedef C2StreamParam<C2Info, C2SimpleValueStruct<C2EasyEnum<C2Config::aac_packaging_t>>,
kParamIndexAacPackaging> C2StreamAacPackagingInfo;
typedef C2StreamAacPackagingInfo C2StreamAacFormatInfo;
constexpr char C2_PARAMKEY_AAC_PACKAGING[] = "coded.aac-packaging";
-#define C2_NAME_STREAM_AAC_FORMAT_SETTING C2_PARAMKEY_AAC_PACKAGING
/* ================================ PLATFORM-DEFINED PARAMETERS ================================ */
@@ -2130,7 +2052,6 @@
typedef C2GlobalParam<C2Tuning, C2EasyBoolValue, kParamIndexInputSurfaceEos>
C2InputSurfaceEosTuning;
constexpr char C2_PARAMKEY_INPUT_SURFACE_EOS[] = "input-surface.eos";
-#define C2_NAME_INPUT_SURFACE_EOS_TUNING C2_PARAMKEY_INPUT_SURFACE_EOS
/**
* Start/suspend/resume/stop controls and timestamps for input surface.
diff --git a/media/codec2/core/include/C2Param.h b/media/codec2/core/include/C2Param.h
index efc5c89..d264bf3 100644
--- a/media/codec2/core/include/C2Param.h
+++ b/media/codec2/core/include/C2Param.h
@@ -1012,15 +1012,6 @@
_mNamedValues(_NamedValuesGetter<B>::getNamedValues()),
_mFieldId(offset) {}
-/*
- template<typename T, typename B=typename std::remove_extent<T>::type>
- inline C2FieldDescriptor<T, B, false>(T* offset, const char *name)
- : _mType(this->GetType((B*)nullptr)),
- _mExtent(std::is_array<T>::value ? std::extent<T>::value : 1),
- _mName(name),
- _mFieldId(offset) {}
-*/
-
/// \deprecated
template<typename T, typename S, class B=typename std::remove_extent<T>::type>
inline C2FieldDescriptor(S*, T S::* field, const char *name)
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index c5ad6a0..d0296a5 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -8,8 +8,10 @@
srcs: [
"Component.cpp",
+ "ComponentInterface.cpp",
"ComponentStore.cpp",
"Configurable.cpp",
+ "InputBufferManager.cpp",
"InputSurface.cpp",
"InputSurfaceConnection.cpp",
"types.cpp",
diff --git a/media/codec2/hidl/1.0/utils/Component.cpp b/media/codec2/hidl/1.0/utils/Component.cpp
index 5ae1972..0473b57 100644
--- a/media/codec2/hidl/1.0/utils/Component.cpp
+++ b/media/codec2/hidl/1.0/utils/Component.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,11 +18,11 @@
#define LOG_TAG "Codec2-Component"
#include <android-base/logging.h>
-#include <C2PlatformSupport.h>
#include <codec2/hidl/1.0/Component.h>
#include <codec2/hidl/1.0/ComponentStore.h>
-#include <codec2/hidl/1.0/types.h>
+#include <codec2/hidl/1.0/InputBufferManager.h>
+#include <android/hardware/media/c2/1.0/IInputSink.h>
#include <hidl/HidlBinderSupport.h>
#include <utils/Timers.h>
@@ -42,281 +42,6 @@
using namespace ::android;
-namespace /* unnamed */ {
-
-// Implementation of ConfigurableC2Intf based on C2ComponentInterface
-struct CompIntf : public ConfigurableC2Intf {
- CompIntf(const std::shared_ptr<C2ComponentInterface>& intf) :
- ConfigurableC2Intf(intf->getName()),
- mIntf(intf) {
- }
-
- virtual c2_status_t config(
- const std::vector<C2Param*>& params,
- c2_blocking_t mayBlock,
- std::vector<std::unique_ptr<C2SettingResult>>* const failures
- ) override {
- ALOGV("config");
- return mIntf->config_vb(params, mayBlock, failures);
- }
-
- virtual c2_status_t query(
- const std::vector<C2Param::Index>& indices,
- c2_blocking_t mayBlock,
- std::vector<std::unique_ptr<C2Param>>* const params
- ) const override {
- ALOGV("query");
- return mIntf->query_vb({}, indices, mayBlock, params);
- }
-
- virtual c2_status_t querySupportedParams(
- std::vector<std::shared_ptr<C2ParamDescriptor>>* const params
- ) const override {
- ALOGV("querySupportedParams");
- return mIntf->querySupportedParams_nb(params);
- }
-
- virtual c2_status_t querySupportedValues(
- std::vector<C2FieldSupportedValuesQuery>& fields,
- c2_blocking_t mayBlock) const override {
- ALOGV("querySupportedValues");
- return mIntf->querySupportedValues_vb(fields, mayBlock);
- }
-
-protected:
- std::shared_ptr<C2ComponentInterface> mIntf;
-};
-
-} // unnamed namespace
-
-// InputBufferManager
-// ==================
-//
-// InputBufferManager presents a way to track and untrack input buffers in this
-// (codec) process and send a notification to a listener, possibly in a
-// different process, when a tracked buffer no longer has any references in this
-// process. (In fact, this class would work for listeners in the same process
-// too, but the optimization discussed below will not be beneficial.)
-//
-// InputBufferManager holds a collection of records representing tracked buffers
-// and their callback listeners. Conceptually, one record is a triple (listener,
-// frameIndex, bufferIndex) where
-//
-// - (frameIndex, bufferIndex) is a pair of indices used to identify the buffer.
-// - listener is of type IComponentListener. Its onFramesRendered() function
-// will be called after the associated buffer dies. The argument of
-// onFramesRendered() is a list of RenderedFrame objects, each of which has
-// the following members:
-//
-// uint64_t bufferQueueId
-// int32_t slotId
-// int64_t timestampNs
-//
-// When a tracked buffer associated to the triple (listener, frameIndex,
-// bufferIndex) goes out of scope, listener->onFramesRendered() will be called
-// with a RenderedFrame object whose members are set as follows:
-//
-// bufferQueueId = frameIndex
-// slotId = ~bufferIndex
-// timestampNs = systemTime() at the time of notification
-//
-// The reason for the bitwise negation of bufferIndex is that onFramesRendered()
-// may be used for a different purpose when slotId is non-negative (which is a
-// more general use case).
-//
-// IPC Optimization
-// ----------------
-//
-// Since onFramesRendered() generally is an IPC call, InputBufferManager tries
-// not to call it too often. There is a mechanism to guarantee that any two
-// calls to the same listener are at least kNotificationPeriodNs nanoseconds
-// apart.
-//
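The throttling rule described in the comment block above can be summarized with a small, self-contained C++ sketch. The names here (ListenerState, maySendNow, kMinNotificationPeriod) are illustrative only; the deleted implementation keeps the same per-listener state in nsecs_t via systemTime() and kNotificationPeriodNs.

#include <chrono>

using Clock = std::chrono::steady_clock;
// Mirrors the spirit of kNotificationPeriodNs (1 ms) used above.
constexpr std::chrono::nanoseconds kMinNotificationPeriod{1000000};

struct ListenerState {
    // Start in the past so the very first notification may go out immediately.
    Clock::time_point lastSent =
            Clock::now() - std::chrono::duration_cast<Clock::duration>(kMinNotificationPeriod);
    size_t pendingCount = 0;
};

// Returns true if this listener may be notified now; otherwise stores the
// remaining wait in *waitFor.
bool maySendNow(const ListenerState& s, std::chrono::nanoseconds* waitFor) {
    auto elapsed = std::chrono::duration_cast<std::chrono::nanoseconds>(
            Clock::now() - s.lastSent);
    if (elapsed >= kMinNotificationPeriod) {
        return true;
    }
    *waitFor = kMinNotificationPeriod - elapsed;
    return false;
}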
-struct InputBufferManager {
- // The minimum time period between IPC calls to notify the client about the
- // destruction of input buffers.
- static constexpr nsecs_t kNotificationPeriodNs = 1000000;
-
- // Track all buffers in a C2FrameData object.
- //
- // input (C2FrameData) has the following two members that are of interest:
- //
- // C2WorkOrdinal ordinal
- // vector<shared_ptr<C2Buffer>> buffers
- //
- // Calling registerFrameData(listener, input) will register multiple
- // triples (listener, frameIndex, bufferIndex) where frameIndex is equal to
- // input.ordinal.frameIndex and bufferIndex runs through the indices of
- // input.buffers such that input.buffers[bufferIndex] is not null.
- //
- // This should be called from queue().
- static void registerFrameData(
- const sp<IComponentListener>& listener,
- const C2FrameData& input);
-
- // Untrack all buffers in a C2FrameData object.
- //
- // Calling unregisterFrameData(listener, input) will unregister and remove
- // pending notifications for all triples (l, fi, bufferIndex) such that
- // l = listener and fi = input.ordinal.frameIndex.
- //
- // This should be called from onWorkDone() and flush().
- static void unregisterFrameData(
- const wp<IComponentListener>& listener,
- const C2FrameData& input);
-
- // Untrack all buffers associated to a given listener.
- //
- // Calling unregisterFrameData(listener) will unregister and remove
- // pending notifications for all triples (l, frameIndex, bufferIndex) such
- // that l = listener.
- //
- // This should be called when the component cleans up all input buffers,
- // i.e., when reset(), release(), stop() or ~Component() is called.
- static void unregisterFrameData(
- const wp<IComponentListener>& listener);
-
-private:
- void _registerFrameData(
- const sp<IComponentListener>& listener,
- const C2FrameData& input);
- void _unregisterFrameData(
- const wp<IComponentListener>& listener,
- const C2FrameData& input);
- void _unregisterFrameData(
- const wp<IComponentListener>& listener);
-
- // The callback function tied to C2Buffer objects.
- //
- // Note: This function assumes that sInstance is the only instance of this
- // class.
- static void onBufferDestroyed(const C2Buffer* buf, void* arg);
- void _onBufferDestroyed(const C2Buffer* buf, void* arg);
-
- // Comparison operator for weak pointers.
- struct CompareWeakComponentListener {
- constexpr bool operator()(
- const wp<IComponentListener>& x,
- const wp<IComponentListener>& y) const {
- return x.get_refs() < y.get_refs();
- }
- };
-
- // Persistent data to be passed as "arg" in onBufferDestroyed().
- // This is essentially the triple (listener, frameIndex, bufferIndex) plus a
- // weak pointer to the C2Buffer object.
- //
- // Note that the "key" is bufferIndex according to operator<(). This is
- // designed to work with TrackedBuffersMap defined below.
- struct TrackedBuffer {
- wp<IComponentListener> listener;
- uint64_t frameIndex;
- size_t bufferIndex;
- std::weak_ptr<C2Buffer> buffer;
- TrackedBuffer(const wp<IComponentListener>& listener,
- uint64_t frameIndex,
- size_t bufferIndex,
- const std::shared_ptr<C2Buffer>& buffer)
- : listener(listener),
- frameIndex(frameIndex),
- bufferIndex(bufferIndex),
- buffer(buffer) {}
- TrackedBuffer(const TrackedBuffer&) = default;
- bool operator<(const TrackedBuffer& other) const {
- return bufferIndex < other.bufferIndex;
- }
- };
-
- // Map: listener -> frameIndex -> set<TrackedBuffer>.
- // Essentially, this is used to store triples (listener, frameIndex,
- // bufferIndex) that's searchable by listener and (listener, frameIndex).
- // However, the value of the innermost map is TrackedBuffer, which also
- // contains an extra copy of listener and frameIndex. This is needed
- // because onBufferDestroyed() needs to know listener and frameIndex too.
- typedef std::map<wp<IComponentListener>,
- std::map<uint64_t,
- std::set<TrackedBuffer>>,
- CompareWeakComponentListener> TrackedBuffersMap;
-
- // Storage for pending (unsent) death notifications for one listener.
- // Each pair in member named "indices" are (frameIndex, bufferIndex) from
- // the (listener, frameIndex, bufferIndex) triple.
- struct DeathNotifications {
-
- // The number of pending notifications for this listener.
- // count may be 0, in which case the DeathNotifications object will
- // remain valid for only a small period (kNotificationPeriodNs
- // nanoseconds).
- size_t count;
-
- // The timestamp of the most recent callback on this listener. This is
- // used to guarantee that callbacks do not occur too frequently, and
- // also to trigger expiration of a DeathNotifications object that has
- // count = 0.
- nsecs_t lastSentNs;
-
- // Map: frameIndex -> vector of bufferIndices
- // This is essentially a collection of (frameIndex, bufferIndex).
- std::map<uint64_t, std::vector<size_t>> indices;
-
- DeathNotifications()
- : count(0),
- lastSentNs(systemTime() - kNotificationPeriodNs),
- indices() {}
- };
-
- // Mutex for the management of all input buffers.
- std::mutex mMutex;
-
- // Tracked input buffers.
- TrackedBuffersMap mTrackedBuffersMap;
-
- // Death notifications to be sent.
- //
- // A DeathNotifications object is associated to each listener. An entry in
- // this map will be removed if its associated DeathNotifications has count =
- // 0 and lastSentNs < systemTime() - kNotificationPeriodNs.
- std::map<wp<IComponentListener>, DeathNotifications> mDeathNotifications;
-
- // Condition variable signaled when an entry is added to mDeathNotifications.
- std::condition_variable mOnBufferDestroyed;
-
- // Notify the clients about buffer destructions.
- // Return false if all destructions have been notified.
- // Return true and set timeToRetry to the duration to wait for before
- // retrying if some destructions have not been notified.
- bool processNotifications(nsecs_t* timeToRetryNs);
-
- // Main function for the input buffer manager thread.
- void main();
-
- // The thread that manages notifications.
- //
- // Note: This variable is declared last so its initialization will happen
- // after all other member variables have been initialized.
- std::thread mMainThread;
-
- // Private constructor.
- InputBufferManager();
-
- // The only instance of this class.
- static InputBufferManager& getInstance();
-
-};
-
-// ComponentInterface
-ComponentInterface::ComponentInterface(
- const std::shared_ptr<C2ComponentInterface>& intf,
- const sp<ComponentStore>& store) :
- Configurable(new CachedConfigurable(std::make_unique<CompIntf>(intf))),
- mInterface(intf) {
- mInit = init(store.get());
-}
-
-c2_status_t ComponentInterface::status() const {
- return mInit;
-}
-
// ComponentListener wrapper
struct Component::Listener : public C2Component::Listener {
@@ -328,12 +53,12 @@
virtual void onError_nb(
std::weak_ptr<C2Component> /* c2component */,
uint32_t errorCode) override {
- ALOGV("onError");
sp<IComponentListener> listener = mListener.promote();
if (listener) {
Return<void> transStatus = listener->onError(Status::OK, errorCode);
if (!transStatus.isOk()) {
- ALOGE("onError -- transaction failed.");
+ LOG(ERROR) << "Component::Listener::onError_nb -- "
+ << "transaction failed.";
}
}
}
@@ -342,7 +67,6 @@
std::weak_ptr<C2Component> /* c2component */,
std::vector<std::shared_ptr<C2SettingResult>> c2settingResult
) override {
- ALOGV("onTripped");
sp<IComponentListener> listener = mListener.promote();
if (listener) {
hidl_vec<SettingResult> settingResults(c2settingResult.size());
@@ -350,8 +74,7 @@
for (const std::shared_ptr<C2SettingResult> &c2result :
c2settingResult) {
if (c2result) {
- if (objcpy(&settingResults[ix++], *c2result) !=
- Status::OK) {
+ if (!objcpy(&settingResults[ix++], *c2result)) {
break;
}
}
@@ -359,7 +82,8 @@
settingResults.resize(ix);
Return<void> transStatus = listener->onTripped(settingResults);
if (!transStatus.isOk()) {
- ALOGE("onTripped -- transaction failed.");
+ LOG(ERROR) << "Component::Listener::onTripped_nb -- "
+ << "transaction failed.";
}
}
}
@@ -367,7 +91,6 @@
virtual void onWorkDone_nb(
std::weak_ptr<C2Component> /* c2component */,
std::list<std::unique_ptr<C2Work>> c2workItems) override {
- ALOGV("onWorkDone");
for (const std::unique_ptr<C2Work>& work : c2workItems) {
if (work) {
if (work->worklets.empty()
@@ -385,15 +108,16 @@
WorkBundle workBundle;
sp<Component> strongComponent = mComponent.promote();
- if (objcpy(&workBundle, c2workItems, strongComponent ?
- &strongComponent->mBufferPoolSender : nullptr)
- != Status::OK) {
- ALOGE("onWorkDone() received corrupted work items.");
+ if (!objcpy(&workBundle, c2workItems, strongComponent ?
+ &strongComponent->mBufferPoolSender : nullptr)) {
+ LOG(ERROR) << "Component::Listener::onWorkDone_nb -- "
+ << "received corrupted work items.";
return;
}
Return<void> transStatus = listener->onWorkDone(workBundle);
if (!transStatus.isOk()) {
- ALOGE("onWorkDone -- transaction failed.");
+ LOG(ERROR) << "Component::Listener::onWorkDone_nb -- "
+ << "transaction failed.";
return;
}
yieldBufferQueueBlocks(c2workItems, true);
@@ -405,23 +129,86 @@
wp<IComponentListener> mListener;
};
+// Component::Sink
+struct Component::Sink : public IInputSink {
+ std::shared_ptr<Component> mComponent;
+ sp<IConfigurable> mConfigurable;
+
+ virtual Return<Status> queue(const WorkBundle& workBundle) override {
+ return mComponent->queue(workBundle);
+ }
+
+ virtual Return<sp<IConfigurable>> getConfigurable() override {
+ return mConfigurable;
+ }
+
+ Sink(const std::shared_ptr<Component>& component);
+ virtual ~Sink() override;
+
+ // Process-wide map: Component::Sink -> C2Component.
+ static std::mutex sSink2ComponentMutex;
+ static std::map<IInputSink*, std::weak_ptr<C2Component>> sSink2Component;
+
+ static std::shared_ptr<C2Component> findLocalComponent(
+ const sp<IInputSink>& sink);
+};
+
+std::mutex
+ Component::Sink::sSink2ComponentMutex{};
+std::map<IInputSink*, std::weak_ptr<C2Component>>
+ Component::Sink::sSink2Component{};
+
+Component::Sink::Sink(const std::shared_ptr<Component>& component)
+ : mComponent{component},
+ mConfigurable{[&component]() -> sp<IConfigurable> {
+ Return<sp<IComponentInterface>> ret1 = component->getInterface();
+ if (!ret1.isOk()) {
+ LOG(ERROR) << "Sink::Sink -- component's transaction failed.";
+ return nullptr;
+ }
+ Return<sp<IConfigurable>> ret2 =
+ static_cast<sp<IComponentInterface>>(ret1)->
+ getConfigurable();
+ if (!ret2.isOk()) {
+ LOG(ERROR) << "Sink::Sink -- interface's transaction failed.";
+ return nullptr;
+ }
+ return static_cast<sp<IConfigurable>>(ret2);
+ }()} {
+ std::lock_guard<std::mutex> lock(sSink2ComponentMutex);
+ sSink2Component.emplace(this, component->mComponent);
+}
+
+Component::Sink::~Sink() {
+ std::lock_guard<std::mutex> lock(sSink2ComponentMutex);
+ sSink2Component.erase(this);
+}
+
+std::shared_ptr<C2Component> Component::Sink::findLocalComponent(
+ const sp<IInputSink>& sink) {
+ std::lock_guard<std::mutex> lock(sSink2ComponentMutex);
+ auto i = sSink2Component.find(sink.get());
+ if (i == sSink2Component.end()) {
+ return nullptr;
+ }
+ return i->second.lock();
+}
+
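The sSink2Component map introduced above is an instance of a common pattern: a process-wide registry keyed by the raw interface pointer that stores only weak references, so a remote-facing wrapper can be resolved back to its local implementation without extending that implementation's lifetime. A minimal sketch of the pattern, using hypothetical names (SinkRegistry, LocalImpl) in place of the real HIDL types:

#include <map>
#include <memory>
#include <mutex>

struct LocalImpl {};  // stand-in for the local C2Component

class SinkRegistry {
public:
    void add(void* key, const std::shared_ptr<LocalImpl>& impl) {
        std::lock_guard<std::mutex> lock(mMutex);
        mMap[key] = impl;  // stored as weak_ptr: never extends the component's lifetime
    }
    void remove(void* key) {
        std::lock_guard<std::mutex> lock(mMutex);
        mMap.erase(key);
    }
    std::shared_ptr<LocalImpl> find(void* key) {
        std::lock_guard<std::mutex> lock(mMutex);
        auto it = mMap.find(key);
        if (it == mMap.end()) {
            return nullptr;
        }
        return it->second.lock();  // null if the component has already died
    }
private:
    std::mutex mMutex;
    std::map<void*, std::weak_ptr<LocalImpl>> mMap;
};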
// Component
Component::Component(
const std::shared_ptr<C2Component>& component,
const sp<IComponentListener>& listener,
const sp<ComponentStore>& store,
const sp<::android::hardware::media::bufferpool::V2_0::
- IClientManager>& clientPoolManager) :
- Configurable(new CachedConfigurable(
- std::make_unique<CompIntf>(component->intf()))),
- mComponent(component),
- mInterface(component->intf()),
- mListener(listener),
- mStore(store),
- mBufferPoolSender(clientPoolManager) {
+ IClientManager>& clientPoolManager)
+ : mComponent{component},
+ mInterface{new ComponentInterface(component->intf(), store.get())},
+ mListener{listener},
+ mStore{store},
+ mBufferPoolSender{clientPoolManager} {
// Retrieve supported parameters from store
// TODO: We could cache this per component/interface type
- mInit = init(store.get());
+ mInit = mInterface->status();
}
c2_status_t Component::status() const {
@@ -430,11 +217,9 @@
// Methods from ::android::hardware::media::c2::V1_0::IComponent
Return<Status> Component::queue(const WorkBundle& workBundle) {
- ALOGV("queue -- converting input");
std::list<std::unique_ptr<C2Work>> c2works;
- if (objcpy(&c2works, workBundle) != C2_OK) {
- ALOGV("queue -- corrupted");
+ if (!objcpy(&c2works, workBundle)) {
return Status::CORRUPTED;
}
@@ -446,13 +231,11 @@
}
}
- ALOGV("queue -- calling");
return static_cast<Status>(mComponent->queue_nb(&c2works));
}
Return<void> Component::flush(flush_cb _hidl_cb) {
std::list<std::unique_ptr<C2Work>> c2flushedWorks;
- ALOGV("flush -- calling");
c2_status_t c2res = mComponent->flush_sm(
C2Component::FLUSH_COMPONENT,
&c2flushedWorks);
@@ -473,8 +256,9 @@
WorkBundle flushedWorkBundle;
Status res = static_cast<Status>(c2res);
if (c2res == C2_OK) {
- ALOGV("flush -- converting output");
- res = objcpy(&flushedWorkBundle, c2flushedWorks, &mBufferPoolSender);
+ if (!objcpy(&flushedWorkBundle, c2flushedWorks, &mBufferPoolSender)) {
+ res = Status::CORRUPTED;
+ }
}
_hidl_cb(res, flushedWorkBundle);
yieldBufferQueueBlocks(c2flushedWorks, true);
@@ -482,7 +266,6 @@
}
Return<Status> Component::drain(bool withEos) {
- ALOGV("drain");
return static_cast<Status>(mComponent->drain_nb(withEos ?
C2Component::DRAIN_COMPONENT_WITH_EOS :
C2Component::DRAIN_COMPONENT_NO_EOS));
@@ -512,14 +295,39 @@
return Status::OK;
}
-Return<Status> Component::connectToOmxInputSurface(
+Return<void> Component::connectToInputSurface(
+ const sp<IInputSurface>& inputSurface,
+ connectToInputSurface_cb _hidl_cb) {
+ sp<Sink> sink;
+ {
+ std::lock_guard<std::mutex> lock(mSinkMutex);
+ if (!mSink) {
+ mSink = new Sink(shared_from_this());
+ }
+ sink = mSink;
+ }
+ Status status;
+ sp<IInputSurfaceConnection> connection;
+ auto transStatus = inputSurface->connect(sink,
+ [&status, &connection](Status s,
+ const sp<IInputSurfaceConnection>& c) {
+ status = s;
+ connection = c;
+ }
+ );
+ _hidl_cb(status, connection);
+ return Void();
+}
+
+Return<void> Component::connectToOmxInputSurface(
const sp<HGraphicBufferProducer>& producer,
const sp<::android::hardware::media::omx::V1_0::
- IGraphicBufferSource>& source) {
- // TODO implement
+ IGraphicBufferSource>& source,
+ connectToOmxInputSurface_cb _hidl_cb) {
(void)producer;
(void)source;
- return Status::OMITTED;
+ (void)_hidl_cb;
+ return Void();
}
Return<Status> Component::disconnectFromInputSurface() {
@@ -530,11 +338,12 @@
namespace /* unnamed */ {
struct BlockPoolIntf : public ConfigurableC2Intf {
- BlockPoolIntf(const std::shared_ptr<C2BlockPool>& pool) :
- ConfigurableC2Intf("C2BlockPool:" +
- (pool ? std::to_string(pool->getLocalId()) :
- "null")),
- mPool(pool) {
+ BlockPoolIntf(const std::shared_ptr<C2BlockPool>& pool)
+ : ConfigurableC2Intf{
+ "C2BlockPool:" +
+ (pool ? std::to_string(pool->getLocalId()) : "null"),
+ 0},
+ mPool{pool} {
}
virtual c2_status_t config(
@@ -613,18 +422,15 @@
}
Return<Status> Component::start() {
- ALOGV("start");
return static_cast<Status>(mComponent->start());
}
Return<Status> Component::stop() {
- ALOGV("stop");
InputBufferManager::unregisterFrameData(mListener);
return static_cast<Status>(mComponent->stop());
}
Return<Status> Component::reset() {
- ALOGV("reset");
Status status = static_cast<Status>(mComponent->reset());
{
std::lock_guard<std::mutex> lock(mBlockPoolsMutex);
@@ -635,7 +441,6 @@
}
Return<Status> Component::release() {
- ALOGV("release");
Status status = static_cast<Status>(mComponent->release());
{
std::lock_guard<std::mutex> lock(mBlockPoolsMutex);
@@ -645,8 +450,13 @@
return status;
}
-void Component::setLocalId(const Component::LocalId& localId) {
- mLocalId = localId;
+Return<sp<IComponentInterface>> Component::getInterface() {
+ return sp<IComponentInterface>(mInterface);
+}
+
+std::shared_ptr<C2Component> Component::findLocalComponent(
+ const sp<IInputSink>& sink) {
+ return Component::Sink::findLocalComponent(sink);
}
void Component::initListener(const sp<Component>& self) {
@@ -660,395 +470,7 @@
Component::~Component() {
InputBufferManager::unregisterFrameData(mListener);
- mStore->reportComponentDeath(mLocalId);
-}
-
-Component::InterfaceKey::InterfaceKey(const sp<IComponent>& component) {
- isRemote = component->isRemote();
- if (isRemote) {
- remote = ::android::hardware::toBinder(component);
- } else {
- local = component;
- }
-}
-
-// InputBufferManager implementation
-
-constexpr nsecs_t InputBufferManager::kNotificationPeriodNs;
-
-void InputBufferManager::registerFrameData(
- const sp<IComponentListener>& listener,
- const C2FrameData& input) {
- getInstance()._registerFrameData(listener, input);
-}
-
-void InputBufferManager::unregisterFrameData(
- const wp<IComponentListener>& listener,
- const C2FrameData& input) {
- getInstance()._unregisterFrameData(listener, input);
-}
-
-void InputBufferManager::unregisterFrameData(
- const wp<IComponentListener>& listener) {
- getInstance()._unregisterFrameData(listener);
-}
-
-void InputBufferManager::_registerFrameData(
- const sp<IComponentListener>& listener,
- const C2FrameData& input) {
- uint64_t frameIndex = input.ordinal.frameIndex.peeku();
- ALOGV("InputBufferManager::_registerFrameData called "
- "(listener @ %p, frameIndex = %llu)",
- listener.get(),
- static_cast<long long unsigned>(frameIndex));
- std::lock_guard<std::mutex> lock(mMutex);
-
- std::set<TrackedBuffer> &bufferIds =
- mTrackedBuffersMap[listener][frameIndex];
-
- for (size_t i = 0; i < input.buffers.size(); ++i) {
- if (!input.buffers[i]) {
- ALOGV("InputBufferManager::_registerFrameData: "
- "Input buffer at index %zu is null", i);
- continue;
- }
- const TrackedBuffer &bufferId =
- *bufferIds.emplace(listener, frameIndex, i, input.buffers[i]).
- first;
-
- c2_status_t status = input.buffers[i]->registerOnDestroyNotify(
- onBufferDestroyed,
- const_cast<void*>(reinterpret_cast<const void*>(&bufferId)));
- if (status != C2_OK) {
- ALOGD("InputBufferManager: registerOnDestroyNotify failed "
- "(listener @ %p, frameIndex = %llu, bufferIndex = %zu) "
- "=> %s (%d)",
- listener.get(),
- static_cast<unsigned long long>(frameIndex),
- i,
- asString(status), static_cast<int>(status));
- }
- }
-
- mDeathNotifications.emplace(listener, DeathNotifications());
-}
-
-// Remove a pair (listener, frameIndex) from mTrackedBuffersMap and
-// mDeathNotifications. This implies all bufferIndices are removed.
-//
-// This is called from onWorkDone() and flush().
-void InputBufferManager::_unregisterFrameData(
- const wp<IComponentListener>& listener,
- const C2FrameData& input) {
- uint64_t frameIndex = input.ordinal.frameIndex.peeku();
- ALOGV("InputBufferManager::_unregisterFrameData called "
- "(listener @ %p, frameIndex = %llu)",
- listener.unsafe_get(),
- static_cast<long long unsigned>(frameIndex));
- std::lock_guard<std::mutex> lock(mMutex);
-
- auto findListener = mTrackedBuffersMap.find(listener);
- if (findListener != mTrackedBuffersMap.end()) {
- std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
- = findListener->second;
- auto findFrameIndex = frameIndex2BufferIds.find(frameIndex);
- if (findFrameIndex != frameIndex2BufferIds.end()) {
- std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
- for (const TrackedBuffer& bufferId : bufferIds) {
- std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
- if (buffer) {
- c2_status_t status = buffer->unregisterOnDestroyNotify(
- onBufferDestroyed,
- const_cast<void*>(
- reinterpret_cast<const void*>(&bufferId)));
- if (status != C2_OK) {
- ALOGD("InputBufferManager: "
- "unregisterOnDestroyNotify failed "
- "(listener @ %p, "
- "frameIndex = %llu, "
- "bufferIndex = %zu) "
- "=> %s (%d)",
- bufferId.listener.unsafe_get(),
- static_cast<unsigned long long>(
- bufferId.frameIndex),
- bufferId.bufferIndex,
- asString(status), static_cast<int>(status));
- }
- }
- }
-
- frameIndex2BufferIds.erase(findFrameIndex);
- if (frameIndex2BufferIds.empty()) {
- mTrackedBuffersMap.erase(findListener);
- }
- }
- }
-
- auto findListenerD = mDeathNotifications.find(listener);
- if (findListenerD != mDeathNotifications.end()) {
- DeathNotifications &deathNotifications = findListenerD->second;
- auto findFrameIndex = deathNotifications.indices.find(frameIndex);
- if (findFrameIndex != deathNotifications.indices.end()) {
- std::vector<size_t> &bufferIndices = findFrameIndex->second;
- deathNotifications.count -= bufferIndices.size();
- deathNotifications.indices.erase(findFrameIndex);
- }
- }
-}
-
-// Remove listener from mTrackedBuffersMap and mDeathNotifications. This implies
-// all frameIndices and bufferIndices are removed.
-//
-// This is called when the component cleans up all input buffers, i.e., when
-// reset(), release(), stop() or ~Component() is called.
-void InputBufferManager::_unregisterFrameData(
- const wp<IComponentListener>& listener) {
- ALOGV("InputBufferManager::_unregisterFrameData called (listener @ %p)",
- listener.unsafe_get());
- std::lock_guard<std::mutex> lock(mMutex);
-
- auto findListener = mTrackedBuffersMap.find(listener);
- if (findListener != mTrackedBuffersMap.end()) {
- std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds =
- findListener->second;
- for (auto findFrameIndex = frameIndex2BufferIds.begin();
- findFrameIndex != frameIndex2BufferIds.end();
- ++findFrameIndex) {
- std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
- for (const TrackedBuffer& bufferId : bufferIds) {
- std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
- if (buffer) {
- c2_status_t status = buffer->unregisterOnDestroyNotify(
- onBufferDestroyed,
- const_cast<void*>(
- reinterpret_cast<const void*>(&bufferId)));
- if (status != C2_OK) {
- ALOGD("InputBufferManager: "
- "unregisterOnDestroyNotify failed "
- "(listener @ %p, "
- "frameIndex = %llu, "
- "bufferIndex = %zu) "
- "=> %s (%d)",
- bufferId.listener.unsafe_get(),
- static_cast<unsigned long long>(bufferId.frameIndex),
- bufferId.bufferIndex,
- asString(status), static_cast<int>(status));
- }
- }
- }
- }
- mTrackedBuffersMap.erase(findListener);
- }
-
- mDeathNotifications.erase(listener);
-}
-
-// Move a buffer from mTrackedBuffersMap to mDeathNotifications.
-// This is called when a registered C2Buffer object is destroyed.
-void InputBufferManager::onBufferDestroyed(const C2Buffer* buf, void* arg) {
- getInstance()._onBufferDestroyed(buf, arg);
-}
-
-void InputBufferManager::_onBufferDestroyed(const C2Buffer* buf, void* arg) {
- if (!buf || !arg) {
- ALOGW("InputBufferManager::_onBufferDestroyed called "
- "with null argument(s) (buf @ %p, arg @ %p)",
- buf, arg);
- return;
- }
- TrackedBuffer id(*reinterpret_cast<TrackedBuffer*>(arg));
- ALOGV("InputBufferManager::_onBufferDestroyed called "
- "(listener @ %p, frameIndex = %llu, bufferIndex = %zu)",
- id.listener.unsafe_get(),
- static_cast<unsigned long long>(id.frameIndex),
- id.bufferIndex);
-
- std::lock_guard<std::mutex> lock(mMutex);
-
- auto findListener = mTrackedBuffersMap.find(id.listener);
- if (findListener == mTrackedBuffersMap.end()) {
- ALOGD("InputBufferManager::_onBufferDestroyed received "
- "invalid listener "
- "(listener @ %p, frameIndex = %llu, bufferIndex = %zu)",
- id.listener.unsafe_get(),
- static_cast<unsigned long long>(id.frameIndex),
- id.bufferIndex);
- return;
- }
-
- std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
- = findListener->second;
- auto findFrameIndex = frameIndex2BufferIds.find(id.frameIndex);
- if (findFrameIndex == frameIndex2BufferIds.end()) {
- ALOGD("InputBufferManager::_onBufferDestroyed received "
- "invalid frame index "
- "(listener @ %p, frameIndex = %llu, bufferIndex = %zu)",
- id.listener.unsafe_get(),
- static_cast<unsigned long long>(id.frameIndex),
- id.bufferIndex);
- return;
- }
-
- std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
- auto findBufferId = bufferIds.find(id);
- if (findBufferId == bufferIds.end()) {
- ALOGD("InputBufferManager::_onBufferDestroyed received "
- "invalid buffer index: "
- "(listener @ %p, frameIndex = %llu, bufferIndex = %zu)",
- id.listener.unsafe_get(),
- static_cast<unsigned long long>(id.frameIndex),
- id.bufferIndex);
- }
-
- bufferIds.erase(findBufferId);
- if (bufferIds.empty()) {
- frameIndex2BufferIds.erase(findFrameIndex);
- if (frameIndex2BufferIds.empty()) {
- mTrackedBuffersMap.erase(findListener);
- }
- }
-
- DeathNotifications &deathNotifications = mDeathNotifications[id.listener];
- deathNotifications.indices[id.frameIndex].emplace_back(id.bufferIndex);
- ++deathNotifications.count;
- mOnBufferDestroyed.notify_one();
-}
-
-// Notify the clients about buffer destructions.
-// Return false if all destructions have been notified.
-// Return true and set timeToRetry to the duration to wait for before
-// retrying if some destructions have not been notified.
-bool InputBufferManager::processNotifications(nsecs_t* timeToRetryNs) {
-
- struct Notification {
- sp<IComponentListener> listener;
- hidl_vec<IComponentListener::RenderedFrame> renderedFrames;
- Notification(const sp<IComponentListener>& l, size_t s)
- : listener(l), renderedFrames(s) {}
- };
- std::list<Notification> notifications;
-
- bool retry = false;
- {
- std::lock_guard<std::mutex> lock(mMutex);
- *timeToRetryNs = kNotificationPeriodNs;
- nsecs_t timeNowNs = systemTime();
- for (auto it = mDeathNotifications.begin();
- it != mDeathNotifications.end(); ) {
- sp<IComponentListener> listener = it->first.promote();
- if (!listener) {
- ++it;
- continue;
- }
- DeathNotifications &deathNotifications = it->second;
-
- nsecs_t timeSinceLastNotifiedNs =
- timeNowNs - deathNotifications.lastSentNs;
- // If not enough time has passed since the last callback, leave the
- // notifications for this listener untouched for now and retry
- // later.
- if (timeSinceLastNotifiedNs < kNotificationPeriodNs) {
- retry = true;
- *timeToRetryNs = std::min(*timeToRetryNs,
- kNotificationPeriodNs - timeSinceLastNotifiedNs);
- ALOGV("InputBufferManager: Notifications for "
- "listener @ %p will be postponed.",
- listener.get());
- ++it;
- continue;
- }
-
- // If enough time has passed since the last notification to this
- // listener but there are currently no pending notifications, the
- // listener can be removed from mDeathNotifications---there is no
- // need to keep track of the last notification time anymore.
- if (deathNotifications.count == 0) {
- it = mDeathNotifications.erase(it);
- continue;
- }
-
- // Create the argument for the callback.
- notifications.emplace_back(listener, deathNotifications.count);
- hidl_vec<IComponentListener::RenderedFrame>& renderedFrames =
- notifications.back().renderedFrames;
- size_t i = 0;
- for (std::pair<const uint64_t, std::vector<size_t>>& p :
- deathNotifications.indices) {
- uint64_t frameIndex = p.first;
- const std::vector<size_t> &bufferIndices = p.second;
- for (const size_t& bufferIndex : bufferIndices) {
- IComponentListener::RenderedFrame &renderedFrame
- = renderedFrames[i++];
- renderedFrame.slotId = ~bufferIndex;
- renderedFrame.bufferQueueId = frameIndex;
- renderedFrame.timestampNs = timeNowNs;
- ALOGV("InputBufferManager: "
- "Sending death notification (listener @ %p, "
- "frameIndex = %llu, bufferIndex = %zu)",
- listener.get(),
- static_cast<long long unsigned>(frameIndex),
- bufferIndex);
- }
- }
-
- // Clear deathNotifications for this listener and set retry to true
- // so processNotifications will be called again. This will
- // guarantee that a listener with no pending notifications will
- // eventually be removed from mDeathNotifications after
- // kNotificationPeriodNs nanoseconds has passed.
- retry = true;
- deathNotifications.indices.clear();
- deathNotifications.count = 0;
- deathNotifications.lastSentNs = timeNowNs;
- ++it;
- }
- }
-
- // Call onFramesRendered outside the lock to avoid deadlock.
- for (const Notification& notification : notifications) {
- if (!notification.listener->onFramesRendered(
- notification.renderedFrames).isOk()) {
- // This may trigger if the client has died.
- ALOGD("InputBufferManager: onFramesRendered transaction failed "
- "(listener @ %p)",
- notification.listener.get());
- }
- }
- if (retry) {
- ALOGV("InputBufferManager: Pending death notifications"
- "will be sent in %lldns.",
- static_cast<long long>(*timeToRetryNs));
- }
- return retry;
-}
-
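The deleted processNotifications() above follows the usual "collect under the lock, call out with the lock released" discipline, so an outgoing IPC callback can never deadlock against onBufferDestroyed(). A stripped-down sketch of that shape, with a hypothetical Listener type standing in for the IComponentListener proxy and plain integers standing in for RenderedFrame entries:

#include <map>
#include <mutex>
#include <vector>

// Hypothetical listener; stands in for the IComponentListener proxy.
struct Listener {
    void notify(const std::vector<int>& payload) { (void)payload; }
};

class Notifier {
public:
    void post(Listener* l, int item) {
        std::lock_guard<std::mutex> lock(mMutex);
        mPending[l].push_back(item);
    }

    void flush() {
        // 1. Snapshot and clear the pending work while holding the lock.
        std::map<Listener*, std::vector<int>> batch;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            batch.swap(mPending);
        }
        // 2. Deliver callbacks with the lock released, so a callee that blocks
        //    or re-enters post() cannot deadlock this object.
        for (auto& entry : batch) {
            entry.first->notify(entry.second);
        }
    }

private:
    std::mutex mMutex;
    std::map<Listener*, std::vector<int>> mPending;
};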
-void InputBufferManager::main() {
- ALOGV("InputBufferManager: Starting main thread");
- nsecs_t timeToRetryNs;
- while (true) {
- std::unique_lock<std::mutex> lock(mMutex);
- while (mDeathNotifications.empty()) {
- ALOGV("InputBufferManager: Waiting for buffer deaths");
- mOnBufferDestroyed.wait(lock);
- }
- lock.unlock();
- ALOGV("InputBufferManager: Sending buffer death notifications");
- while (processNotifications(&timeToRetryNs)) {
- std::this_thread::sleep_for(
- std::chrono::nanoseconds(timeToRetryNs));
- ALOGV("InputBufferManager: Sending pending death notifications");
- }
- ALOGV("InputBufferManager: No pending death notifications");
- }
-}
-
-InputBufferManager::InputBufferManager()
- : mMainThread(&InputBufferManager::main, this) {
-}
-
-InputBufferManager& InputBufferManager::getInstance() {
- static InputBufferManager instance{};
- return instance;
+ mStore->reportComponentDeath(this);
}
} // namespace utils
diff --git a/media/codec2/hidl/1.0/utils/ComponentInterface.cpp b/media/codec2/hidl/1.0/utils/ComponentInterface.cpp
new file mode 100644
index 0000000..39e5357
--- /dev/null
+++ b/media/codec2/hidl/1.0/utils/ComponentInterface.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2-ComponentInterface"
+#include <android-base/logging.h>
+
+#include <codec2/hidl/1.0/Component.h>
+#include <codec2/hidl/1.0/ComponentInterface.h>
+#include <codec2/hidl/1.0/ComponentStore.h>
+
+#include <hidl/HidlBinderSupport.h>
+#include <utils/Timers.h>
+
+#include <C2BqBufferPriv.h>
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+
+#include <chrono>
+#include <thread>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace c2 {
+namespace V1_0 {
+namespace utils {
+
+using namespace ::android;
+
+namespace /* unnamed */ {
+
+// Implementation of ConfigurableC2Intf based on C2ComponentInterface
+struct CompIntf : public ConfigurableC2Intf {
+ CompIntf(const std::shared_ptr<C2ComponentInterface>& intf) :
+ ConfigurableC2Intf{intf->getName(), intf->getId()},
+ mIntf{intf} {
+ }
+
+ virtual c2_status_t config(
+ const std::vector<C2Param*>& params,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2SettingResult>>* const failures
+ ) override {
+ return mIntf->config_vb(params, mayBlock, failures);
+ }
+
+ virtual c2_status_t query(
+ const std::vector<C2Param::Index>& indices,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2Param>>* const params
+ ) const override {
+ return mIntf->query_vb({}, indices, mayBlock, params);
+ }
+
+ virtual c2_status_t querySupportedParams(
+ std::vector<std::shared_ptr<C2ParamDescriptor>>* const params
+ ) const override {
+ return mIntf->querySupportedParams_nb(params);
+ }
+
+ virtual c2_status_t querySupportedValues(
+ std::vector<C2FieldSupportedValuesQuery>& fields,
+ c2_blocking_t mayBlock) const override {
+ return mIntf->querySupportedValues_vb(fields, mayBlock);
+ }
+
+protected:
+ std::shared_ptr<C2ComponentInterface> mIntf;
+};
+
+} // unnamed namespace
+
+// ComponentInterface
+ComponentInterface::ComponentInterface(
+ const std::shared_ptr<C2ComponentInterface>& intf,
+ ComponentStore* store)
+ : mInterface{intf},
+ mConfigurable{new CachedConfigurable(std::make_unique<CompIntf>(intf))} {
+ mInit = mConfigurable->init(store);
+}
+
+c2_status_t ComponentInterface::status() const {
+ return mInit;
+}
+
+Return<sp<IConfigurable>> ComponentInterface::getConfigurable() {
+ return mConfigurable;
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace c2
+} // namespace media
+} // namespace hardware
+} // namespace android
+
diff --git a/media/codec2/hidl/1.0/utils/ComponentStore.cpp b/media/codec2/hidl/1.0/utils/ComponentStore.cpp
index 9c05014..bb5faa5 100644
--- a/media/codec2/hidl/1.0/utils/ComponentStore.cpp
+++ b/media/codec2/hidl/1.0/utils/ComponentStore.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,37 +16,25 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "Codec2-ComponentStore"
-#include <log/log.h>
+#include <android-base/logging.h>
#include <codec2/hidl/1.0/ComponentStore.h>
#include <codec2/hidl/1.0/InputSurface.h>
-#include <codec2/hidl/1.0/Component.h>
-#include <codec2/hidl/1.0/ConfigurableC2Intf.h>
#include <codec2/hidl/1.0/types.h>
+#include <android-base/file.h>
#include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
#include <media/stagefright/bqhelper/GraphicBufferSource.h>
+#include <utils/Errors.h>
#include <C2PlatformSupport.h>
#include <util/C2InterfaceHelper.h>
-#include <utils/Errors.h>
-
-#include <android-base/file.h>
-
-#ifdef LOG
-#undef LOG
-#endif
-
-#ifdef PLOG
-#undef PLOG
-#endif
-
-#include <android-base/logging.h>
-
+#include <chrono>
+#include <ctime>
+#include <iomanip>
#include <ostream>
#include <sstream>
-#include <iomanip>
namespace android {
namespace hardware {
@@ -62,12 +50,12 @@
namespace /* unnamed */ {
struct StoreIntf : public ConfigurableC2Intf {
- StoreIntf(const std::shared_ptr<C2ComponentStore>& store) :
- ConfigurableC2Intf(store ? store->getName() : ""),
- mStore(store) {
+ StoreIntf(const std::shared_ptr<C2ComponentStore>& store)
+ : ConfigurableC2Intf{store ? store->getName() : "", 0},
+ mStore{store} {
}
- c2_status_t config(
+ virtual c2_status_t config(
const std::vector<C2Param*> ¶ms,
c2_blocking_t mayBlock,
std::vector<std::unique_ptr<C2SettingResult>> *const failures
@@ -80,7 +68,7 @@
return mStore->config_sm(params, failures);
}
- c2_status_t query(
+ virtual c2_status_t query(
const std::vector<C2Param::Index> &indices,
c2_blocking_t mayBlock,
std::vector<std::unique_ptr<C2Param>> *const params) const override {
@@ -92,13 +80,13 @@
return mStore->query_sm({}, indices, params);
}
- c2_status_t querySupportedParams(
+ virtual c2_status_t querySupportedParams(
std::vector<std::shared_ptr<C2ParamDescriptor>> *const params
) const override {
return mStore->querySupportedParams_nb(params);
}
- c2_status_t querySupportedValues(
+ virtual c2_status_t querySupportedValues(
std::vector<C2FieldSupportedValuesQuery> &fields,
c2_blocking_t mayBlock) const override {
// Assume all params are blocking
@@ -115,9 +103,9 @@
} // unnamed namespace
-ComponentStore::ComponentStore(const std::shared_ptr<C2ComponentStore>& store) :
- Configurable(new CachedConfigurable(std::make_unique<StoreIntf>(store))),
- mStore(store) {
+ComponentStore::ComponentStore(const std::shared_ptr<C2ComponentStore>& store)
+ : mConfigurable{new CachedConfigurable(std::make_unique<StoreIntf>(store))},
+ mStore{store} {
std::shared_ptr<C2ComponentStore> platformStore = android::GetCodec2PlatformComponentStore();
SetPreferredCodec2ComponentStore(store);
@@ -126,7 +114,11 @@
mParamReflector = mStore->getParamReflector();
// Retrieve supported parameters from store
- mInit = init(this);
+ mInit = mConfigurable->init(this);
+}
+
+c2_status_t ComponentStore::status() const {
+ return mInit;
}
c2_status_t ComponentStore::validateSupportedParams(
@@ -172,19 +164,15 @@
component = new Component(c2component, listener, this, pool);
if (!component) {
status = Status::CORRUPTED;
- } else if (component->status() != C2_OK) {
- status = static_cast<Status>(component->status());
} else {
- component->initListener(component);
+ reportComponentBirth(component.get());
if (component->status() != C2_OK) {
status = static_cast<Status>(component->status());
} else {
- std::lock_guard<std::mutex> lock(mComponentRosterMutex);
- component->setLocalId(
- mComponentRoster.emplace(
- Component::InterfaceKey(component),
- c2component)
- .first);
+ component->initListener(component);
+ if (component->status() != C2_OK) {
+ status = static_cast<Status>(component->status());
+ }
}
}
}
@@ -202,7 +190,7 @@
onInterfaceLoaded(c2interface);
interface = new ComponentInterface(c2interface, this);
}
- _hidl_cb((Status)res, interface);
+ _hidl_cb(static_cast<Status>(res), interface);
return Void();
}
@@ -213,27 +201,35 @@
size_t ix = 0;
for (const std::shared_ptr<const C2Component::Traits> &c2trait : c2traits) {
if (c2trait) {
- objcpy(&traits[ix++], *c2trait);
+ if (objcpy(&traits[ix], *c2trait)) {
+ ++ix;
+ } else {
+ break;
+ }
}
}
traits.resize(ix);
- _hidl_cb(traits);
+ _hidl_cb(Status::OK, traits);
return Void();
}
-Return<sp<IInputSurface>> ComponentStore::createInputSurface() {
+Return<void> ComponentStore::createInputSurface(createInputSurface_cb _hidl_cb) {
sp<GraphicBufferSource> source = new GraphicBufferSource();
if (source->initCheck() != OK) {
- return nullptr;
+ _hidl_cb(Status::CORRUPTED, nullptr);
+ return Void();
}
typedef ::android::hardware::graphics::bufferqueue::V1_0::
IGraphicBufferProducer HGbp;
typedef ::android::TWGraphicBufferProducer<HGbp> B2HGbp;
- return new InputSurface(
+ sp<InputSurface> inputSurface = new InputSurface(
this,
std::make_shared<C2ReflectorHelper>(),
new B2HGbp(source->getIGraphicBufferProducer()),
source);
+ _hidl_cb(inputSurface ? Status::OK : Status::NO_MEMORY,
+ inputSurface);
+ return Void();
}
void ComponentStore::onInterfaceLoaded(const std::shared_ptr<C2ComponentInterface> &intf) {
@@ -265,15 +261,25 @@
mUnsupportedStructDescriptors.emplace(coreIndex);
} else {
mStructDescriptors.insert({ coreIndex, structDesc });
- objcpy(&descriptors[dstIx++], *structDesc);
- continue;
+ if (objcpy(&descriptors[dstIx], *structDesc)) {
+ ++dstIx;
+ continue;
+ }
+ res = Status::CORRUPTED;
+ break;
}
}
res = Status::NOT_FOUND;
} else if (item->second) {
- objcpy(&descriptors[dstIx++], *item->second);
+ if (objcpy(&descriptors[dstIx], *item->second)) {
+ ++dstIx;
+ continue;
+ }
+ res = Status::CORRUPTED;
+ break;
} else {
res = Status::NO_MEMORY;
+ break;
}
}
descriptors.resize(dstIx);
@@ -292,29 +298,29 @@
return Status::OMITTED;
}
-void ComponentStore::reportComponentDeath(
- const Component::LocalId& componentLocalId) {
- std::lock_guard<std::mutex> lock(mComponentRosterMutex);
- mComponentRoster.erase(componentLocalId);
+Return<sp<IConfigurable>> ComponentStore::getConfigurable() {
+ return mConfigurable;
}
-std::shared_ptr<C2Component> ComponentStore::findC2Component(
- const sp<IComponent>& component) const {
+// Called from createComponent() after a successful creation of `component`.
+void ComponentStore::reportComponentBirth(Component* component) {
+ ComponentStatus componentStatus;
+ componentStatus.c2Component = component->mComponent;
+ componentStatus.birthTime = std::chrono::system_clock::now();
+
std::lock_guard<std::mutex> lock(mComponentRosterMutex);
- Component::LocalId it = mComponentRoster.find(
- Component::InterfaceKey(component));
- if (it == mComponentRoster.end()) {
- return std::shared_ptr<C2Component>();
- }
- return it->second.lock();
+ mComponentRoster.emplace(component, componentStatus);
}
-// Debug dump
+// Called from within the destructor of `component`. No virtual function calls
+// are made on `component` here.
+void ComponentStore::reportComponentDeath(Component* component) {
+ std::lock_guard<std::mutex> lock(mComponentRosterMutex);
+ mComponentRoster.erase(component);
+}
-namespace /* unnamed */ {
-
-// Dump component traits
-std::ostream& dump(
+// Dumps component traits.
+std::ostream& ComponentStore::dump(
std::ostream& out,
const std::shared_ptr<const C2Component::Traits>& comp) {
@@ -334,25 +340,38 @@
return out;
}
-// Dump component
-std::ostream& dump(
+// Dumps component status.
+std::ostream& ComponentStore::dump(
std::ostream& out,
- const std::shared_ptr<C2Component>& comp) {
+ ComponentStatus& compStatus) {
constexpr const char indent[] = " ";
- std::shared_ptr<C2ComponentInterface> intf = comp->intf();
+ // Print birth time.
+ std::chrono::milliseconds ms =
+ std::chrono::duration_cast<std::chrono::milliseconds>(
+ compStatus.birthTime.time_since_epoch());
+ std::time_t birthTime = std::chrono::system_clock::to_time_t(
+ compStatus.birthTime);
+ std::tm tm = *std::localtime(&birthTime);
+ out << indent << "Creation time: "
+ << std::put_time(&tm, "%Y-%m-%d %H:%M:%S")
+ << '.' << std::setfill('0') << std::setw(3) << ms.count() % 1000
+ << std::endl;
+
+ // Print name and id.
+ std::shared_ptr<C2ComponentInterface> intf = compStatus.c2Component->intf();
if (!intf) {
- out << indent << "Unknown -- null interface" << std::endl;
+ out << indent << "Unknown component -- null interface" << std::endl;
return out;
}
- out << indent << "name: " << intf->getName() << std::endl;
- out << indent << "id: " << intf->getId() << std::endl;
+ out << indent << "Name: " << intf->getName() << std::endl;
+ out << indent << "Id: " << intf->getId() << std::endl;
+
return out;
}
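The creation-time line printed in dump() above combines std::put_time (one-second resolution) with the millisecond remainder of the time_point. The same conversion, packaged as a hypothetical stand-alone helper for clarity:

#include <chrono>
#include <ctime>
#include <iomanip>
#include <sstream>
#include <string>

// Formats a system_clock time_point as "YYYY-MM-DD HH:MM:SS.mmm" (local time).
std::string formatBirthTime(std::chrono::system_clock::time_point tp) {
    using namespace std::chrono;
    auto ms = duration_cast<milliseconds>(tp.time_since_epoch()) % 1000;
    std::time_t t = system_clock::to_time_t(tp);
    std::tm tm = *std::localtime(&t);
    std::ostringstream out;
    out << std::put_time(&tm, "%Y-%m-%d %H:%M:%S")
        << '.' << std::setfill('0') << std::setw(3) << ms.count();
    return out.str();
}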
-} // unnamed namespace
-
+// Dumps information when lshal is called.
Return<void> ComponentStore::debug(
const hidl_handle& handle,
const hidl_vec<hidl_string>& /* args */) {
@@ -387,31 +406,16 @@
}
}
- // Retrieve the list of active components.
- std::list<std::shared_ptr<C2Component>> activeComps;
- {
- std::lock_guard<std::mutex> lock(mComponentRosterMutex);
- auto i = mComponentRoster.begin();
- while (i != mComponentRoster.end()) {
- std::shared_ptr<C2Component> c2comp = i->second.lock();
- if (!c2comp) {
- auto j = i;
- ++i;
- mComponentRoster.erase(j);
- } else {
- ++i;
- activeComps.emplace_back(c2comp);
- }
- }
- }
-
// Dump active components.
- out << indent << "Active components:" << std::endl << std::endl;
- if (activeComps.size() == 0) {
- out << indent << indent << "NONE" << std::endl << std::endl;
- } else {
- for (const std::shared_ptr<C2Component>& c2comp : activeComps) {
- dump(out, c2comp) << std::endl;
+ {
+ out << indent << "Active components:" << std::endl << std::endl;
+ std::lock_guard<std::mutex> lock(mComponentRosterMutex);
+ if (mComponentRoster.size() == 0) {
+ out << indent << indent << "NONE" << std::endl << std::endl;
+ } else {
+ for (auto& pair : mComponentRoster) {
+ dump(out, pair.second) << std::endl;
+ }
}
}
diff --git a/media/codec2/hidl/1.0/utils/Configurable.cpp b/media/codec2/hidl/1.0/utils/Configurable.cpp
index d023ba8..ec9c170 100644
--- a/media/codec2/hidl/1.0/utils/Configurable.cpp
+++ b/media/codec2/hidl/1.0/utils/Configurable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,11 +16,12 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "Codec2-Configurable"
-#include <log/log.h>
+#include <android-base/logging.h>
#include <codec2/hidl/1.0/Configurable.h>
#include <codec2/hidl/1.0/ComponentStore.h>
#include <codec2/hidl/1.0/types.h>
+
#include <C2ParamInternal.h>
namespace android {
@@ -33,8 +34,8 @@
using namespace ::android;
CachedConfigurable::CachedConfigurable(
- std::unique_ptr<ConfigurableC2Intf>&& intf) :
- mIntf(std::move(intf)) {
+ std::unique_ptr<ConfigurableC2Intf>&& intf)
+ : mIntf{std::move(intf)} {
}
c2_status_t CachedConfigurable::init(ComponentStore* store) {
@@ -45,6 +46,10 @@
}
// Methods from ::android::hardware::media::c2::V1_0::IConfigurable follow.
+Return<uint32_t> CachedConfigurable::getId() {
+ return mIntf->getId();
+}
+
Return<void> CachedConfigurable::getName(getName_cb _hidl_cb) {
_hidl_cb(mIntf->getName());
return Void();
@@ -65,9 +70,10 @@
&c2heapParams);
hidl_vec<uint8_t> params;
- createParamsBlob(¶ms, c2heapParams);
+ if (!createParamsBlob(¶ms, c2heapParams)) {
+ LOG(WARNING) << "query -- invalid output params.";
+ }
_hidl_cb(static_cast<Status>(c2res), params);
-
return Void();
}
@@ -78,7 +84,8 @@
// inParams is not writable, so create a copy as config modifies the parameters
hidl_vec<uint8_t> inParamsCopy = inParams;
std::vector<C2Param*> c2params;
- if (parseParamsBlob(&c2params, inParamsCopy) != C2_OK) {
+ if (!parseParamsBlob(&c2params, inParamsCopy)) {
+ LOG(WARNING) << "config -- invalid input params.";
_hidl_cb(Status::CORRUPTED,
hidl_vec<SettingResult>(),
hidl_vec<uint8_t>());
@@ -95,13 +102,20 @@
size_t ix = 0;
for (const std::unique_ptr<C2SettingResult>& c2result : c2failures) {
if (c2result) {
- objcpy(&failures[ix++], *c2result);
+ if (objcpy(&failures[ix], *c2result)) {
+ ++ix;
+ } else {
+ LOG(DEBUG) << "config -- invalid setting results.";
+ break;
+ }
}
}
failures.resize(ix);
}
hidl_vec<uint8_t> outParams;
- createParamsBlob(&outParams, c2params);
+ if (!createParamsBlob(&outParams, c2params)) {
+ LOG(DEBUG) << "config -- invalid output params.";
+ }
_hidl_cb((Status)c2res, failures, outParams);
return Void();
}
@@ -117,7 +131,13 @@
size_t dstIx = 0;
for (size_t srcIx = request.offset(); srcIx < request.endOffset(); ++srcIx) {
if (mSupportedParams[srcIx]) {
- objcpy(&params[dstIx++], *mSupportedParams[srcIx]);
+ if (objcpy(&params[dstIx], *mSupportedParams[srcIx])) {
+ ++dstIx;
+ } else {
+ res = Status::CORRUPTED;
+ LOG(WARNING) << "querySupportedParams -- invalid output params.";
+ break;
+ }
} else {
res = Status::BAD_INDEX;
}
@@ -151,10 +171,15 @@
c2fields,
mayBlock ? C2_MAY_BLOCK : C2_DONT_BLOCK);
hidl_vec<FieldSupportedValuesQueryResult> outFields(inFields.size());
- {
- size_t ix = 0;
- for (const C2FieldSupportedValuesQuery &result : c2fields) {
- objcpy(&outFields[ix++], result);
+ size_t dstIx = 0;
+ for (const C2FieldSupportedValuesQuery &result : c2fields) {
+ if (objcpy(&outFields[dstIx], result)) {
+ ++dstIx;
+ } else {
+ outFields.resize(dstIx);
+ c2res = C2_CORRUPTED;
+ LOG(WARNING) << "querySupportedValues -- invalid output params.";
+ break;
}
}
_hidl_cb((Status)c2res, outFields);
diff --git a/media/codec2/hidl/1.0/utils/InputBufferManager.cpp b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
new file mode 100644
index 0000000..a023a05
--- /dev/null
+++ b/media/codec2/hidl/1.0/utils/InputBufferManager.cpp
@@ -0,0 +1,461 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2-InputBufferManager"
+#include <android-base/logging.h>
+
+#include <codec2/hidl/1.0/InputBufferManager.h>
+#include <codec2/hidl/1.0/types.h>
+
+#include <android/hardware/media/c2/1.0/IComponentListener.h>
+#include <android-base/logging.h>
+
+#include <C2Buffer.h>
+#include <C2Work.h>
+
+#include <chrono>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace c2 {
+namespace V1_0 {
+namespace utils {
+
+using namespace ::android;
+
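+// The public static methods below simply forward each call to the singleton
+// instance obtained from getInstance().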
+void InputBufferManager::registerFrameData(
+ const sp<IComponentListener>& listener,
+ const C2FrameData& input) {
+ getInstance()._registerFrameData(listener, input);
+}
+
+void InputBufferManager::unregisterFrameData(
+ const wp<IComponentListener>& listener,
+ const C2FrameData& input) {
+ getInstance()._unregisterFrameData(listener, input);
+}
+
+void InputBufferManager::unregisterFrameData(
+ const wp<IComponentListener>& listener) {
+ getInstance()._unregisterFrameData(listener);
+}
+
+void InputBufferManager::setNotificationInterval(
+ nsecs_t notificationIntervalNs) {
+ getInstance()._setNotificationInterval(notificationIntervalNs);
+}
+
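+// Add an entry under (listener, frameIndex) in mTrackedBuffersMap for every
+// non-null buffer in |input|, and register onBufferDestroyed() as each
+// buffer's destruction callback. Also make sure a DeathNotifications record
+// exists for |listener|.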
+void InputBufferManager::_registerFrameData(
+ const sp<IComponentListener>& listener,
+ const C2FrameData& input) {
+ uint64_t frameIndex = input.ordinal.frameIndex.peeku();
+ LOG(VERBOSE) << "InputBufferManager::_registerFrameData -- called with "
+ << "listener @ 0x" << std::hex << listener.get()
+ << ", frameIndex = " << std::dec << frameIndex
+ << ".";
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ std::set<TrackedBuffer> &bufferIds =
+ mTrackedBuffersMap[listener][frameIndex];
+
+ for (size_t i = 0; i < input.buffers.size(); ++i) {
+ if (!input.buffers[i]) {
+ LOG(VERBOSE) << "InputBufferManager::_registerFrameData -- "
+ << "Input buffer at index " << i << " is null.";
+ continue;
+ }
+ const TrackedBuffer &bufferId =
+ *bufferIds.emplace(listener, frameIndex, i, input.buffers[i]).
+ first;
+
+ c2_status_t status = input.buffers[i]->registerOnDestroyNotify(
+ onBufferDestroyed,
+ const_cast<void*>(reinterpret_cast<const void*>(&bufferId)));
+ if (status != C2_OK) {
+ LOG(DEBUG) << "InputBufferManager::_registerFrameData -- "
+ << "registerOnDestroyNotify() failed "
+ << "(listener @ 0x" << std::hex << listener.get()
+ << ", frameIndex = " << std::dec << frameIndex
+ << ", bufferIndex = " << i
+ << ") => status = " << status
+ << ".";
+ }
+ }
+
+ mDeathNotifications.emplace(
+ listener,
+ DeathNotifications(
+ mNotificationIntervalNs.load(std::memory_order_relaxed)));
+}
+
+// Remove a pair (listener, frameIndex) from mTrackedBuffersMap and
+// mDeathNotifications. This implies all bufferIndices are removed.
+//
+// This is called from onWorkDone() and flush().
+void InputBufferManager::_unregisterFrameData(
+ const wp<IComponentListener>& listener,
+ const C2FrameData& input) {
+ uint64_t frameIndex = input.ordinal.frameIndex.peeku();
+ LOG(VERBOSE) << "InputBufferManager::_unregisterFrameData -- called with "
+ << "listener @ 0x" << std::hex << listener.unsafe_get()
+ << ", frameIndex = " << std::dec << frameIndex
+ << ".";
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ auto findListener = mTrackedBuffersMap.find(listener);
+ if (findListener != mTrackedBuffersMap.end()) {
+ std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+ = findListener->second;
+ auto findFrameIndex = frameIndex2BufferIds.find(frameIndex);
+ if (findFrameIndex != frameIndex2BufferIds.end()) {
+ std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
+ for (const TrackedBuffer& bufferId : bufferIds) {
+ std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+ if (buffer) {
+ c2_status_t status = buffer->unregisterOnDestroyNotify(
+ onBufferDestroyed,
+ const_cast<void*>(
+ reinterpret_cast<const void*>(&bufferId)));
+ if (status != C2_OK) {
+ LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
+ << "-- unregisterOnDestroyNotify() failed "
+ << "(listener @ 0x"
+ << std::hex
+ << bufferId.listener.unsafe_get()
+ << ", frameIndex = "
+ << std::dec << bufferId.frameIndex
+ << ", bufferIndex = " << bufferId.bufferIndex
+ << ") => status = " << status
+ << ".";
+ }
+ }
+ }
+
+ frameIndex2BufferIds.erase(findFrameIndex);
+ if (frameIndex2BufferIds.empty()) {
+ mTrackedBuffersMap.erase(findListener);
+ }
+ }
+ }
+
+ auto findListenerD = mDeathNotifications.find(listener);
+ if (findListenerD != mDeathNotifications.end()) {
+ DeathNotifications &deathNotifications = findListenerD->second;
+ auto findFrameIndex = deathNotifications.indices.find(frameIndex);
+ if (findFrameIndex != deathNotifications.indices.end()) {
+ std::vector<size_t> &bufferIndices = findFrameIndex->second;
+ deathNotifications.count -= bufferIndices.size();
+ deathNotifications.indices.erase(findFrameIndex);
+ }
+ }
+}
+
+// Remove listener from mTrackedBuffersMap and mDeathNotifications. This implies
+// all frameIndices and bufferIndices are removed.
+//
+// This is called when the component cleans up all input buffers, i.e., when
+// reset(), release(), stop() or ~Component() is called.
+void InputBufferManager::_unregisterFrameData(
+ const wp<IComponentListener>& listener) {
+ LOG(VERBOSE) << "InputBufferManager::_unregisterFrameData -- called with "
+ << "listener @ 0x" << std::hex << listener.unsafe_get()
+ << std::dec << ".";
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ auto findListener = mTrackedBuffersMap.find(listener);
+ if (findListener != mTrackedBuffersMap.end()) {
+ std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds =
+ findListener->second;
+ for (auto findFrameIndex = frameIndex2BufferIds.begin();
+ findFrameIndex != frameIndex2BufferIds.end();
+ ++findFrameIndex) {
+ std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
+ for (const TrackedBuffer& bufferId : bufferIds) {
+ std::shared_ptr<C2Buffer> buffer = bufferId.buffer.lock();
+ if (buffer) {
+ c2_status_t status = buffer->unregisterOnDestroyNotify(
+ onBufferDestroyed,
+ const_cast<void*>(
+ reinterpret_cast<const void*>(&bufferId)));
+ if (status != C2_OK) {
+ LOG(DEBUG) << "InputBufferManager::_unregisterFrameData "
+ << "-- unregisterOnDestroyNotify() failed "
+ << "(listener @ 0x"
+ << std::hex
+ << bufferId.listener.unsafe_get()
+ << ", frameIndex = "
+ << std::dec << bufferId.frameIndex
+ << ", bufferIndex = " << bufferId.bufferIndex
+ << ") => status = " << status
+ << ".";
+ }
+ }
+ }
+ }
+ mTrackedBuffersMap.erase(findListener);
+ }
+
+ mDeathNotifications.erase(listener);
+}
+
+// Set mNotificationIntervalNs.
+void InputBufferManager::_setNotificationInterval(
+ nsecs_t notificationIntervalNs) {
+ mNotificationIntervalNs.store(
+ notificationIntervalNs,
+ std::memory_order_relaxed);
+}
+
+// Move a buffer from mTrackedBuffersMap to mDeathNotifications.
+// This is called when a registered C2Buffer object is destroyed.
+void InputBufferManager::onBufferDestroyed(const C2Buffer* buf, void* arg) {
+ getInstance()._onBufferDestroyed(buf, arg);
+}
+
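+// Body of onBufferDestroyed() above, running on the singleton instance: the
+// destroyed buffer is removed from mTrackedBuffersMap, a pending death
+// notification is recorded in mDeathNotifications, and the worker thread is
+// woken up via mOnBufferDestroyed.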
+void InputBufferManager::_onBufferDestroyed(const C2Buffer* buf, void* arg) {
+ if (!buf || !arg) {
+ LOG(WARNING) << "InputBufferManager::_onBufferDestroyed -- called with "
+ << "null argument (s): "
+ << "buf @ 0x" << std::hex << buf
+ << ", arg @ 0x" << std::hex << arg
+ << std::dec << ".";
+ return;
+ }
+ TrackedBuffer id(*reinterpret_cast<TrackedBuffer*>(arg));
+ LOG(VERBOSE) << "InputBufferManager::_onBufferDestroyed -- called with "
+ << "buf @ 0x" << std::hex << buf
+ << ", arg @ 0x" << std::hex << arg
+ << std::dec << " -- "
+ << "listener @ 0x" << std::hex << id.listener.unsafe_get()
+ << ", frameIndex = " << std::dec << id.frameIndex
+ << ", bufferIndex = " << id.bufferIndex
+ << ".";
+
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ auto findListener = mTrackedBuffersMap.find(id.listener);
+ if (findListener == mTrackedBuffersMap.end()) {
+ LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
+ << "received invalid listener: "
+ << "listener @ 0x" << std::hex << id.listener.unsafe_get()
+ << " (frameIndex = " << std::dec << id.frameIndex
+ << ", bufferIndex = " << id.bufferIndex
+ << ").";
+ return;
+ }
+
+ std::map<uint64_t, std::set<TrackedBuffer>> &frameIndex2BufferIds
+ = findListener->second;
+ auto findFrameIndex = frameIndex2BufferIds.find(id.frameIndex);
+ if (findFrameIndex == frameIndex2BufferIds.end()) {
+ LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
+ << "received invalid frame index: "
+ << "frameIndex = " << id.frameIndex
+ << " (listener @ 0x" << std::hex << id.listener.unsafe_get()
+ << ", bufferIndex = " << std::dec << id.bufferIndex
+ << ").";
+ return;
+ }
+
+ std::set<TrackedBuffer> &bufferIds = findFrameIndex->second;
+ auto findBufferId = bufferIds.find(id);
+ if (findBufferId == bufferIds.end()) {
+ LOG(DEBUG) << "InputBufferManager::_onBufferDestroyed -- "
+ << "received invalid buffer index: "
+ << "bufferIndex = " << id.bufferIndex
+ << " (frameIndex = " << id.frameIndex
+ << ", listener @ 0x" << std::hex << id.listener.unsafe_get()
+ << std::dec << ").";
+ return;
+ }
+
+ bufferIds.erase(findBufferId);
+ if (bufferIds.empty()) {
+ frameIndex2BufferIds.erase(findFrameIndex);
+ if (frameIndex2BufferIds.empty()) {
+ mTrackedBuffersMap.erase(findListener);
+ }
+ }
+
+ DeathNotifications &deathNotifications = mDeathNotifications[id.listener];
+ deathNotifications.indices[id.frameIndex].emplace_back(id.bufferIndex);
+ ++deathNotifications.count;
+ mOnBufferDestroyed.notify_one();
+}
+
+// Notify the clients about buffer destructions.
+// Return false if all destructions have been notified.
+// Return true and set *timeToRetryNs to the number of nanoseconds to wait
+// before retrying if some destructions have not yet been notified.
+bool InputBufferManager::processNotifications(nsecs_t* timeToRetryNs) {
+
+ struct Notification {
+ sp<IComponentListener> listener;
+ hidl_vec<IComponentListener::InputBuffer> inputBuffers;
+ Notification(const sp<IComponentListener>& l, size_t s)
+ : listener(l), inputBuffers(s) {}
+ };
+ std::list<Notification> notifications;
+ nsecs_t notificationIntervalNs =
+ mNotificationIntervalNs.load(std::memory_order_relaxed);
+
+ bool retry = false;
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ *timeToRetryNs = notificationIntervalNs;
+ nsecs_t timeNowNs = systemTime();
+ for (auto it = mDeathNotifications.begin();
+ it != mDeathNotifications.end(); ) {
+ sp<IComponentListener> listener = it->first.promote();
+ if (!listener) {
+ ++it;
+ continue;
+ }
+ DeathNotifications &deathNotifications = it->second;
+
+ nsecs_t timeSinceLastNotifiedNs =
+ timeNowNs - deathNotifications.lastSentNs;
+ // If not enough time has passed since the last callback, leave the
+ // notifications for this listener untouched for now and retry
+ // later.
+ if (timeSinceLastNotifiedNs < notificationIntervalNs) {
+ retry = true;
+ *timeToRetryNs = std::min(*timeToRetryNs,
+ notificationIntervalNs - timeSinceLastNotifiedNs);
+ LOG(VERBOSE) << "InputBufferManager::processNotifications -- "
+ << "Notifications for listener @ "
+ << std::hex << listener.get()
+ << " will be postponed.";
+ ++it;
+ continue;
+ }
+
+ // If enough time has passed since the last notification to this
+ // listener but there are currently no pending notifications, the
+ // listener can be removed from mDeathNotifications---there is no
+ // need to keep track of the last notification time anymore.
+ if (deathNotifications.count == 0) {
+ it = mDeathNotifications.erase(it);
+ continue;
+ }
+
+ // Create the argument for the callback.
+ notifications.emplace_back(listener, deathNotifications.count);
+ hidl_vec<IComponentListener::InputBuffer> &inputBuffers =
+ notifications.back().inputBuffers;
+ size_t i = 0;
+ for (std::pair<const uint64_t, std::vector<size_t>>& p :
+ deathNotifications.indices) {
+ uint64_t frameIndex = p.first;
+ const std::vector<size_t> &bufferIndices = p.second;
+ for (const size_t& bufferIndex : bufferIndices) {
+ IComponentListener::InputBuffer &inputBuffer
+ = inputBuffers[i++];
+ inputBuffer.arrayIndex = bufferIndex;
+ inputBuffer.frameIndex = frameIndex;
+ }
+ }
+
+ // Clear deathNotifications for this listener and set retry to true
+ // so processNotifications will be called again. This will
+ // guarantee that a listener with no pending notifications will
+ // eventually be removed from mDeathNotifications after
+ // mNotificationIntervalNs nanoseconds have passed.
+ retry = true;
+ deathNotifications.indices.clear();
+ deathNotifications.count = 0;
+ deathNotifications.lastSentNs = timeNowNs;
+ ++it;
+ }
+ }
+
+ // Call onInputBuffersReleased() outside the lock to avoid deadlock.
+ for (const Notification& notification : notifications) {
+ if (!notification.listener->onInputBuffersReleased(
+ notification.inputBuffers).isOk()) {
+ // This may trigger if the client has died.
+ LOG(DEBUG) << "InputBufferManager::processNotifications -- "
+ << "failed to send death notifications to "
+ << "listener @ 0x" << std::hex
+ << notification.listener.get()
+ << std::dec << ".";
+ } else {
+#if LOG_NDEBUG == 0
+ std::stringstream inputBufferLog;
+ for (const IComponentListener::InputBuffer& inputBuffer :
+ notification.inputBuffers) {
+ inputBufferLog << " (" << inputBuffer.frameIndex
+ << ", " << inputBuffer.arrayIndex
+ << ")";
+ }
+ LOG(VERBOSE) << "InputBufferManager::processNotifications -- "
+ << "death notifications sent to "
+ << "listener @ 0x" << std::hex
+ << notification.listener.get()
+ << std::dec
+ << " with these (frameIndex, bufferIndex) pairs:"
+ << inputBufferLog.str();
+#endif
+ }
+ }
+#if LOG_NDEBUG == 0
+ if (retry) {
+ LOG(VERBOSE) << "InputBufferManager::processNotifications -- "
+ << "will retry again in " << *timeToRetryNs << "ns.";
+ } else {
+ LOG(VERBOSE) << "InputBufferManager::processNotifications -- "
+ << "no pending death notifications.";
+ }
+#endif
+ return retry;
+}
+
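+// Main loop of the worker thread: wait until at least one death notification
+// is pending, then keep calling processNotifications(), sleeping for the
+// suggested retry interval between calls, until nothing is left to send.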
+void InputBufferManager::main() {
+ LOG(VERBOSE) << "InputBufferManager main -- started.";
+ nsecs_t timeToRetryNs;
+ while (true) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (mDeathNotifications.empty()) {
+ mOnBufferDestroyed.wait(lock);
+ }
+ lock.unlock();
+ while (processNotifications(&timeToRetryNs)) {
+ std::this_thread::sleep_for(
+ std::chrono::nanoseconds(timeToRetryNs));
+ }
+ }
+}
+
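+// The constructor starts the worker thread that runs main().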
+InputBufferManager::InputBufferManager()
+ : mMainThread{&InputBufferManager::main, this} {
+}
+
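+// Return the lazily constructed singleton instance.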
+InputBufferManager& InputBufferManager::getInstance() {
+ static InputBufferManager instance{};
+ return instance;
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace c2
+} // namespace media
+} // namespace hardware
+} // namespace android
diff --git a/media/codec2/hidl/1.0/utils/InputSurface.cpp b/media/codec2/hidl/1.0/utils/InputSurface.cpp
index b669460..85c44c3 100644
--- a/media/codec2/hidl/1.0/utils/InputSurface.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurface.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,12 +16,11 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "Codec2-InputSurface"
-#include <log/log.h>
+#include <android-base/logging.h>
#include <codec2/hidl/1.0/InputSurface.h>
#include <codec2/hidl/1.0/InputSurfaceConnection.h>
-#include <util/C2InterfaceHelper.h>
#include <C2Component.h>
#include <C2Config.h>
@@ -36,16 +35,17 @@
using namespace ::android;
-class InputSurface::ConfigurableImpl : public C2InterfaceHelper {
+// Derived class of C2InterfaceHelper
+class InputSurface::Interface : public C2InterfaceHelper {
public:
- explicit ConfigurableImpl(
+ explicit Interface(
const std::shared_ptr<C2ReflectorHelper> &helper)
: C2InterfaceHelper(helper) {
setDerivedInstance(this);
addParameter(
- DefineParam(mEos, C2_NAME_INPUT_SURFACE_EOS_TUNING)
+ DefineParam(mEos, C2_PARAMKEY_INPUT_SURFACE_EOS)
.withDefault(new C2InputSurfaceEosTuning(false))
.withFields({C2F(mEos, value).oneOf({true, false})})
.withSetter(EosSetter)
@@ -63,33 +63,34 @@
std::shared_ptr<C2InputSurfaceEosTuning> mEos;
};
-namespace {
-
-class ConfigurableWrapper : public ConfigurableC2Intf {
+// Derived class of ConfigurableC2Intf
+class InputSurface::ConfigurableIntf : public ConfigurableC2Intf {
public:
- ConfigurableWrapper(
- const std::shared_ptr<InputSurface::ConfigurableImpl> &impl,
+ ConfigurableIntf(
+ const std::shared_ptr<InputSurface::Interface> &intf,
const sp<GraphicBufferSource> &source)
- : ConfigurableC2Intf("input-surface"),
- mImpl(impl),
+ : ConfigurableC2Intf("input-surface", 0),
+ mIntf(intf),
mSource(source) {
}
- ~ConfigurableWrapper() override = default;
+ virtual ~ConfigurableIntf() override = default;
- c2_status_t query(
+ virtual c2_status_t query(
const std::vector<C2Param::Index> &indices,
c2_blocking_t mayBlock,
- std::vector<std::unique_ptr<C2Param>>* const params) const override {
- return mImpl->query({}, indices, mayBlock, params);
+ std::vector<std::unique_ptr<C2Param>>* const params
+ ) const override {
+ return mIntf->query({}, indices, mayBlock, params);
}
- c2_status_t config(
+ virtual c2_status_t config(
const std::vector<C2Param*> ¶ms,
c2_blocking_t mayBlock,
- std::vector<std::unique_ptr<C2SettingResult>>* const failures) override {
- c2_status_t err = mImpl->config(params, mayBlock, failures);
- if (mImpl->eos()) {
+ std::vector<std::unique_ptr<C2SettingResult>>* const failures
+ ) override {
+ c2_status_t err = mIntf->config(params, mayBlock, failures);
+ if (mIntf->eos()) {
sp<GraphicBufferSource> source = mSource.promote();
if (source == nullptr || source->signalEndOfInputStream() != OK) {
// TODO: put something in |failures|
@@ -100,202 +101,71 @@
return err;
}
- c2_status_t querySupportedParams(
- std::vector<std::shared_ptr<C2ParamDescriptor>>* const params) const override {
- return mImpl->querySupportedParams(params);
+ virtual c2_status_t querySupportedParams(
+ std::vector<std::shared_ptr<C2ParamDescriptor>>* const params
+ ) const override {
+ return mIntf->querySupportedParams(params);
}
- c2_status_t querySupportedValues(
+ virtual c2_status_t querySupportedValues(
std::vector<C2FieldSupportedValuesQuery>& fields,
c2_blocking_t mayBlock) const override {
- return mImpl->querySupportedValues(fields, mayBlock);
+ return mIntf->querySupportedValues(fields, mayBlock);
}
private:
- const std::shared_ptr<InputSurface::ConfigurableImpl> mImpl;
+ const std::shared_ptr<InputSurface::Interface> mIntf;
wp<GraphicBufferSource> mSource;
};
-} // namespace
-
-
-Return<void> InputSurface::connectToComponent(
- const sp<IComponent>& component,
- connectToComponent_cb _hidl_cb) {
- Status status;
- sp<InputSurfaceConnection> conn;
- if (!component) {
- status = Status::BAD_VALUE;
- } else {
- std::shared_ptr<C2Component> comp = mStore->findC2Component(component);
- if (!comp) {
- conn = new InputSurfaceConnection(mSource, component);
- } else {
- conn = new InputSurfaceConnection(mSource, comp);
- }
- if (!conn->init()) {
- conn = nullptr;
- status = Status::BAD_VALUE;
- } else {
- status = Status::OK;
- }
- }
- _hidl_cb(status, conn);
- return Void();
+Return<sp<InputSurface::HGraphicBufferProducer>> InputSurface::getGraphicBufferProducer() {
+ return mProducer;
}
Return<sp<IConfigurable>> InputSurface::getConfigurable() {
return mConfigurable;
}
-// Derived methods from IGraphicBufferProducer
-
-Return<void> InputSurface::requestBuffer(
- int32_t slot,
- requestBuffer_cb _hidl_cb) {
- return mBase->requestBuffer(slot, _hidl_cb);
-}
-
-Return<int32_t> InputSurface::setMaxDequeuedBufferCount(
- int32_t maxDequeuedBuffers) {
- return mBase->setMaxDequeuedBufferCount(maxDequeuedBuffers);
-}
-
-Return<int32_t> InputSurface::setAsyncMode(
- bool async) {
- return mBase->setAsyncMode(async);
-}
-
-Return<void> InputSurface::dequeueBuffer(
- uint32_t width,
- uint32_t height,
- PixelFormat format,
- uint32_t usage,
- bool getFrameTimestamps,
- dequeueBuffer_cb _hidl_cb) {
- return mBase->dequeueBuffer(
- width, height, format, usage, getFrameTimestamps, _hidl_cb);
-}
-
-Return<int32_t> InputSurface::detachBuffer(
- int32_t slot) {
- return mBase->detachBuffer(slot);
-}
-
-Return<void> InputSurface::detachNextBuffer(
- detachNextBuffer_cb _hidl_cb) {
- return mBase->detachNextBuffer(_hidl_cb);
-}
-
-Return<void> InputSurface::attachBuffer(
- const AnwBuffer& buffer,
- attachBuffer_cb _hidl_cb) {
- return mBase->attachBuffer(buffer, _hidl_cb);
-}
-
-Return<void> InputSurface::queueBuffer(
- int32_t slot,
- const QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) {
- return mBase->queueBuffer(slot, input, _hidl_cb);
-}
-
-Return<int32_t> InputSurface::cancelBuffer(
- int32_t slot,
- const hidl_handle& fence) {
- return mBase->cancelBuffer(slot, fence);
-}
-
-Return<void> InputSurface::query(
- int32_t what,
- query_cb _hidl_cb) {
- return mBase->query(what, _hidl_cb);
-}
-
Return<void> InputSurface::connect(
- const sp<HProducerListener>& listener,
- int32_t api,
- bool producerControlledByApp,
+ const sp<IInputSink>& sink,
connect_cb _hidl_cb) {
- return mBase->connect(listener, api, producerControlledByApp, _hidl_cb);
+ Status status;
+ sp<InputSurfaceConnection> connection;
+ if (!sink) {
+ _hidl_cb(Status::BAD_VALUE, nullptr);
+ return Void();
+ }
+ std::shared_ptr<C2Component> comp = Component::findLocalComponent(sink);
+ if (comp) {
+ connection = new InputSurfaceConnection(mSource, comp, mStore);
+ } else {
+ connection = new InputSurfaceConnection(mSource, sink, mStore);
+ }
+ if (!connection->init()) {
+ connection = nullptr;
+ status = Status::BAD_VALUE;
+ } else {
+ status = Status::OK;
+ }
+ _hidl_cb(status, connection);
+ return Void();
}
-Return<int32_t> InputSurface::disconnect(
- int32_t api,
- DisconnectMode mode) {
- return mBase->disconnect(api, mode);
-}
-
-Return<int32_t> InputSurface::setSidebandStream(
- const hidl_handle& stream) {
- return mBase->setSidebandStream(stream);
-}
-
-Return<void> InputSurface::allocateBuffers(
- uint32_t width,
- uint32_t height,
- PixelFormat format,
- uint32_t usage) {
- return mBase->allocateBuffers(width, height, format, usage);
-}
-
-Return<int32_t> InputSurface::allowAllocation(
- bool allow) {
- return mBase->allowAllocation(allow);
-}
-
-Return<int32_t> InputSurface::setGenerationNumber(
- uint32_t generationNumber) {
- return mBase->setGenerationNumber(generationNumber);
-}
-
-Return<void> InputSurface::getConsumerName(
- getConsumerName_cb _hidl_cb) {
- return mBase->getConsumerName(_hidl_cb);
-}
-
-Return<int32_t> InputSurface::setSharedBufferMode(
- bool sharedBufferMode) {
- return mBase->setSharedBufferMode(sharedBufferMode);
-}
-
-Return<int32_t> InputSurface::setAutoRefresh(
- bool autoRefresh) {
- return mBase->setAutoRefresh(autoRefresh);
-}
-
-Return<int32_t> InputSurface::setDequeueTimeout(
- int64_t timeoutNs) {
- return mBase->setDequeueTimeout(timeoutNs);
-}
-
-Return<void> InputSurface::getLastQueuedBuffer(
- getLastQueuedBuffer_cb _hidl_cb) {
- return mBase->getLastQueuedBuffer(_hidl_cb);
-}
-
-Return<void> InputSurface::getFrameTimestamps(
- getFrameTimestamps_cb _hidl_cb) {
- return mBase->getFrameTimestamps(_hidl_cb);
-}
-
-Return<void> InputSurface::getUniqueId(
- getUniqueId_cb _hidl_cb) {
- return mBase->getUniqueId(_hidl_cb);
-}
+// Derived methods from IGraphicBufferProducer
// Constructor is exclusive to ComponentStore.
InputSurface::InputSurface(
const sp<ComponentStore>& store,
const std::shared_ptr<C2ReflectorHelper>& reflector,
- const sp<HGraphicBufferProducer>& base,
- const sp<GraphicBufferSource>& source) :
- mStore(store),
- mBase(base),
- mSource(source),
- mHelper(std::make_shared<ConfigurableImpl>(reflector)),
- mConfigurable(new CachedConfigurable(
- std::make_unique<ConfigurableWrapper>(mHelper, source))) {
+ const sp<HGraphicBufferProducer>& producer,
+ const sp<GraphicBufferSource>& source)
+ : mStore{store},
+ mProducer{producer},
+ mSource{source},
+ mIntf{std::make_shared<Interface>(reflector)},
+ mConfigurable{new CachedConfigurable(
+ std::make_unique<ConfigurableIntf>(
+ mIntf, source))} {
mConfigurable->init(store.get());
}
diff --git a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
index ba7c2d6..c9932ef 100644
--- a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,9 +16,10 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "Codec2-InputSurfaceConnection"
-#include <log/log.h>
+#include <android-base/logging.h>
#include <codec2/hidl/1.0/InputSurfaceConnection.h>
+#include <codec2/hidl/1.0/InputSurfaceConnection.h>
#include <memory>
#include <list>
@@ -65,51 +66,74 @@
} // unnamed namespace
+// Derived class of ComponentWrapper for use with
+// GraphicBufferSource::configure().
+//
struct InputSurfaceConnection::Impl : public ComponentWrapper {
+
Impl(const sp<GraphicBufferSource>& source,
- const std::shared_ptr<C2Component>& comp) :
- mSource(source), mComp(comp), mRemoteComp(),
- mFrameIndex(0) {
- std::shared_ptr<C2ComponentInterface> intf = comp->intf();
- mCompName = intf ? intf->getName() : "";
+ const std::shared_ptr<C2Component>& localComp)
+ : mSource{source}, mLocalComp{localComp}, mSink{}, mFrameIndex{0} {
+ std::shared_ptr<C2ComponentInterface> intf = localComp->intf();
+ mSinkName = intf ? intf->getName() : "";
}
Impl(const sp<GraphicBufferSource>& source,
- const sp<IComponent>& comp) :
- mSource(source), mComp(), mRemoteComp(comp),
- mFrameIndex(0) {
- Return<void> transStatus = comp->getName(
- [this](const hidl_string& name) {
- mCompName = name.c_str();
+ const sp<IInputSink>& sink)
+ : mSource{source}, mLocalComp{}, mSink{sink}, mFrameIndex{0} {
+ Return<sp<IConfigurable>> transResult = sink->getConfigurable();
+ if (!transResult.isOk()) {
+ LOG(ERROR) << "Remote sink is dead.";
+ return;
+ }
+ mSinkConfigurable =
+ static_cast<sp<IConfigurable>>(transResult);
+ if (!mSinkConfigurable) {
+ LOG(ERROR) << "Remote sink is not configurable.";
+ mSinkName = "";
+ return;
+ }
+
+ hidl_string name;
+ Return<void> transStatus = mSinkConfigurable->getName(
+ [&name](const hidl_string& n) {
+ name = n;
});
if (!transStatus.isOk()) {
- ALOGD("getName -- Cannot obtain remote component name.");
+ LOG(ERROR) << "Remote sink's configurable is dead.";
+ mSinkName = "";
+ return;
}
+ mSinkName = name.c_str();
}
- virtual ~Impl() = default;
+ virtual ~Impl() {
+ mSource->stop();
+ mSource->release();
+ }
bool init() {
- sp<GraphicBufferSource> source = mSource.promote();
- if (source == nullptr) {
+ if (mSource == nullptr) {
return false;
}
- status_t err = source->initCheck();
+ status_t err = mSource->initCheck();
if (err != OK) {
- ALOGD("Impl::init -- GBS init failed: %d", err);
+ LOG(WARNING) << "Impl::init -- GraphicBufferSource init failed: "
+ << "status = " << err << ".";
return false;
}
// TODO: read settings properly from the interface
- C2VideoSizeStreamTuning::input inputSize;
+ C2StreamPictureSizeInfo::input inputSize;
C2StreamUsageTuning::input usage;
- c2_status_t c2Status = compQuery({ &inputSize, &usage },
+ c2_status_t c2Status = queryFromSink({ &inputSize, &usage },
{},
C2_MAY_BLOCK,
nullptr);
if (c2Status != C2_OK) {
- ALOGD("Impl::init -- cannot query information from "
- "the component interface: %s.", asString(c2Status));
+ LOG(WARNING) << "Impl::init -- cannot query information from "
+ "the component interface: "
+ << "status = " << asString(c2Status) << ".";
return false;
}
@@ -122,26 +146,27 @@
// asGrallocUsage();
uint32_t grallocUsage =
- mCompName.compare(0, 11, "c2.android.") == 0 ?
+ mSinkName.compare(0, 11, "c2.android.") == 0 ?
GRALLOC_USAGE_SW_READ_OFTEN :
GRALLOC_USAGE_HW_VIDEO_ENCODER;
- err = source->configure(
+ err = mSource->configure(
this, dataSpace, kBufferCount,
inputSize.width, inputSize.height,
grallocUsage);
if (err != OK) {
- ALOGD("Impl::init -- GBS configure failed: %d", err);
+ LOG(WARNING) << "Impl::init -- GBS configure failed: "
+ << "status = " << err << ".";
return false;
}
for (int32_t i = 0; i < kBufferCount; ++i) {
- if (!source->onInputBufferAdded(i).isOk()) {
- ALOGD("Impl::init: populating GBS slots failed");
+ if (!mSource->onInputBufferAdded(i).isOk()) {
+ LOG(WARNING) << "Impl::init: failed to populate GBS slots.";
return false;
}
}
- if (!source->start().isOk()) {
- ALOGD("Impl::init -- GBS start failed");
+ if (!mSource->start().isOk()) {
+ LOG(WARNING) << "Impl::init -- GBS failed to start.";
return false;
}
mAllocatorMutex.lock();
@@ -150,7 +175,8 @@
&mAllocator);
mAllocatorMutex.unlock();
if (c2err != OK) {
- ALOGD("Impl::init -- failed to fetch gralloc allocator: %d", c2err);
+ LOG(WARNING) << "Impl::init -- failed to fetch gralloc allocator: "
+ << "status = " << asString(c2err) << ".";
return false;
}
return true;
@@ -162,13 +188,13 @@
const sp<GraphicBuffer>& buffer,
int64_t timestamp,
int fenceFd) override {
- ALOGV("Impl::submitBuffer -- bufferId = %d", bufferId);
+ LOG(VERBOSE) << "Impl::submitBuffer -- bufferId = " << bufferId << ".";
// TODO: Use fd to construct fence
(void)fenceFd;
std::shared_ptr<C2GraphicAllocation> alloc;
C2Handle* handle = WrapNativeCodec2GrallocHandle(
- native_handle_clone(buffer->handle),
+ buffer->handle,
buffer->width, buffer->height,
buffer->format, buffer->usage, buffer->stride);
mAllocatorMutex.lock();
@@ -190,9 +216,8 @@
// TODO: fence
new Buffer2D(block->share(
C2Rect(block->width(), block->height()), ::C2Fence())),
- [bufferId, src = mSource](C2Buffer* ptr) {
+ [bufferId, source = mSource](C2Buffer* ptr) {
delete ptr;
- sp<GraphicBufferSource> source = src.promote();
if (source != nullptr) {
// TODO: fence
(void)source->onInputBufferEmptied(bufferId, -1);
@@ -204,12 +229,13 @@
std::list<std::unique_ptr<C2Work>> items;
items.push_back(std::move(work));
- err = compQueue(&items);
+ err = queueToSink(&items);
return (err == C2_OK) ? OK : UNKNOWN_ERROR;
}
- virtual status_t submitEos(int32_t /* bufferId */) override {
- ALOGV("Impl::submitEos");
+ virtual status_t submitEos(int32_t bufferId) override {
+ LOG(VERBOSE) << "Impl::submitEos -- bufferId = " << bufferId << ".";
+ (void)bufferId;
std::unique_ptr<C2Work> work(new C2Work);
work->input.flags = (C2FrameData::flags_t)0;
@@ -221,11 +247,11 @@
std::list<std::unique_ptr<C2Work>> items;
items.push_back(std::move(work));
- c2_status_t err = compQueue(&items);
+ c2_status_t err = queueToSink(&items);
return (err == C2_OK) ? OK : UNKNOWN_ERROR;
}
- void dispatchDataSpaceChanged(
+ virtual void dispatchDataSpaceChanged(
int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
// TODO
(void)dataSpace;
@@ -233,36 +259,63 @@
(void)pixelFormat;
}
+ // Configurable interface for InputSurfaceConnection::Impl.
+ //
+ // This class is declared as an inner class so that it will have access to
+ // all Impl's members.
+ struct ConfigurableIntf : public ConfigurableC2Intf {
+ sp<Impl> mConnection;
+ ConfigurableIntf(const sp<Impl>& connection)
+ : ConfigurableC2Intf{"input-surface-connection", 0},
+ mConnection{connection} {}
+ virtual c2_status_t config(
+ const std::vector<C2Param*> ¶ms,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures
+ ) override;
+ virtual c2_status_t query(
+ const std::vector<C2Param::Index> &indices,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2Param>> *const params) const override;
+ virtual c2_status_t querySupportedParams(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params
+ ) const override;
+ virtual c2_status_t querySupportedValues(
+ std::vector<C2FieldSupportedValuesQuery> &fields,
+ c2_blocking_t mayBlock) const override;
+ };
+
private:
- c2_status_t compQuery(
+ c2_status_t queryFromSink(
const std::vector<C2Param*> &stackParams,
const std::vector<C2Param::Index> &heapParamIndices,
c2_blocking_t mayBlock,
std::vector<std::unique_ptr<C2Param>>* const heapParams) {
- std::shared_ptr<C2Component> comp = mComp.lock();
- if (comp) {
- std::shared_ptr<C2ComponentInterface> intf = comp->intf();
+ if (mLocalComp) {
+ std::shared_ptr<C2ComponentInterface> intf = mLocalComp->intf();
if (intf) {
return intf->query_vb(stackParams,
heapParamIndices,
mayBlock,
heapParams);
} else {
- ALOGD("compQuery -- component does not have an interface.");
+ LOG(ERROR) << "queryFromSink -- "
+ << "component does not have an interface.";
return C2_BAD_STATE;
}
}
- if (!mRemoteComp) {
- ALOGD("compQuery -- component no longer exists.");
- return C2_BAD_STATE;
- }
+
+ CHECK(mSink) << "-- queryFromSink "
+ << "-- connection has no sink.";
+ CHECK(mSinkConfigurable) << "-- queryFromSink "
+ << "-- sink has no configurable.";
hidl_vec<ParamIndex> indices(
stackParams.size() + heapParamIndices.size());
size_t numIndices = 0;
for (C2Param* const& stackParam : stackParams) {
if (!stackParam) {
- ALOGD("compQuery -- null stack param encountered.");
+ LOG(DEBUG) << "queryFromSink -- null stack param encountered.";
continue;
}
indices[numIndices++] = static_cast<ParamIndex>(stackParam->index());
@@ -277,22 +330,22 @@
heapParams->reserve(heapParams->size() + numIndices);
}
c2_status_t status;
- Return<void> transStatus = mRemoteComp->query(
+ Return<void> transStatus = mSinkConfigurable->query(
indices,
mayBlock == C2_MAY_BLOCK,
[&status, &numStackIndices, &stackParams, heapParams](
Status s, const Params& p) {
status = static_cast<c2_status_t>(s);
if (status != C2_OK && status != C2_BAD_INDEX) {
- ALOGD("compQuery -- call failed: %s.", asString(status));
+ LOG(DEBUG) << "queryFromSink -- call failed: "
+ << "status = " << asString(status) << ".";
return;
}
std::vector<C2Param*> paramPointers;
- c2_status_t parseStatus = parseParamsBlob(&paramPointers, p);
- if (parseStatus != C2_OK) {
- ALOGD("compQuery -- error while parsing params: %s.",
- asString(parseStatus));
- status = parseStatus;
+ if (!parseParamsBlob(&paramPointers, p)) {
+ LOG(DEBUG) << "queryFromSink -- error while "
+ << "parsing params.";
+ status = C2_CORRUPTED;
return;
}
size_t i = 0;
@@ -302,7 +355,8 @@
if (numStackIndices > 0) {
--numStackIndices;
if (!paramPointer) {
- ALOGD("compQuery -- null stack param.");
+ LOG(DEBUG) << "queryFromSink -- "
+ "null stack param.";
++it;
continue;
}
@@ -313,25 +367,27 @@
CHECK(i < stackParams.size());
if (stackParams[i]->index() !=
paramPointer->index()) {
- ALOGD("compQuery -- param skipped. index = %d",
- static_cast<int>(
- stackParams[i]->index()));
+ LOG(DEBUG) << "queryFromSink -- "
+ "param skipped (index = "
+ << stackParams[i]->index() << ").";
stackParams[i++]->invalidate();
continue;
}
if (!stackParams[i++]->updateFrom(*paramPointer)) {
- ALOGD("compQuery -- param update failed: "
- "index = %d.",
- static_cast<int>(paramPointer->index()));
+ LOG(DEBUG) << "queryFromSink -- "
+ "param update failed (index = "
+ << paramPointer->index() << ").";
}
} else {
if (!paramPointer) {
- ALOGD("compQuery -- null heap param.");
+ LOG(DEBUG) << "queryFromSink -- "
+ "null heap param.";
++it;
continue;
}
if (!heapParams) {
- ALOGD("compQuery -- too many stack params.");
+ LOG(WARNING) << "queryFromSink -- "
+ "too many stack params.";
break;
}
heapParams->emplace_back(C2Param::Copy(*paramPointer));
@@ -340,96 +396,130 @@
}
});
if (!transStatus.isOk()) {
- ALOGD("compQuery -- transaction failed.");
+ LOG(ERROR) << "queryFromSink -- transaction failed.";
return C2_CORRUPTED;
}
return status;
}
- c2_status_t compQueue(std::list<std::unique_ptr<C2Work>>* const items) {
- std::shared_ptr<C2Component> comp = mComp.lock();
- if (comp) {
- return comp->queue_nb(items);
+ c2_status_t queueToSink(std::list<std::unique_ptr<C2Work>>* const items) {
+ if (mLocalComp) {
+ return mLocalComp->queue_nb(items);
}
+ CHECK(mSink) << "-- queueToSink "
+ << "-- connection has no sink.";
+
WorkBundle workBundle;
- Status hidlStatus = objcpy(&workBundle, *items, nullptr);
- if (hidlStatus != Status::OK) {
- ALOGD("compQueue -- bad input.");
+ if (!objcpy(&workBundle, *items, nullptr)) {
+ LOG(ERROR) << "queueToSink -- bad input.";
return C2_CORRUPTED;
}
- Return<Status> transStatus = mRemoteComp->queue(workBundle);
+ Return<Status> transStatus = mSink->queue(workBundle);
if (!transStatus.isOk()) {
- ALOGD("compQueue -- transaction failed.");
+ LOG(ERROR) << "queueToSink -- transaction failed.";
return C2_CORRUPTED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGV("compQueue -- call failed: %s.", asString(status));
+ LOG(DEBUG) << "queueToSink -- call failed: "
+ << asString(status);
}
return status;
}
- wp<GraphicBufferSource> mSource;
- std::weak_ptr<C2Component> mComp;
- sp<IComponent> mRemoteComp;
- std::string mCompName;
+ sp<GraphicBufferSource> mSource;
+ std::shared_ptr<C2Component> mLocalComp;
+ sp<IInputSink> mSink;
+ sp<IConfigurable> mSinkConfigurable;
+ std::string mSinkName;
// Needed for ComponentWrapper implementation
std::mutex mAllocatorMutex;
std::shared_ptr<C2Allocator> mAllocator;
std::atomic_uint64_t mFrameIndex;
+
};
InputSurfaceConnection::InputSurfaceConnection(
const sp<GraphicBufferSource>& source,
- const std::shared_ptr<C2Component>& comp) :
- mSource(source),
- mImpl(new Impl(source, comp)) {
+ const std::shared_ptr<C2Component>& comp,
+ const sp<ComponentStore>& store)
+ : mImpl{new Impl(source, comp)},
+ mConfigurable{new CachedConfigurable(
+ std::make_unique<Impl::ConfigurableIntf>(mImpl))} {
+ mConfigurable->init(store.get());
}
InputSurfaceConnection::InputSurfaceConnection(
const sp<GraphicBufferSource>& source,
- const sp<IComponent>& comp) :
- mSource(source),
- mImpl(new Impl(source, comp)) {
-}
-
-InputSurfaceConnection::~InputSurfaceConnection() {
- if (mSource) {
- (void)mSource->stop();
- (void)mSource->release();
- mSource.clear();
- }
- mImpl.clear();
-}
-
-bool InputSurfaceConnection::init() {
- mMutex.lock();
- sp<Impl> impl = mImpl;
- mMutex.unlock();
-
- if (!impl) {
- return false;
- }
- return impl->init();
+ const sp<IInputSink>& sink,
+ const sp<ComponentStore>& store)
+ : mImpl{new Impl(source, sink)},
+ mConfigurable{new CachedConfigurable(
+ std::make_unique<Impl::ConfigurableIntf>(mImpl))} {
+ mConfigurable->init(store.get());
}
Return<Status> InputSurfaceConnection::disconnect() {
- ALOGV("disconnect");
- mMutex.lock();
- if (mSource) {
- (void)mSource->stop();
- (void)mSource->release();
- mSource.clear();
- }
- mImpl.clear();
- mMutex.unlock();
- ALOGV("disconnected");
+ std::lock_guard<std::mutex> lock(mImplMutex);
+ mImpl = nullptr;
return Status::OK;
}
+InputSurfaceConnection::~InputSurfaceConnection() {
+ mImpl = nullptr;
+}
+
+bool InputSurfaceConnection::init() {
+ std::lock_guard<std::mutex> lock(mImplMutex);
+ return mImpl->init();
+}
+
+Return<sp<IConfigurable>> InputSurfaceConnection::getConfigurable() {
+ return mConfigurable;
+}
+
+// Configurable interface for InputSurfaceConnection::Impl
+c2_status_t InputSurfaceConnection::Impl::ConfigurableIntf::config(
+ const std::vector<C2Param*> ¶ms,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+ // TODO: implement
+ (void)params;
+ (void)mayBlock;
+ (void)failures;
+ return C2_OK;
+}
+
+c2_status_t InputSurfaceConnection::Impl::ConfigurableIntf::query(
+ const std::vector<C2Param::Index> &indices,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2Param>> *const params) const {
+ // TODO: implement
+ (void)indices;
+ (void)mayBlock;
+ (void)params;
+ return C2_OK;
+}
+
+c2_status_t InputSurfaceConnection::Impl::ConfigurableIntf::querySupportedParams(
+ std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const {
+ // TODO: implement
+ (void)params;
+ return C2_OK;
+}
+
+c2_status_t InputSurfaceConnection::Impl::ConfigurableIntf::querySupportedValues(
+ std::vector<C2FieldSupportedValuesQuery> &fields,
+ c2_blocking_t mayBlock) const {
+ // TODO: implement
+ (void)fields;
+ (void)mayBlock;
+ return C2_OK;
+}
+
} // namespace utils
} // namespace V1_0
} // namespace c2
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
index 0908226..4ac95c5 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,13 +17,15 @@
#ifndef CODEC2_HIDL_V1_0_UTILS_COMPONENT_H
#define CODEC2_HIDL_V1_0_UTILS_COMPONENT_H
+#include <codec2/hidl/1.0/ComponentInterface.h>
#include <codec2/hidl/1.0/Configurable.h>
#include <codec2/hidl/1.0/types.h>
#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
+#include <android/hardware/media/c2/1.0/IComponent.h>
+#include <android/hardware/media/c2/1.0/IComponentInterface.h>
#include <android/hardware/media/c2/1.0/IComponentListener.h>
#include <android/hardware/media/c2/1.0/IComponentStore.h>
-#include <android/hardware/media/c2/1.0/IComponent.h>
#include <hidl/Status.h>
#include <hwbinder/IBinder.h>
@@ -31,9 +33,9 @@
#include <C2Buffer.h>
#include <C2.h>
-#include <list>
#include <map>
#include <memory>
+#include <mutex>
namespace android {
namespace hardware {
@@ -54,19 +56,8 @@
struct ComponentStore;
-struct ComponentInterface : public Configurable<IComponentInterface> {
- ComponentInterface(
- const std::shared_ptr<C2ComponentInterface>& interface,
- const sp<ComponentStore>& store);
- c2_status_t status() const;
-
-protected:
- c2_status_t mInit;
- std::shared_ptr<C2ComponentInterface> mInterface;
- sp<ComponentStore> mStore;
-};
-
-struct Component : public Configurable<IComponent> {
+struct Component : public IComponent,
+ public std::enable_shared_from_this<Component> {
Component(
const std::shared_ptr<C2Component>&,
const sp<IComponentListener>& listener,
@@ -85,10 +76,14 @@
virtual Return<Status> setOutputSurface(
uint64_t blockPoolId,
const sp<HGraphicBufferProducer>& surface) override;
- virtual Return<Status> connectToOmxInputSurface(
+ virtual Return<void> connectToInputSurface(
+ const sp<IInputSurface>& inputSurface,
+ connectToInputSurface_cb _hidl_cb) override;
+ virtual Return<void> connectToOmxInputSurface(
const sp<HGraphicBufferProducer>& producer,
const sp<::android::hardware::media::omx::V1_0::
- IGraphicBufferSource>& source) override;
+ IGraphicBufferSource>& source,
+ connectToOmxInputSurface_cb _hidl_cb) override;
virtual Return<Status> disconnectFromInputSurface() override;
virtual Return<void> createBlockPool(
uint32_t allocatorId,
@@ -98,63 +93,34 @@
virtual Return<Status> stop() override;
virtual Return<Status> reset() override;
virtual Return<Status> release() override;
+ virtual Return<sp<IComponentInterface>> getInterface() override;
+
+ // Returns a C2Component associated with the given sink if the sink is indeed
+ // a local component. Returns nullptr otherwise.
+ //
+ // This function is used by InputSurface::connect().
+ static std::shared_ptr<C2Component> findLocalComponent(
+ const sp<IInputSink>& sink);
protected:
c2_status_t mInit;
std::shared_ptr<C2Component> mComponent;
- std::shared_ptr<C2ComponentInterface> mInterface;
+ sp<ComponentInterface> mInterface;
sp<IComponentListener> mListener;
sp<ComponentStore> mStore;
::android::hardware::media::c2::V1_0::utils::DefaultBufferPoolSender
mBufferPoolSender;
+ struct Sink;
+ std::mutex mSinkMutex;
+ sp<Sink> mSink;
+
std::mutex mBlockPoolsMutex;
// This map keeps C2BlockPool objects that are created by createBlockPool()
// alive. These C2BlockPool objects can be deleted by calling
// destroyBlockPool(), reset() or release(), or by destroying the component.
std::map<uint64_t, std::shared_ptr<C2BlockPool>> mBlockPools;
- // This struct is a comparable wrapper for IComponent.
- //
- // An IComponent object is either local or remote. If it is local, we can
- // use the underlying pointer as a key. If it is remote, we have to use the
- // underlying pointer of the associated binder object as a key.
- //
- // See interfacesEqual() for more detail.
- struct InterfaceKey {
- // An InterfaceKey is constructed from IComponent.
- InterfaceKey(const sp<IComponent>& component);
- // operator< is defined here to control the default definition of
- // std::less<InterfaceKey>, which will be used in type Roster defined
- // below.
- bool operator<(const InterfaceKey& other) const {
- return isRemote ?
- (other.isRemote ?
- // remote & remote
- std::less<IBinder*>()(
- remote.unsafe_get(),
- other.remote.unsafe_get()) :
- // remote & local
- false) :
- (other.isRemote ?
- // local & remote
- true :
- // local & local
- std::less<IComponent*>()(
- local.unsafe_get(),
- other.local.unsafe_get()));
- }
- private:
- bool isRemote;
- wp<IBinder> remote;
- wp<IComponent> local;
- };
-
- typedef std::map<InterfaceKey, std::weak_ptr<C2Component>> Roster;
- typedef Roster::const_iterator LocalId;
- LocalId mLocalId;
- void setLocalId(const LocalId& localId);
-
void initListener(const sp<Component>& self);
virtual ~Component() override;
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentInterface.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentInterface.h
new file mode 100644
index 0000000..a5d235e
--- /dev/null
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentInterface.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_HIDL_V1_0_UTILS_COMPONENT_INTERFACE_H
+#define CODEC2_HIDL_V1_0_UTILS_COMPONENT_INTERFACE_H
+
+#include <codec2/hidl/1.0/Configurable.h>
+#include <codec2/hidl/1.0/types.h>
+
+#include <android/hardware/media/c2/1.0/IComponentInterface.h>
+#include <hidl/Status.h>
+
+#include <C2Component.h>
+#include <C2Buffer.h>
+#include <C2.h>
+
+#include <memory>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace c2 {
+namespace V1_0 {
+namespace utils {
+
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::sp;
+
+struct ComponentStore;
+
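+// ComponentInterface is the HIDL counterpart of C2ComponentInterface. It holds
+// the wrapped C2ComponentInterface and exposes its parameters through the
+// CachedConfigurable returned by getConfigurable().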
+struct ComponentInterface : public IComponentInterface {
+ ComponentInterface(
+ const std::shared_ptr<C2ComponentInterface>& interface,
+ ComponentStore* store);
+ c2_status_t status() const;
+ virtual Return<sp<IConfigurable>> getConfigurable() override;
+
+protected:
+ std::shared_ptr<C2ComponentInterface> mInterface;
+ sp<CachedConfigurable> mConfigurable;
+ c2_status_t mInit;
+};
+
+
+} // namespace utils
+} // namespace V1_0
+} // namespace c2
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // CODEC2_HIDL_V1_0_UTILS_COMPONENT_INTERFACE_H
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
index 41e1416..be80c62 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ComponentStore.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,15 +18,18 @@
#define CODEC2_HIDL_V1_0_UTILS_COMPONENTSTORE_H
#include <codec2/hidl/1.0/Component.h>
+#include <codec2/hidl/1.0/ComponentInterface.h>
#include <codec2/hidl/1.0/Configurable.h>
-#include <android/hardware/media/c2/1.0/IComponentStore.h>
+
#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
+#include <android/hardware/media/c2/1.0/IComponentStore.h>
#include <hidl/Status.h>
#include <C2Component.h>
#include <C2Param.h>
#include <C2.h>
+#include <chrono>
#include <map>
#include <memory>
#include <mutex>
@@ -42,53 +45,61 @@
using ::android::hardware::media::bufferpool::V2_0::IClientManager;
-using ::android::hardware::hidl_array;
using ::android::hardware::hidl_handle;
-using ::android::hardware::hidl_memory;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::sp;
-using ::android::wp;
-struct ComponentStore : public Configurable<IComponentStore> {
+struct ComponentStore : public IComponentStore {
ComponentStore(const std::shared_ptr<C2ComponentStore>& store);
virtual ~ComponentStore() = default;
- c2_status_t status() const {
- return mInit;
- }
+ /**
+ * Returns the status of the construction of this object.
+ */
+ c2_status_t status() const;
+ /**
+ * This function is called by CachedConfigurable::init() to validate
+ * supported parameters.
+ */
c2_status_t validateSupportedParams(
const std::vector<std::shared_ptr<C2ParamDescriptor>>& params);
- // Methods from ::android::hardware::media::c2::V1_0::IComponentStore
- Return<void> createComponent(
+ // Methods from ::android::hardware::media::c2::V1_0::IComponentStore.
+ virtual Return<void> createComponent(
const hidl_string& name,
const sp<IComponentListener>& listener,
const sp<IClientManager>& pool,
createComponent_cb _hidl_cb) override;
- Return<void> createInterface(
+ virtual Return<void> createInterface(
const hidl_string& name,
createInterface_cb _hidl_cb) override;
- Return<void> listComponents(listComponents_cb _hidl_cb) override;
- Return<sp<IInputSurface>> createInputSurface() override;
- Return<void> getStructDescriptors(
+ virtual Return<void> listComponents(listComponents_cb _hidl_cb) override;
+ virtual Return<void> createInputSurface(
+ createInputSurface_cb _hidl_cb) override;
+ virtual Return<void> getStructDescriptors(
const hidl_vec<uint32_t>& indices,
getStructDescriptors_cb _hidl_cb) override;
- Return<sp<IClientManager>> getPoolClientManager() override;
- Return<Status> copyBuffer(
+ virtual Return<sp<IClientManager>> getPoolClientManager() override;
+ virtual Return<Status> copyBuffer(
const Buffer& src,
const Buffer& dst) override;
+ virtual Return<sp<IConfigurable>> getConfigurable() override;
- // Debug dump
- Return<void> debug(
+ /**
+ * Dumps information when lshal is called.
+ */
+ virtual Return<void> debug(
const hidl_handle& handle,
const hidl_vec<hidl_string>& args) override;
protected:
- // does bookkeeping for an interface that has been loaded
+ sp<CachedConfigurable> mConfigurable;
+
+ // Does bookkeeping for an interface that has been loaded.
void onInterfaceLoaded(const std::shared_ptr<C2ComponentInterface> &intf);
c2_status_t mInit;
@@ -100,18 +111,33 @@
std::set<C2String> mLoadedInterfaces;
mutable std::mutex mStructDescriptorsMutex;
- // Component lifetime management
- Component::Roster mComponentRoster;
+ // ComponentStore keeps track of live Components.
+
+ struct ComponentStatus {
+ std::shared_ptr<C2Component> c2Component;
+ std::chrono::system_clock::time_point birthTime;
+ };
+
mutable std::mutex mComponentRosterMutex;
- void reportComponentDeath(const Component::LocalId& componentLocalId);
+ std::map<Component*, ComponentStatus> mComponentRoster;
+
+ // Called whenever Component is created.
+ void reportComponentBirth(Component* component);
+ // Called only from the destructor of Component.
+ void reportComponentDeath(Component* component);
friend Component;
- // C2Component lookup
- std::shared_ptr<C2Component> findC2Component(
- const sp<IComponent>& component) const;
+ // Helper functions for dumping.
- friend struct InputSurface;
+ std::ostream& dump(
+ std::ostream& out,
+ const std::shared_ptr<const C2Component::Traits>& comp);
+
+ std::ostream& dump(
+ std::ostream& out,
+ ComponentStatus& compStatus);
+
};
} // namespace utils
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Configurable.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Configurable.h
index 2e33a6f..8095185 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Configurable.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Configurable.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,15 +17,13 @@
#ifndef CODEC2_HIDL_V1_0_UTILS_CONFIGURABLE_H
#define CODEC2_HIDL_V1_0_UTILS_CONFIGURABLE_H
-#include <codec2/hidl/1.0/ConfigurableC2Intf.h>
+#include <android/hardware/media/c2/1.0/IConfigurable.h>
+#include <hidl/Status.h>
#include <C2Component.h>
#include <C2Param.h>
#include <C2.h>
-#include <android/hardware/media/c2/1.0/IConfigurable.h>
-#include <hidl/Status.h>
-
#include <memory>
namespace android {
@@ -35,9 +33,6 @@
namespace V1_0 {
namespace utils {
-using ::android::hardware::hidl_array;
-using ::android::hardware::hidl_memory;
-using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::Void;
@@ -46,12 +41,52 @@
struct ComponentStore;
/**
+ * Codec2 objects of different types may have different querying and configuring
+ * functions, but across the Treble boundary, they share the same HIDL
+ * interface, IConfigurable.
+ *
+ * ConfigurableC2Intf is an abstract class that a Codec2 object can implement to
+ * easily expose an IConfigurable instance. See CachedConfigurable below.
+ */
+struct ConfigurableC2Intf {
+ C2String getName() const { return mName; }
+ uint32_t getId() const { return mId; }
+ /** C2ComponentInterface::query_vb sans stack params */
+ virtual c2_status_t query(
+ const std::vector<C2Param::Index> &indices,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2Param>>* const params) const = 0;
+ /** C2ComponentInterface::config_vb */
+ virtual c2_status_t config(
+ const std::vector<C2Param*> &params,
+ c2_blocking_t mayBlock,
+ std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
+ /** C2ComponentInterface::querySupportedParams_nb */
+ virtual c2_status_t querySupportedParams(
+ std::vector<std::shared_ptr<C2ParamDescriptor>>* const params) const = 0;
+ /** C2ComponentInterface::querySupportedValues_vb */
+ virtual c2_status_t querySupportedValues(
+ std::vector<C2FieldSupportedValuesQuery>& fields, c2_blocking_t mayBlock) const = 0;
+
+ virtual ~ConfigurableC2Intf() = default;
+
+ ConfigurableC2Intf(const C2String& name, uint32_t id)
+ : mName{name}, mId{id} {}
+
+protected:
+ C2String mName; /* cached component name */
+ uint32_t mId;
+};
+
+/**
* Implementation of the IConfigurable interface that supports caching of
* supported parameters from a supplied ComponentStore.
*
- * This is mainly the same for all of the configurable C2 interfaces though
- * there are slight differences in the blocking behavior. This is handled in the
- * ConfigurableC2Intf implementations.
+ * CachedConfigurable essentially converts a ConfigurableC2Intf into HIDL's
+ * IConfigurable. A Codec2 object generally implements ConfigurableC2Intf and
+ * passes the implementation to the constructor of CachedConfigurable.
+ *
+ * Note that caching of supported parameters happens once, in init(), using
+ * the supplied ComponentStore.
*/
struct CachedConfigurable : public IConfigurable {
CachedConfigurable(std::unique_ptr<ConfigurableC2Intf>&& intf);
@@ -60,6 +95,8 @@
// Methods from ::android::hardware::media::c2::V1_0::IConfigurable
+ virtual Return<uint32_t> getId() override;
+
virtual Return<void> getName(getName_cb _hidl_cb) override;
virtual Return<void> query(
@@ -90,63 +127,6 @@
std::vector<std::shared_ptr<C2ParamDescriptor>> mSupportedParams;
};
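+
+// Illustrative sketch (editor's addition, not part of this patch): a
+// hypothetical Codec2 object can expose IConfigurable by implementing
+// ConfigurableC2Intf and handing the implementation to CachedConfigurable.
+// The names MyIntf, kMyId and c2intf are made up for this example;
+// query_vb() is the real C2ComponentInterface call referenced above.
+//
+//     struct MyIntf : public ConfigurableC2Intf {
+//         explicit MyIntf(const std::shared_ptr<C2ComponentInterface>& intf)
+//               : ConfigurableC2Intf(intf->getName(), kMyId), mIntf(intf) {}
+//         c2_status_t query(
+//                 const std::vector<C2Param::Index>& indices,
+//                 c2_blocking_t mayBlock,
+//                 std::vector<std::unique_ptr<C2Param>>* const params
+//                 ) const override {
+//             return mIntf->query_vb({}, indices, mayBlock, params);
+//         }
+//         // config(), querySupportedParams() and querySupportedValues()
+//         // forward to mIntf in the same manner.
+//     protected:
+//         std::shared_ptr<C2ComponentInterface> mIntf;
+//     };
+//
+//     sp<CachedConfigurable> configurable{
+//             new CachedConfigurable(std::make_unique<MyIntf>(c2intf))};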
-/**
- * Template that implements the `IConfigurable` interface for an inherited
- * interface. Classes that implement a child interface `I` of `IConfigurable`
- * can derive from `Configurable<I>`.
- */
-template <typename I>
-struct Configurable : public I {
- Configurable(const sp<CachedConfigurable>& intf): mIntf(intf) {
- }
-
- c2_status_t init(ComponentStore* store) {
- return mIntf->init(store);
- }
-
- // Methods from ::android::hardware::media::c2::V1_0::IConfigurable
-
- using getName_cb = typename I::getName_cb;
- virtual Return<void> getName(getName_cb _hidl_cb) override {
- return mIntf->getName(_hidl_cb);
- }
-
- using query_cb = typename I::query_cb;
- virtual Return<void> query(
- const hidl_vec<uint32_t>& indices,
- bool mayBlock,
- query_cb _hidl_cb) override {
- return mIntf->query(indices, mayBlock, _hidl_cb);
- }
-
- using config_cb = typename I::config_cb;
- virtual Return<void> config(
- const hidl_vec<uint8_t>& inParams,
- bool mayBlock,
- config_cb _hidl_cb) override {
- return mIntf->config(inParams, mayBlock, _hidl_cb);
- }
-
- using querySupportedParams_cb = typename I::querySupportedParams_cb;
- virtual Return<void> querySupportedParams(
- uint32_t start,
- uint32_t count,
- querySupportedParams_cb _hidl_cb) override {
- return mIntf->querySupportedParams(start, count, _hidl_cb);
- }
-
- using querySupportedValues_cb = typename I::querySupportedValues_cb;
- virtual Return<void> querySupportedValues(
- const hidl_vec<FieldSupportedValuesQuery>& inFields,
- bool mayBlock,
- querySupportedValues_cb _hidl_cb) override {
- return mIntf->querySupportedValues(inFields, mayBlock, _hidl_cb);
- }
-
-protected:
- sp<CachedConfigurable> mIntf;
-};
-
} // namespace utils
} // namespace V1_0
} // namespace c2
@@ -155,3 +135,4 @@
} // namespace android
#endif // CODEC2_HIDL_V1_0_UTILS_CONFIGURABLE_H
+
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ConfigurableC2Intf.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ConfigurableC2Intf.h
deleted file mode 100644
index b8801bb..0000000
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ConfigurableC2Intf.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef CODEC2_HIDL_V1_0_UTILS_CONFIGURABLEC2INTF_H
-#define CODEC2_HIDL_V1_0_UTILS_CONFIGURABLEC2INTF_H
-
-#include <C2Work.h>
-#include <C2Component.h>
-#include <C2Param.h>
-#include <C2.h>
-
-#include <hidl/HidlSupport.h>
-#include <utils/StrongPointer.h>
-#include <vector>
-#include <memory>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace c2 {
-namespace V1_0 {
-namespace utils {
-
-using ::android::sp;
-using ::android::hardware::hidl_string;
-using ::android::hardware::hidl_vec;
-using ::android::hardware::Return;
-using ::android::hardware::Void;
-
-/**
- * Common Codec 2.0 interface wrapper.
- */
-struct ConfigurableC2Intf {
- C2String getName() const { return mName; }
- /** C2ComponentInterface::query_vb sans stack params */
- virtual c2_status_t query(
- const std::vector<C2Param::Index> &indices,
- c2_blocking_t mayBlock,
- std::vector<std::unique_ptr<C2Param>>* const params) const = 0;
- /** C2ComponentInterface::config_vb */
- virtual c2_status_t config(
- const std::vector<C2Param*> &params,
- c2_blocking_t mayBlock,
- std::vector<std::unique_ptr<C2SettingResult>>* const failures) = 0;
- /** C2ComponentInterface::querySupportedParams_nb */
- virtual c2_status_t querySupportedParams(
- std::vector<std::shared_ptr<C2ParamDescriptor>>* const params) const = 0;
- /** C2ComponentInterface::querySupportedParams_nb */
- virtual c2_status_t querySupportedValues(
- std::vector<C2FieldSupportedValuesQuery>& fields, c2_blocking_t mayBlock) const = 0;
-
- virtual ~ConfigurableC2Intf() = default;
-
- ConfigurableC2Intf(const C2String& name) : mName(name) {}
-
-protected:
- C2String mName; /* cache component name */
-};
-
-} // namespace utils
-} // namespace V1_0
-} // namespace c2
-} // namespace media
-} // namespace hardware
-} // namespace android
-
-#endif // CODEC2_HIDL_V1_0_UTILS_CONFIGURABLEC2INTF_H
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
new file mode 100644
index 0000000..b6857d5
--- /dev/null
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputBufferManager.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef CODEC2_HIDL_V1_0_UTILS_INPUT_BUFFER_MANAGER_H
+#define CODEC2_HIDL_V1_0_UTILS_INPUT_BUFFER_MANAGER_H
+
+#include <android/hardware/media/c2/1.0/IComponentListener.h>
+#include <utils/Timers.h>
+
+#include <C2Buffer.h>
+#include <C2Work.h>
+
+#include <set>
+#include <map>
+#include <thread>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace c2 {
+namespace V1_0 {
+namespace utils {
+
+using namespace ::android;
+
+/**
+ * InputBufferManager
+ * ==================
+ *
+ * InputBufferManager presents a way to track and untrack input buffers in this
+ * (codec) process and send a notification to a listener, possibly in a
+ * different process, when a tracked buffer no longer has any references in this
+ * process.
+ *
+ * InputBufferManager holds a collection of records representing tracked buffers
+ * and their callback listeners. Conceptually, one record is a triple (listener,
+ * frameIndex, bufferIndex) where
+ *
+ * - (frameIndex, bufferIndex) is a pair of indices used to identify the buffer.
+ * - listener is of type IComponentListener. Its onInputBuffersReleased()
+ * function will be called after the associated buffer dies. The argument of
+ * onInputBuffersReleased() is a list of InputBuffer objects, each of which
+ * has the following members:
+ *
+ * uint64_t frameIndex
+ * uint32_t arrayIndex
+ *
+ * When a tracked buffer associated with the triple (listener, frameIndex,
+ * bufferIndex) goes out of scope, listener->onInputBuffersReleased() will be
+ * called with an InputBuffer object whose members are set as follows:
+ *
+ * inputBuffer.frameIndex = frameIndex
+ * inputBuffer.arrayIndex = bufferIndex
+ *
+ * IPC Optimization
+ * ----------------
+ *
+ * Since onInputBuffersReleased() is an IPC call, InputBufferManager tries not
+ * to call it too often. Any two calls to the same listener are at least
+ * mNotificationIntervalNs nanoseconds apart, where mNotificationIntervalNs is
+ * configurable by calling setNotificationInterval(). The default value of
+ * mNotificationIntervalNs is kDefaultNotificationIntervalNs.
+ *
+ * Public Member Functions
+ * -----------------------
+ *
+ * InputBufferManager is a singleton class. Its only instance is accessible via
+ * the following public functions:
+ *
+ * - registerFrameData(const sp<IComponentListener>& listener,
+ * const C2FrameData& input)
+ *
+ * - unregisterFrameData(const sp<IComponentListener>& listener,
+ * const C2FrameData& input)
+ *
+ * - unregisterFrameData(const sp<IComponentListener>& listener)
+ *
+ * - setNotificationInterval(nsecs_t notificationIntervalNs)
+ *
+ */
+
+struct InputBufferManager {
+
+ /**
+ * The default value for the time interval between two consecutive IPC calls.
+ */
+ static constexpr nsecs_t kDefaultNotificationIntervalNs = 1000000; /* 1ms */
+
+ /**
+ * Track all buffers in a C2FrameData object.
+ *
+ * input (C2FrameData) has the following two members that are of interest:
+ *
+ * C2WorkOrdinal ordinal
+ * vector<shared_ptr<C2Buffer>> buffers
+ *
+ * Calling registerFrameData(listener, input) will register multiple
+ * triples (listener, frameIndex, bufferIndex) where frameIndex is equal to
+ * input.ordinal.frameIndex and bufferIndex runs through the indices of
+ * input.buffers such that input.buffers[bufferIndex] is not null.
+ *
+ * This should be called from queue().
+ *
+ * \param listener Listener of death notifications.
+ * \param input Input frame data whose input buffers are to be tracked.
+ */
+ static void registerFrameData(
+ const sp<IComponentListener>& listener,
+ const C2FrameData& input);
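+
+ // Illustrative sketch (editor's addition, not part of this patch): a
+ // hypothetical Component::queue() implementation could track the input
+ // buffers of every work item before handing the work to the underlying
+ // C2Component. The names works and mListener are assumptions made for
+ // this example.
+ //
+ //     for (const std::unique_ptr<C2Work>& work : works) {
+ //         if (work && !work->input.buffers.empty()) {
+ //             InputBufferManager::registerFrameData(mListener, work->input);
+ //         }
+ //     }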
+
+ /**
+ * Untrack all buffers in a C2FrameData object.
+ *
+ * Calling unregisterFrameData(listener, input) will unregister and remove
+ * pending notifications for all triples (l, fi, bufferIndex) such that
+ * l = listener and fi = input.ordinal.frameIndex.
+ *
+ * This should be called from onWorkDone() and flush().
+ *
+ * \param listener Previously registered listener.
+ * \param input Previously registered frame data.
+ */
+ static void unregisterFrameData(
+ const wp<IComponentListener>& listener,
+ const C2FrameData& input);
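+
+ // Illustrative sketch (editor's addition, not part of this patch): an
+ // onWorkDone() handler could untrack the buffers of each finished work
+ // item. finishedWorks and mListener are assumptions made for this example.
+ //
+ //     for (const std::unique_ptr<C2Work>& work : finishedWorks) {
+ //         if (work) {
+ //             InputBufferManager::unregisterFrameData(mListener, work->input);
+ //         }
+ //     }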
+
+ /**
+ * Untrack all buffers associated to a given listener.
+ *
+ * Calling unregisterFrameData(listener) will unregister and remove
+ * pending notifications for all triples (l, frameIndex, bufferIndex) such
+ * that l = listener.
+ *
+ * This should be called when the component cleans up all input buffers,
+ * i.e., when reset(), release(), stop() or ~Component() is called.
+ *
+ * \param listener Previously registered listener.
+ */
+ static void unregisterFrameData(
+ const wp<IComponentListener>& listener);
+
+ /**
+ * Set the notification interval.
+ *
+ * \param notificationIntervalNs New notification interval, in nanoseconds.
+ */
+ static void setNotificationInterval(nsecs_t notificationIntervalNs);
+
+private:
+ void _registerFrameData(
+ const sp<IComponentListener>& listener,
+ const C2FrameData& input);
+ void _unregisterFrameData(
+ const wp<IComponentListener>& listener,
+ const C2FrameData& input);
+ void _unregisterFrameData(
+ const wp<IComponentListener>& listener);
+ void _setNotificationInterval(nsecs_t notificationIntervalNs);
+
+ // The callback function tied to C2Buffer objects.
+ //
+ // Note: This function assumes that sInstance is the only instance of this
+ // class.
+ static void onBufferDestroyed(const C2Buffer* buf, void* arg);
+ void _onBufferDestroyed(const C2Buffer* buf, void* arg);
+
+ // Persistent data to be passed as "arg" in onBufferDestroyed().
+ // This is essentially the triple (listener, frameIndex, bufferIndex) plus a
+ // weak pointer to the C2Buffer object.
+ //
+ // Note that the "key" is bufferIndex according to operator<(). This is
+ // designed to work with TrackedBuffersMap defined below.
+ struct TrackedBuffer {
+ wp<IComponentListener> listener;
+ uint64_t frameIndex;
+ size_t bufferIndex;
+ std::weak_ptr<C2Buffer> buffer;
+ TrackedBuffer(const wp<IComponentListener>& listener,
+ uint64_t frameIndex,
+ size_t bufferIndex,
+ const std::shared_ptr<C2Buffer>& buffer)
+ : listener(listener),
+ frameIndex(frameIndex),
+ bufferIndex(bufferIndex),
+ buffer(buffer) {}
+ TrackedBuffer(const TrackedBuffer&) = default;
+ bool operator<(const TrackedBuffer& other) const {
+ return bufferIndex < other.bufferIndex;
+ }
+ };
+
+ // Map: listener -> frameIndex -> set<TrackedBuffer>.
+ // Essentially, this is used to store triples (listener, frameIndex,
+ // bufferIndex) that are searchable by listener or by (listener, frameIndex).
+ // However, the value of the innermost map is TrackedBuffer, which also
+ // contains an extra copy of listener and frameIndex. This is needed
+ // because onBufferDestroyed() needs to know listener and frameIndex too.
+ typedef std::map<wp<IComponentListener>,
+ std::map<uint64_t,
+ std::set<TrackedBuffer>>> TrackedBuffersMap;
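+
+ // Illustrative sketch (editor's addition, assumed logic rather than the
+ // actual implementation): with this layout, untracking one frame of one
+ // listener takes only two lookups.
+ //
+ //     auto listenerIt = mTrackedBuffersMap.find(listener);
+ //     if (listenerIt != mTrackedBuffersMap.end()) {
+ //         listenerIt->second.erase(input.ordinal.frameIndex.peeku());
+ //     }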
+
+ // Storage for pending (unsent) death notifications for one listener.
+ // Each pair in the member named "indices" is a (frameIndex, bufferIndex)
+ // pair taken from a (listener, frameIndex, bufferIndex) triple.
+ struct DeathNotifications {
+
+ // The number of pending notifications for this listener.
+ // count may be 0, in which case the DeathNotifications object will
+ // remain valid for only a short period (mNotificationIntervalNs
+ // nanoseconds).
+ size_t count;
+
+ // The timestamp of the most recent callback on this listener. This is
+ // used to guarantee that callbacks do not occur too frequently, and
+ // also to trigger expiration of a DeathNotifications object that has
+ // count = 0.
+ nsecs_t lastSentNs;
+
+ // Map: frameIndex -> vector of bufferIndices
+ // This is essentially a collection of (frameIndex, bufferIndex) pairs.
+ std::map<uint64_t, std::vector<size_t>> indices;
+
+ DeathNotifications(
+ nsecs_t notificationIntervalNs = kDefaultNotificationIntervalNs)
+ : count(0),
+ lastSentNs(systemTime() - notificationIntervalNs),
+ indices() {}
+ };
+
+ // The minimum time period between IPC calls to notify the client about the
+ // destruction of input buffers.
+ std::atomic<nsecs_t> mNotificationIntervalNs{kDefaultNotificationIntervalNs};
+
+ // Mutex for the management of all input buffers.
+ std::mutex mMutex;
+
+ // Tracked input buffers.
+ TrackedBuffersMap mTrackedBuffersMap;
+
+ // Death notifications to be sent.
+ //
+ // A DeathNotifications object is associated with each listener. An entry in
+ // this map will be removed if its associated DeathNotifications has count =
+ // 0 and lastSentNs < systemTime() - mNotificationIntervalNs.
+ std::map<wp<IComponentListener>, DeathNotifications> mDeathNotifications;
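+
+ // Illustrative sketch (editor's addition, assumed logic rather than the
+ // actual implementation): the expiration rule stated above amounts to
+ //
+ //     bool expired = notifications.count == 0 &&
+ //             notifications.lastSentNs <
+ //                     systemTime() - mNotificationIntervalNs;
+ //
+ // where notifications is the DeathNotifications value of a map entry.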
+
+ // Condition variable signaled when an entry is added to mDeathNotifications.
+ std::condition_variable mOnBufferDestroyed;
+
+ // Notify the clients about buffer destructions.
+ // Return false if all destructions have been notified.
+ // Return true and set *timeToRetryNs to the duration to wait before
+ // retrying if some destructions have not been notified.
+ bool processNotifications(nsecs_t* timeToRetryNs);
+
+ // Main function for the input buffer manager thread.
+ void main();
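+
+ // Illustrative sketch (editor's addition, assumed logic rather than the
+ // actual implementation; pairing mMutex with mOnBufferDestroyed is an
+ // assumption): main() could alternate between processNotifications() and
+ // waiting on the condition variable.
+ //
+ //     std::unique_lock<std::mutex> lock(mMutex);
+ //     while (true) {
+ //         nsecs_t timeToRetryNs;
+ //         if (processNotifications(&timeToRetryNs)) {
+ //             // Some notifications are still pending; retry after a delay.
+ //             mOnBufferDestroyed.wait_for(
+ //                     lock, std::chrono::nanoseconds(timeToRetryNs));
+ //         } else {
+ //             // Nothing pending; wait for the next buffer destruction.
+ //             mOnBufferDestroyed.wait(lock);
+ //         }
+ //     }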
+
+ // The thread that manages notifications.
+ //
+ // Note: This variable is declared last so its initialization will happen
+ // after all other member variables have been initialized.
+ std::thread mMainThread;
+
+ // Private constructor.
+ InputBufferManager();
+
+ // The only instance of this class.
+ static InputBufferManager& getInstance();
+
+};
+
+} // namespace utils
+} // namespace V1_0
+} // namespace c2
+} // namespace media
+} // namespace hardware
+} // namespace android
+
+#endif // CODEC2_HIDL_V1_0_UTILS_INPUT_BUFFER_MANAGER_H
+
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurface.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurface.h
index cef258e..2682c13 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurface.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,21 +19,14 @@
#include <codec2/hidl/1.0/ComponentStore.h>
-#include <android/hardware/media/c2/1.0/IInputSurface.h>
-#include <android/hardware/media/c2/1.0/IComponent.h>
-
#include <android/hardware/graphics/bufferqueue/1.0/IGraphicBufferProducer.h>
-#include <android/hardware/graphics/bufferqueue/1.0/IProducerListener.h>
-#include <android/hardware/graphics/common/1.0/types.h>
-#include <android/hardware/media/1.0/types.h>
-
+#include <android/hardware/media/c2/1.0/IInputSink.h>
+#include <android/hardware/media/c2/1.0/IInputSurface.h>
#include <gui/IGraphicBufferProducer.h>
+#include <hidl/Status.h>
#include <media/stagefright/bqhelper/GraphicBufferSource.h>
-#include <hidl/HidlSupport.h>
-#include <hidl/Status.h>
-
-class C2ReflectorHelper;
+#include <util/C2InterfaceHelper.h>
namespace android {
namespace hardware {
@@ -49,133 +42,31 @@
using ::android::hardware::Void;
using ::android::sp;
-using ::android::hardware::graphics::common::V1_0::PixelFormat;
-using ::android::hardware::media::V1_0::AnwBuffer;
-
struct InputSurface : public IInputSurface {
- typedef ::android::hidl::base::V1_0::IBase IBase;
-
- typedef ::android::hardware::graphics::bufferqueue::V1_0::
- IProducerListener HProducerListener;
-
- typedef ::android::
- IGraphicBufferProducer BGraphicBufferProducer;
-
typedef ::android::hardware::graphics::bufferqueue::V1_0::
IGraphicBufferProducer HGraphicBufferProducer;
typedef ::android::
GraphicBufferSource GraphicBufferSource;
-// Type disambiguation
-
- typedef ::android::hardware::media::c2::V1_0::Status Status;
-
-// New methods from IInputSurface
-
- virtual Return<void> connectToComponent(
- const sp<IComponent>& component,
- connectToComponent_cb _hidl_cb) override;
+ virtual Return<sp<HGraphicBufferProducer>> getGraphicBufferProducer() override;
virtual Return<sp<IConfigurable>> getConfigurable() override;
-// Methods derived from IGraphicBufferProducer
-
- virtual Return<void> requestBuffer(
- int32_t slot,
- requestBuffer_cb _hidl_cb) override;
-
- virtual Return<int32_t> setMaxDequeuedBufferCount(
- int32_t maxDequeuedBuffers) override;
-
- virtual Return<int32_t> setAsyncMode(
- bool async) override;
-
- virtual Return<void> dequeueBuffer(
- uint32_t width,
- uint32_t height,
- PixelFormat format,
- uint32_t usage,
- bool getFrameTimestamps,
- dequeueBuffer_cb _hidl_cb) override;
-
- virtual Return<int32_t> detachBuffer(
- int32_t slot) override;
-
- virtual Return<void> detachNextBuffer(
- detachNextBuffer_cb _hidl_cb) override;
-
- virtual Return<void> attachBuffer(
- const AnwBuffer& buffer,
- attachBuffer_cb _hidl_cb) override;
-
- virtual Return<void> queueBuffer(
- int32_t slot,
- const QueueBufferInput& input,
- queueBuffer_cb _hidl_cb) override;
-
- virtual Return<int32_t> cancelBuffer(
- int32_t slot,
- const hidl_handle& fence) override;
-
- virtual Return<void> query(
- int32_t what,
- query_cb _hidl_cb) override;
-
virtual Return<void> connect(
- const sp<HProducerListener>& listener,
- int32_t api,
- bool producerControlledByApp,
+ const sp<IInputSink>& sink,
connect_cb _hidl_cb) override;
- virtual Return<int32_t> disconnect(
- int32_t api,
- DisconnectMode mode) override;
-
- virtual Return<int32_t> setSidebandStream(
- const hidl_handle& stream) override;
-
- virtual Return<void> allocateBuffers(
- uint32_t width,
- uint32_t height,
- PixelFormat format,
- uint32_t usage) override;
-
- virtual Return<int32_t> allowAllocation(
- bool allow) override;
-
- virtual Return<int32_t> setGenerationNumber(
- uint32_t generationNumber) override;
-
- virtual Return<void> getConsumerName(
- getConsumerName_cb _hidl_cb) override;
-
- virtual Return<int32_t> setSharedBufferMode(
- bool sharedBufferMode) override;
-
- virtual Return<int32_t> setAutoRefresh(
- bool autoRefresh) override;
-
- virtual Return<int32_t> setDequeueTimeout(
- int64_t timeoutNs) override;
-
- virtual Return<void> getLastQueuedBuffer(
- getLastQueuedBuffer_cb _hidl_cb) override;
-
- virtual Return<void> getFrameTimestamps(
- getFrameTimestamps_cb _hidl_cb) override;
-
- virtual Return<void> getUniqueId(
- getUniqueId_cb _hidl_cb) override;
-
- class ConfigurableImpl;
-
protected:
+
+ class Interface;
+ class ConfigurableIntf;
+
sp<ComponentStore> mStore;
- sp<HGraphicBufferProducer> mBase;
+ sp<HGraphicBufferProducer> mProducer;
sp<GraphicBufferSource> mSource;
- std::shared_ptr<ConfigurableImpl> mHelper;
+ std::shared_ptr<Interface> mIntf;
sp<CachedConfigurable> mConfigurable;
InputSurface(
@@ -187,6 +78,7 @@
virtual ~InputSurface() override = default;
friend struct ComponentStore;
+
};
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurfaceConnection.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurfaceConnection.h
index 904fa9e..758b6b2 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurfaceConnection.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/InputSurfaceConnection.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,8 +18,10 @@
#define CODEC2_HIDL_V1_0_UTILS_INPUTSURFACECONNECTION_H
#include <codec2/hidl/1.0/Component.h>
+#include <codec2/hidl/1.0/Configurable.h>
#include <android/hardware/media/c2/1.0/IComponent.h>
+#include <android/hardware/media/c2/1.0/IConfigurable.h>
#include <android/hardware/media/c2/1.0/IInputSurfaceConnection.h>
#include <media/stagefright/bqhelper/GraphicBufferSource.h>
@@ -44,19 +46,28 @@
using ::android::sp;
using ::android::GraphicBufferSource;
+// An InputSurfaceConnection connects an InputSurface to a sink, which may be an
+// IInputSink or a local C2Component. This can be specified by choosing the
+// corresponding constructor. The reason for distinguishing these two cases is
+// that when an InputSurfaceConnection lives in the same process as the
+// component that processes the buffers, data parceling is not needed.
struct InputSurfaceConnection : public IInputSurfaceConnection {
virtual Return<Status> disconnect() override;
+ virtual Return<sp<IConfigurable>> getConfigurable() override;
+
protected:
InputSurfaceConnection(
const sp<GraphicBufferSource>& source,
- const std::shared_ptr<C2Component>& component);
+ const std::shared_ptr<C2Component>& comp,
+ const sp<ComponentStore>& store);
InputSurfaceConnection(
const sp<GraphicBufferSource>& source,
- const sp<IComponent>& component);
+ const sp<IInputSink>& sink,
+ const sp<ComponentStore>& store);
bool init();
@@ -68,9 +79,9 @@
struct Impl;
- std::mutex mMutex;
- sp<GraphicBufferSource> mSource;
+ std::mutex mImplMutex;
sp<Impl> mImpl;
+ sp<CachedConfigurable> mConfigurable;
virtual ~InputSurfaceConnection() override;
};
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
index d8a50b6..b9f3aa8 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,8 +17,6 @@
#ifndef CODEC2_HIDL_V1_0_UTILS_TYPES_H
#define CODEC2_HIDL_V1_0_UTILS_TYPES_H
-#include <chrono>
-
#include <bufferpool/ClientManager.h>
#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
#include <android/hardware/media/bufferpool/2.0/types.h>
@@ -30,6 +28,9 @@
#include <C2Param.h>
#include <C2ParamDef.h>
#include <C2Work.h>
+#include <util/C2Debug-base.h>
+
+#include <chrono>
using namespace std::chrono_literals;
@@ -65,66 +66,72 @@
};
typedef C2GlobalParam<C2Info, C2Hidl_Rect, 1> C2Hidl_RectInfo;
+// Make asString() and operator<< work with Status as well as c2_status_t.
+C2_DECLARE_AS_STRING_AND_DEFINE_STREAM_OUT(Status);
+
+/**
+ * All objcpy() functions return a boolean value indicating whether the
+ * conversion succeeded.
+ */
+
// C2SettingResult -> SettingResult
-Status objcpy(
+bool objcpy(
SettingResult* d,
const C2SettingResult& s);
// SettingResult -> std::unique_ptr<C2SettingResult>
-c2_status_t objcpy(
+bool objcpy(
std::unique_ptr<C2SettingResult>* d,
const SettingResult& s);
// C2ParamDescriptor -> ParamDescriptor
-Status objcpy(
+bool objcpy(
ParamDescriptor* d,
const C2ParamDescriptor& s);
// ParamDescriptor -> std::shared_ptr<C2ParamDescriptor>
-c2_status_t objcpy(
+bool objcpy(
std::shared_ptr<C2ParamDescriptor>* d,
const ParamDescriptor& s);
// C2FieldSupportedValuesQuery -> FieldSupportedValuesQuery
-Status objcpy(
+bool objcpy(
FieldSupportedValuesQuery* d,
const C2FieldSupportedValuesQuery& s);
// FieldSupportedValuesQuery -> C2FieldSupportedValuesQuery
-c2_status_t objcpy(
+bool objcpy(
C2FieldSupportedValuesQuery* d,
const FieldSupportedValuesQuery& s);
// C2FieldSupportedValuesQuery -> FieldSupportedValuesQueryResult
-Status objcpy(
+bool objcpy(
FieldSupportedValuesQueryResult* d,
const C2FieldSupportedValuesQuery& s);
// FieldSupportedValuesQuery, FieldSupportedValuesQueryResult -> C2FieldSupportedValuesQuery
-c2_status_t objcpy(
+bool objcpy(
C2FieldSupportedValuesQuery* d,
const FieldSupportedValuesQuery& sq,
const FieldSupportedValuesQueryResult& sr);
// C2Component::Traits -> ComponentTraits
-Status objcpy(
+bool objcpy(
IComponentStore::ComponentTraits* d,
const C2Component::Traits& s);
-// ComponentTraits -> C2Component::Traits, std::unique_ptr<std::vector<std::string>>
-// Note: The output d is only valid as long as aliasesBuffer remains alive.
-c2_status_t objcpy(
+// ComponentTraits -> C2Component::Traits
+bool objcpy(
C2Component::Traits* d,
- std::unique_ptr<std::vector<std::string>>* aliasesBuffer,
const IComponentStore::ComponentTraits& s);
// C2StructDescriptor -> StructDescriptor
-Status objcpy(
+bool objcpy(
StructDescriptor* d,
const C2StructDescriptor& s);
// StructDescriptor -> C2StructDescriptor
-c2_status_t objcpy(
+bool objcpy(
std::unique_ptr<C2StructDescriptor>* d,
const StructDescriptor& s);
@@ -208,68 +215,77 @@
// std::list<std::unique_ptr<C2Work>> -> WorkBundle
// Note: If bufferpool will be used, bpSender must not be null.
-Status objcpy(
+bool objcpy(
WorkBundle* d,
const std::list<std::unique_ptr<C2Work>>& s,
BufferPoolSender* bpSender = nullptr);
// WorkBundle -> std::list<std::unique_ptr<C2Work>>
-c2_status_t objcpy(
+bool objcpy(
std::list<std::unique_ptr<C2Work>>* d,
const WorkBundle& s);
/**
- * Parses a params blob and returns C2Param pointers to its params.
+ * Parses a params blob and returns C2Param pointers to its params. The pointers
+ * point to locations inside the underlying buffer of \p blob. If \p blob is
+ * destroyed, the pointers become invalid.
+ *
* \param[out] params target vector of C2Param pointers
* \param[in] blob parameter blob to parse
- * \retval C2_OK if the full blob was parsed
- * \retval C2_BAD_VALUE otherwise
+ * \retval true if the full blob was parsed
+ * \retval false otherwise
*/
-c2_status_t parseParamsBlob(
+bool parseParamsBlob(
std::vector<C2Param*> *params,
const hidl_vec<uint8_t> &blob);
/**
* Concatenates a list of C2Params into a params blob.
+ *
* \param[out] blob target blob
* \param[in] params parameters to concatenate
- * \retval C2_OK if the blob was successfully created
- * \retval C2_BAD_VALUE if the blob was not successful (this only happens if the parameters were
- * not const)
+ * \retval true if the blob was successfully created
+ * \retval false if the blob could not be created (this only happens if the
+ * parameters were not const)
*/
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<C2Param*> &params);
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<std::unique_ptr<C2Param>> &params);
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<std::shared_ptr<const C2Info>> &params);
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<std::unique_ptr<C2Tuning>> &params);
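+
+// Illustrative sketch (editor's addition, not part of this patch): a round
+// trip through a params blob, assuming the C2Hidl_RangeInfo parameter
+// declared earlier in this header. Note that the pointers filled in by
+// parseParamsBlob() alias the blob's storage, so the blob must outlive them.
+//
+//     C2Hidl_RangeInfo rangeInfo;
+//     rangeInfo.offset = 0;
+//     rangeInfo.length = 100;
+//     hidl_vec<uint8_t> blob;
+//     if (createParamsBlob(&blob, std::vector<C2Param*>{ &rangeInfo })) {
+//         std::vector<C2Param*> parsed;
+//         if (parseParamsBlob(&parsed, blob)) {
+//             // parsed[0] points into blob; keep blob alive while parsed
+//             // is in use.
+//         }
+//     }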
/**
* Parses a params blob and creates a vector of C2Params whose members are copies
* of the params in the blob.
+ *
* \param[out] params the resulting vector
* \param[in] blob parameter blob to parse
- * \retval C2_OK if the full blob was parsed and params was constructed
- * \retval C2_BAD_VALUE otherwise
+ * \retval true if the full blob was parsed and params was constructed
+ * \retval false otherwise
*/
-c2_status_t copyParamsFromBlob(
+bool copyParamsFromBlob(
std::vector<std::unique_ptr<C2Param>>* params,
Params blob);
+bool copyParamsFromBlob(
+ std::vector<std::unique_ptr<C2Tuning>>* params,
+ Params blob);
/**
- * Parses a params blob and applies updates to params
+ * Parses a params blob and applies updates to params.
+ *
* \param[in,out] params params to be updated
* \param[in] blob parameter blob containing updates
- * \retval C2_OK if the full blob was parsed and params was updated
- * \retval C2_BAD_VALUE otherwise
+ * \retval true if the full blob was parsed and params was updated
+ * \retval false otherwise
*/
-c2_status_t updateParamsFromBlob(
+bool updateParamsFromBlob(
const std::vector<C2Param*>& params,
const Params& blob);
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index c053bc1..343bcb5 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,11 +16,12 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "Codec2-types"
-#include <log/log.h>
+#include <android-base/logging.h>
#include <codec2/hidl/1.0/types.h>
#include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
+#include <media/stagefright/foundation/AUtils.h>
#include <C2AllocatorIon.h>
#include <C2AllocatorGralloc.h>
@@ -35,10 +36,9 @@
#include <algorithm>
#include <functional>
+#include <iomanip>
#include <unordered_map>
-#include <media/stagefright/foundation/AUtils.h>
-
namespace android {
namespace hardware {
namespace media {
@@ -57,8 +57,18 @@
TransactionId;
using ::android::TWGraphicBufferProducer;
+const char* asString(Status status, const char* def) {
+ return asString(static_cast<c2_status_t>(status), def);
+}
+
namespace /* unnamed */ {
+template <typename EnumClass>
+typename std::underlying_type<EnumClass>::type underlying_value(
+ EnumClass x) {
+ return static_cast<typename std::underlying_type<EnumClass>::type>(x);
+}
+
template <typename Common, typename DstVector, typename SrcVector>
void copyVector(DstVector* d, const SrcVector& s) {
static_assert(sizeof(Common) == sizeof(decltype((*d)[0])),
@@ -73,10 +83,11 @@
}
// C2ParamField -> ParamField
-void objcpy(ParamField *d, const C2ParamField &s) {
+bool objcpy(ParamField *d, const C2ParamField &s) {
d->index = static_cast<ParamIndex>(_C2ParamInspector::GetIndex(s));
d->fieldId.offset = static_cast<uint32_t>(_C2ParamInspector::GetOffset(s));
d->fieldId.size = static_cast<uint32_t>(_C2ParamInspector::GetSize(s));
+ return true;
}
struct C2ParamFieldBuilder : public C2ParamField {
@@ -92,21 +103,23 @@
};
// C2WorkOrdinalStruct -> WorkOrdinal
-void objcpy(WorkOrdinal *d, const C2WorkOrdinalStruct &s) {
+bool objcpy(WorkOrdinal *d, const C2WorkOrdinalStruct &s) {
d->frameIndex = static_cast<uint64_t>(s.frameIndex.peeku());
d->timestampUs = static_cast<uint64_t>(s.timestamp.peeku());
d->customOrdinal = static_cast<uint64_t>(s.customOrdinal.peeku());
+ return true;
}
// WorkOrdinal -> C2WorkOrdinalStruct
-void objcpy(C2WorkOrdinalStruct *d, const WorkOrdinal &s) {
+bool objcpy(C2WorkOrdinalStruct *d, const WorkOrdinal &s) {
d->frameIndex = c2_cntr64_t(s.frameIndex);
d->timestamp = c2_cntr64_t(s.timestampUs);
d->customOrdinal = c2_cntr64_t(s.customOrdinal);
+ return true;
}
// C2FieldSupportedValues::range's type -> FieldSupportedValues::Range
-void objcpy(
+bool objcpy(
FieldSupportedValues::Range* d,
const decltype(C2FieldSupportedValues::range)& s) {
d->min = static_cast<PrimitiveValue>(s.min.u64);
@@ -114,21 +127,24 @@
d->step = static_cast<PrimitiveValue>(s.step.u64);
d->num = static_cast<PrimitiveValue>(s.num.u64);
d->denom = static_cast<PrimitiveValue>(s.denom.u64);
+ return true;
}
// C2FieldSupportedValues -> FieldSupportedValues
-Status objcpy(FieldSupportedValues *d, const C2FieldSupportedValues &s) {
- d->typeOther = static_cast<int32_t>(s.type);
+bool objcpy(FieldSupportedValues *d, const C2FieldSupportedValues &s) {
switch (s.type) {
case C2FieldSupportedValues::EMPTY:
d->type = FieldSupportedValues::Type::EMPTY;
d->values.resize(0);
- return Status::OK;
+ break;
case C2FieldSupportedValues::RANGE:
d->type = FieldSupportedValues::Type::RANGE;
- objcpy(&d->range, s.range);
+ if (!objcpy(&d->range, s.range)) {
+ LOG(ERROR) << "Invalid C2FieldSupportedValues::range.";
+ return false;
+ }
d->values.resize(0);
- return Status::OK;
+ break;
default:
switch (s.type) {
case C2FieldSupportedValues::VALUES:
@@ -138,18 +154,22 @@
d->type = FieldSupportedValues::Type::FLAGS;
break;
default:
- d->type = FieldSupportedValues::Type::OTHER;
- // Copy all fields in this case
- objcpy(&d->range, s.range);
+ LOG(DEBUG) << "Unrecognized C2FieldSupportedValues::type_t "
+ << "with underlying value " << underlying_value(s.type)
+ << ".";
+ d->type = static_cast<FieldSupportedValues::Type>(s.type);
+ if (!objcpy(&d->range, s.range)) {
+ LOG(ERROR) << "Invalid C2FieldSupportedValues::range.";
+ return false;
+ }
}
- d->values.resize(s.values.size());
copyVector<uint64_t>(&d->values, s.values);
- return Status::OK;
}
+ return true;
}
// FieldSupportedValues::Range -> C2FieldSupportedValues::range's type
-void objcpy(
+bool objcpy(
decltype(C2FieldSupportedValues::range)* d,
const FieldSupportedValues::Range& s) {
d->min.u64 = static_cast<uint64_t>(s.min);
@@ -157,19 +177,23 @@
d->step.u64 = static_cast<uint64_t>(s.step);
d->num.u64 = static_cast<uint64_t>(s.num);
d->denom.u64 = static_cast<uint64_t>(s.denom);
+ return true;
}
// FieldSupportedValues -> C2FieldSupportedValues
-c2_status_t objcpy(C2FieldSupportedValues *d, const FieldSupportedValues &s) {
+bool objcpy(C2FieldSupportedValues *d, const FieldSupportedValues &s) {
switch (s.type) {
case FieldSupportedValues::Type::EMPTY:
d->type = C2FieldSupportedValues::EMPTY;
- return C2_OK;
+ break;
case FieldSupportedValues::Type::RANGE:
d->type = C2FieldSupportedValues::RANGE;
- objcpy(&d->range, s.range);
+ if (!objcpy(&d->range, s.range)) {
+ LOG(ERROR) << "Invalid FieldSupportedValues::range.";
+ return false;
+ }
d->values.resize(0);
- return C2_OK;
+ break;
default:
switch (s.type) {
case FieldSupportedValues::Type::VALUES:
@@ -179,22 +203,30 @@
d->type = C2FieldSupportedValues::FLAGS;
break;
default:
- d->type = static_cast<C2FieldSupportedValues::type_t>(s.typeOther);
- // Copy all fields in this case
- objcpy(&d->range, s.range);
+ LOG(DEBUG) << "Unrecognized FieldSupportedValues::Type "
+ << "with underlying value " << underlying_value(s.type)
+ << ".";
+ d->type = static_cast<C2FieldSupportedValues::type_t>(s.type);
+ if (!objcpy(&d->range, s.range)) {
+ LOG(ERROR) << "Invalid FieldSupportedValues::range.";
+ return false;
+ }
}
copyVector<uint64_t>(&d->values, s.values);
- return C2_OK;
}
+ return true;
}
} // unnamed namespace
// C2FieldSupportedValuesQuery -> FieldSupportedValuesQuery
-Status objcpy(
+bool objcpy(
FieldSupportedValuesQuery* d,
const C2FieldSupportedValuesQuery& s) {
- objcpy(&d->field, s.field());
+ if (!objcpy(&d->field, s.field())) {
+ LOG(ERROR) << "Invalid C2FieldSupportedValuesQuery::field.";
+ return false;
+ }
switch (s.type()) {
case C2FieldSupportedValuesQuery::POSSIBLE:
d->type = FieldSupportedValuesQuery::Type::POSSIBLE;
@@ -203,15 +235,16 @@
d->type = FieldSupportedValuesQuery::Type::CURRENT;
break;
default:
- ALOGE("Unknown type of C2FieldSupportedValuesQuery: %u",
- static_cast<unsigned>(s.type()));
- return Status::BAD_VALUE;
+ LOG(DEBUG) << "Unrecognized C2FieldSupportedValuesQuery::type_t "
+ << "with underlying value " << underlying_value(s.type())
+ << ".";
+ d->type = static_cast<FieldSupportedValuesQuery::Type>(s.type());
}
- return Status::OK;
+ return true;
}
// FieldSupportedValuesQuery -> C2FieldSupportedValuesQuery
-c2_status_t objcpy(
+bool objcpy(
C2FieldSupportedValuesQuery* d,
const FieldSupportedValuesQuery& s) {
C2FieldSupportedValuesQuery::type_t dType;
@@ -223,16 +256,17 @@
dType = C2FieldSupportedValuesQuery::CURRENT;
break;
default:
- ALOGE("Unknown type of FieldSupportedValuesQuery: %u",
- static_cast<unsigned>(s.type));
- return C2_BAD_VALUE;
+ LOG(DEBUG) << "Unrecognized FieldSupportedValuesQuery::Type "
+ << "with underlying value " << underlying_value(s.type)
+ << ".";
+ dType = static_cast<C2FieldSupportedValuesQuery::type_t>(s.type);
}
*d = C2FieldSupportedValuesQuery(C2ParamFieldBuilder(s.field), dType);
- return C2_OK;
+ return true;
}
// C2FieldSupportedValuesQuery -> FieldSupportedValuesQueryResult
-Status objcpy(
+bool objcpy(
FieldSupportedValuesQueryResult* d,
const C2FieldSupportedValuesQuery& s) {
d->status = static_cast<Status>(s.status);
@@ -241,20 +275,24 @@
// FieldSupportedValuesQuery, FieldSupportedValuesQueryResult ->
// C2FieldSupportedValuesQuery
-c2_status_t objcpy(
+bool objcpy(
C2FieldSupportedValuesQuery* d,
const FieldSupportedValuesQuery& sq,
const FieldSupportedValuesQueryResult& sr) {
- c2_status_t status = objcpy(d, sq);
- if (status != C2_OK) {
- return status;
+ if (!objcpy(d, sq)) {
+ LOG(ERROR) << "Invalid FieldSupportedValuesQuery.";
+ return false;
}
d->status = static_cast<c2_status_t>(sr.status);
- return objcpy(&d->values, sr.values);
+ if (!objcpy(&d->values, sr.values)) {
+ LOG(ERROR) << "Invalid FieldSupportedValuesQueryResult::values.";
+ return false;
+ }
+ return true;
}
// C2Component::Traits -> IComponentStore::ComponentTraits
-Status objcpy(
+bool objcpy(
IComponentStore::ComponentTraits *d,
const C2Component::Traits &s) {
d->name = s.name;
@@ -266,10 +304,19 @@
case C2Component::DOMAIN_AUDIO:
d->domain = IComponentStore::ComponentTraits::Domain::AUDIO;
break;
- default:
+ case C2Component::DOMAIN_IMAGE:
+ d->domain = IComponentStore::ComponentTraits::Domain::IMAGE;
+ break;
+ case C2Component::DOMAIN_OTHER:
d->domain = IComponentStore::ComponentTraits::Domain::OTHER;
+ break;
+ default:
+ LOG(DEBUG) << "Unrecognized C2Component::domain_t "
+ << "with underlying value " << underlying_value(s.domain)
+ << ".";
+ d->domain = static_cast<IComponentStore::ComponentTraits::Domain>(
+ s.domain);
}
- d->domainOther = static_cast<uint32_t>(s.domain);
switch (s.kind) {
case C2Component::KIND_DECODER:
@@ -278,10 +325,16 @@
case C2Component::KIND_ENCODER:
d->kind = IComponentStore::ComponentTraits::Kind::ENCODER;
break;
- default:
+ case C2Component::KIND_OTHER:
d->kind = IComponentStore::ComponentTraits::Kind::OTHER;
+ break;
+ default:
+ LOG(DEBUG) << "Unrecognized C2Component::kind_t "
+ << "with underlying value " << underlying_value(s.kind)
+ << ".";
+ d->kind = static_cast<IComponentStore::ComponentTraits::Kind>(
+ s.kind);
}
- d->kindOther = static_cast<uint32_t>(s.kind);
d->rank = static_cast<uint32_t>(s.rank);
@@ -292,13 +345,12 @@
--ix;
d->aliases[ix] = s.aliases[ix];
}
- return Status::OK;
+ return true;
}
// ComponentTraits -> C2Component::Traits, std::unique_ptr<std::vector<std::string>>
-c2_status_t objcpy(
+bool objcpy(
C2Component::Traits* d,
- std::unique_ptr<std::vector<std::string>>* aliasesBuffer,
const IComponentStore::ComponentTraits& s) {
d->name = s.name.c_str();
@@ -309,8 +361,17 @@
case IComponentStore::ComponentTraits::Domain::AUDIO:
d->domain = C2Component::DOMAIN_AUDIO;
break;
+ case IComponentStore::ComponentTraits::Domain::IMAGE:
+ d->domain = C2Component::DOMAIN_IMAGE;
+ break;
+ case IComponentStore::ComponentTraits::Domain::OTHER:
+ d->domain = C2Component::DOMAIN_OTHER;
+ break;
default:
- d->domain = static_cast<C2Component::domain_t>(s.domainOther);
+ LOG(DEBUG) << "Unrecognized ComponentTraits::Domain "
+ << "with underlying value " << underlying_value(s.domain)
+ << ".";
+ d->domain = static_cast<C2Component::domain_t>(s.domain);
}
switch (s.kind) {
@@ -320,68 +381,71 @@
case IComponentStore::ComponentTraits::Kind::ENCODER:
d->kind = C2Component::KIND_ENCODER;
break;
+ case IComponentStore::ComponentTraits::Kind::OTHER:
+ d->kind = C2Component::KIND_OTHER;
+ break;
default:
- d->kind = static_cast<C2Component::kind_t>(s.kindOther);
+ LOG(DEBUG) << "Unrecognized ComponentTraits::Kind "
+ << "with underlying value " << underlying_value(s.kind)
+ << ".";
+ d->kind = static_cast<C2Component::kind_t>(s.kind);
}
d->rank = static_cast<C2Component::rank_t>(s.rank);
d->mediaType = s.mediaType.c_str();
-
- // aliasesBuffer must not be resized after this.
- *aliasesBuffer = std::make_unique<std::vector<std::string>>(
- s.aliases.size());
- (*aliasesBuffer)->resize(s.aliases.size());
- std::vector<C2StringLiteral> dAliases(s.aliases.size());
+ d->aliases.resize(s.aliases.size());
for (size_t i = 0; i < s.aliases.size(); ++i) {
- (**aliasesBuffer)[i] = s.aliases[i].c_str();
- d->aliases[i] = (**aliasesBuffer)[i].c_str();
+ d->aliases[i] = s.aliases[i];
}
- return C2_OK;
+ return true;
}
namespace /* unnamed */ {
// C2ParamFieldValues -> ParamFieldValues
-Status objcpy(ParamFieldValues *d, const C2ParamFieldValues &s) {
- objcpy(&d->paramOrField, s.paramOrField);
+bool objcpy(ParamFieldValues *d, const C2ParamFieldValues &s) {
+ if (!objcpy(&d->paramOrField, s.paramOrField)) {
+ LOG(ERROR) << "Invalid C2ParamFieldValues::paramOrField.";
+ return false;
+ }
if (s.values) {
d->values.resize(1);
- return objcpy(&d->values[0], *s.values);
+ if (!objcpy(&d->values[0], *s.values)) {
+ LOG(ERROR) << "Invalid C2ParamFieldValues::values.";
+ return false;
+ }
+ return true;
}
d->values.resize(0);
- return Status::OK;
+ return true;
}
// ParamFieldValues -> C2ParamFieldValues
-c2_status_t objcpy(C2ParamFieldValues *d, const ParamFieldValues &s) {
+bool objcpy(C2ParamFieldValues *d, const ParamFieldValues &s) {
d->paramOrField = C2ParamFieldBuilder(s.paramOrField);
if (s.values.size() == 1) {
d->values = std::make_unique<C2FieldSupportedValues>();
- return objcpy(d->values.get(), s.values[0]);
+ if (!objcpy(d->values.get(), s.values[0])) {
+ LOG(ERROR) << "Invalid ParamFieldValues::values.";
+ return false;
+ }
+ return true;
} else if (s.values.size() == 0) {
d->values.reset();
- return C2_OK;
+ return true;
}
- ALOGE("Multiple FieldSupportedValues objects. "
- "(Only one is allowed.)");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid ParamFieldValues: "
+ "Two or more FieldSupportedValues objects exist in "
+ "ParamFieldValues. "
+ "Only zero or one is allowed.";
+ return false;
}
} // unnamed namespace
// C2SettingResult -> SettingResult
-Status objcpy(SettingResult *d, const C2SettingResult &s) {
- d->failureOther = static_cast<uint32_t>(s.failure);
+bool objcpy(SettingResult *d, const C2SettingResult &s) {
switch (s.failure) {
- case C2SettingResult::READ_ONLY:
- d->failure = SettingResult::Failure::READ_ONLY;
- break;
- case C2SettingResult::MISMATCH:
- d->failure = SettingResult::Failure::MISMATCH;
- break;
- case C2SettingResult::BAD_VALUE:
- d->failure = SettingResult::Failure::BAD_VALUE;
- break;
case C2SettingResult::BAD_TYPE:
d->failure = SettingResult::Failure::BAD_TYPE;
break;
@@ -391,53 +455,61 @@
case C2SettingResult::BAD_INDEX:
d->failure = SettingResult::Failure::BAD_INDEX;
break;
+ case C2SettingResult::READ_ONLY:
+ d->failure = SettingResult::Failure::READ_ONLY;
+ break;
+ case C2SettingResult::MISMATCH:
+ d->failure = SettingResult::Failure::MISMATCH;
+ break;
+ case C2SettingResult::BAD_VALUE:
+ d->failure = SettingResult::Failure::BAD_VALUE;
+ break;
case C2SettingResult::CONFLICT:
d->failure = SettingResult::Failure::CONFLICT;
break;
case C2SettingResult::UNSUPPORTED:
d->failure = SettingResult::Failure::UNSUPPORTED;
break;
+ case C2SettingResult::INFO_BAD_VALUE:
+ d->failure = SettingResult::Failure::INFO_BAD_VALUE;
+ break;
case C2SettingResult::INFO_CONFLICT:
d->failure = SettingResult::Failure::INFO_CONFLICT;
break;
default:
- d->failure = SettingResult::Failure::OTHER;
+ LOG(DEBUG) << "Unrecognized C2SettingResult::Failure "
+ << "with underlying value " << underlying_value(s.failure)
+ << ".";
+ d->failure = static_cast<SettingResult::Failure>(s.failure);
}
- Status status = objcpy(&d->field, s.field);
- if (status != Status::OK) {
- return status;
+ if (!objcpy(&d->field, s.field)) {
+ LOG(ERROR) << "Invalid C2SettingResult::field.";
+ return false;
}
d->conflicts.resize(s.conflicts.size());
size_t i = 0;
for (const C2ParamFieldValues& sConflict : s.conflicts) {
ParamFieldValues &dConflict = d->conflicts[i++];
- status = objcpy(&dConflict, sConflict);
- if (status != Status::OK) {
- return status;
+ if (!objcpy(&dConflict, sConflict)) {
+ LOG(ERROR) << "Invalid C2SettingResult::conflicts["
+ << i - 1 << "].";
+ return false;
}
}
- return Status::OK;
+ return true;
}
// SettingResult -> std::unique_ptr<C2SettingResult>
-c2_status_t objcpy(std::unique_ptr<C2SettingResult> *d, const SettingResult &s) {
+bool objcpy(std::unique_ptr<C2SettingResult> *d, const SettingResult &s) {
*d = std::unique_ptr<C2SettingResult>(new C2SettingResult {
.field = C2ParamFieldValues(C2ParamFieldBuilder()) });
if (!*d) {
- return C2_NO_MEMORY;
+ LOG(ERROR) << "No memory for C2SettingResult.";
+ return false;
}
// failure
switch (s.failure) {
- case SettingResult::Failure::READ_ONLY:
- (*d)->failure = C2SettingResult::READ_ONLY;
- break;
- case SettingResult::Failure::MISMATCH:
- (*d)->failure = C2SettingResult::MISMATCH;
- break;
- case SettingResult::Failure::BAD_VALUE:
- (*d)->failure = C2SettingResult::BAD_VALUE;
- break;
case SettingResult::Failure::BAD_TYPE:
(*d)->failure = C2SettingResult::BAD_TYPE;
break;
@@ -447,23 +519,38 @@
case SettingResult::Failure::BAD_INDEX:
(*d)->failure = C2SettingResult::BAD_INDEX;
break;
+ case SettingResult::Failure::READ_ONLY:
+ (*d)->failure = C2SettingResult::READ_ONLY;
+ break;
+ case SettingResult::Failure::MISMATCH:
+ (*d)->failure = C2SettingResult::MISMATCH;
+ break;
+ case SettingResult::Failure::BAD_VALUE:
+ (*d)->failure = C2SettingResult::BAD_VALUE;
+ break;
case SettingResult::Failure::CONFLICT:
(*d)->failure = C2SettingResult::CONFLICT;
break;
case SettingResult::Failure::UNSUPPORTED:
(*d)->failure = C2SettingResult::UNSUPPORTED;
break;
+ case SettingResult::Failure::INFO_BAD_VALUE:
+ (*d)->failure = C2SettingResult::INFO_BAD_VALUE;
+ break;
case SettingResult::Failure::INFO_CONFLICT:
(*d)->failure = C2SettingResult::INFO_CONFLICT;
break;
default:
- (*d)->failure = static_cast<C2SettingResult::Failure>(s.failureOther);
+ LOG(DEBUG) << "Unrecognized SettingResult::Failure "
+ << "with underlying value " << underlying_value(s.failure)
+ << ".";
+ (*d)->failure = static_cast<C2SettingResult::Failure>(s.failure);
}
// field
- c2_status_t status = objcpy(&(*d)->field, s.field);
- if (status != C2_OK) {
- return status;
+ if (!objcpy(&(*d)->field, s.field)) {
+ LOG(ERROR) << "Invalid SettingResult::field.";
+ return false;
}
// conflicts
@@ -472,26 +559,26 @@
for (const ParamFieldValues& sConflict : s.conflicts) {
(*d)->conflicts.emplace_back(
C2ParamFieldValues{ C2ParamFieldBuilder(), nullptr });
- status = objcpy(&(*d)->conflicts.back(), sConflict);
- if (status != C2_OK) {
- return status;
+ if (!objcpy(&(*d)->conflicts.back(), sConflict)) {
+ LOG(ERROR) << "Invalid SettingResult::conflicts.";
+ return false;
}
}
- return C2_OK;
+ return true;
}
// C2ParamDescriptor -> ParamDescriptor
-Status objcpy(ParamDescriptor *d, const C2ParamDescriptor &s) {
+bool objcpy(ParamDescriptor *d, const C2ParamDescriptor &s) {
d->index = static_cast<ParamIndex>(s.index());
d->attrib = static_cast<hidl_bitfield<ParamDescriptor::Attrib>>(
_C2ParamInspector::GetAttrib(s));
d->name = s.name();
copyVector<uint32_t>(&d->dependencies, s.dependencies());
- return Status::OK;
+ return true;
}
// ParamDescriptor -> C2ParamDescriptor
-c2_status_t objcpy(std::shared_ptr<C2ParamDescriptor> *d, const ParamDescriptor &s) {
+bool objcpy(std::shared_ptr<C2ParamDescriptor> *d, const ParamDescriptor &s) {
std::vector<C2Param::Index> dDependencies;
dDependencies.reserve(s.dependencies.size());
for (const ParamIndex& sDependency : s.dependencies) {
@@ -502,11 +589,11 @@
static_cast<C2ParamDescriptor::attrib_t>(s.attrib),
C2String(s.name.c_str()),
std::move(dDependencies));
- return C2_OK;
+ return true;
}
// C2StructDescriptor -> StructDescriptor
-Status objcpy(StructDescriptor *d, const C2StructDescriptor &s) {
+bool objcpy(StructDescriptor *d, const C2StructDescriptor &s) {
d->type = static_cast<ParamIndex>(s.coreIndex().coreIndex());
d->fields.resize(s.numFields());
size_t i = 0;
@@ -518,7 +605,7 @@
_C2ParamInspector::GetSize(sField));
dField.type = static_cast<hidl_bitfield<FieldDescriptor::Type>>(
sField.type());
- dField.length = static_cast<uint32_t>(sField.extent());
+ dField.extent = static_cast<uint32_t>(sField.extent());
dField.name = static_cast<hidl_string>(sField.name());
const auto& sNamedValues = sField.namedValues();
dField.namedValues.resize(sNamedValues.size());
@@ -530,18 +617,18 @@
sNamedValue.second.u64);
}
}
- return Status::OK;
+ return true;
}
// StructDescriptor -> C2StructDescriptor
-c2_status_t objcpy(std::unique_ptr<C2StructDescriptor> *d, const StructDescriptor &s) {
+bool objcpy(std::unique_ptr<C2StructDescriptor> *d, const StructDescriptor &s) {
C2Param::CoreIndex dIndex = C2Param::CoreIndex(static_cast<uint32_t>(s.type));
std::vector<C2FieldDescriptor> dFields;
dFields.reserve(s.fields.size());
for (const auto &sField : s.fields) {
C2FieldDescriptor dField = {
static_cast<uint32_t>(sField.type),
- sField.length,
+ sField.extent,
sField.name,
sField.fieldId.offset,
sField.fieldId.size };
@@ -557,7 +644,7 @@
}
*d = std::make_unique<C2StructDescriptor>(
_C2ParamInspector::CreateStructDescriptor(dIndex, std::move(dFields)));
- return C2_OK;
+ return true;
}
namespace /* unnamed */ {
@@ -565,14 +652,14 @@
// Find or add a hidl BaseBlock object from a given C2Handle* to a list and an
// associated map.
// Note: The handle is not cloned.
-Status _addBaseBlock(
+bool _addBaseBlock(
uint32_t* index,
const C2Handle* handle,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
if (!handle) {
- ALOGE("addBaseBlock called on a null C2Handle.");
- return Status::BAD_VALUE;
+ LOG(ERROR) << "addBaseBlock called on a null C2Handle.";
+ return false;
}
auto it = baseBlockIndices->find(handle);
if (it != baseBlockIndices->end()) {
@@ -583,26 +670,25 @@
baseBlocks->emplace_back();
BaseBlock &dBaseBlock = baseBlocks->back();
- dBaseBlock.type = BaseBlock::Type::NATIVE;
// This does not clone the handle.
- dBaseBlock.nativeBlock =
- reinterpret_cast<const native_handle_t*>(handle);
+ dBaseBlock.nativeBlock(
+ reinterpret_cast<const native_handle_t*>(handle));
}
- return Status::OK;
+ return true;
}
// Find or add a hidl BaseBlock object from a given BufferPoolData to a list and
// an associated map.
-Status _addBaseBlock(
+bool _addBaseBlock(
uint32_t* index,
const std::shared_ptr<BufferPoolData> bpData,
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
if (!bpData) {
- ALOGE("addBaseBlock called on a null BufferPoolData.");
- return Status::BAD_VALUE;
+ LOG(ERROR) << "addBaseBlock called on a null BufferPoolData.";
+ return false;
}
auto it = baseBlockIndices->find(bpData.get());
if (it != baseBlockIndices->end()) {
@@ -613,24 +699,26 @@
baseBlocks->emplace_back();
BaseBlock &dBaseBlock = baseBlocks->back();
- dBaseBlock.type = BaseBlock::Type::POOLED;
if (bufferPoolSender) {
+ BufferStatusMessage pooledBlock;
ResultStatus bpStatus = bufferPoolSender->send(
bpData,
- &dBaseBlock.pooledBlock);
+ &pooledBlock);
if (bpStatus != ResultStatus::OK) {
- ALOGE("Failed to send buffer with BufferPool. Error: %d.",
- static_cast<int>(bpStatus));
- return Status::BAD_VALUE;
+ LOG(ERROR) << "Failed to send buffer with BufferPool. Error: "
+ << static_cast<int32_t>(bpStatus)
+ << ".";
+ return false;
}
+ dBaseBlock.pooledBlock(pooledBlock);
}
}
- return Status::OK;
+ return true;
}
-Status addBaseBlock(
+bool addBaseBlock(
uint32_t* index,
const C2Handle* handle,
const std::shared_ptr<const _C2BlockPoolData>& blockPoolData,
@@ -649,8 +737,8 @@
std::shared_ptr<BufferPoolData> bpData;
if (!_C2BlockFactory::GetBufferPoolData(blockPoolData, &bpData)
|| !bpData) {
- ALOGE("BufferPoolData unavailable in a block.");
- return Status::BAD_VALUE;
+ LOG(ERROR) << "BufferPoolData unavailable in a block.";
+ return false;
}
return _addBaseBlock(
index, bpData,
@@ -662,69 +750,76 @@
index, handle,
baseBlocks, baseBlockIndices);
default:
- ALOGE("Unknown C2BlockPoolData type.");
- return Status::BAD_VALUE;
+ LOG(ERROR) << "Unknown C2BlockPoolData type.";
+ return false;
}
}
// C2Fence -> hidl_handle
// Note: File descriptors are not duplicated. The original file descriptor must
// not be closed before the transaction is complete.
-Status objcpy(hidl_handle* d, const C2Fence& s) {
+bool objcpy(hidl_handle* d, const C2Fence& s) {
(void)s; // TODO: implement s.fd()
int fenceFd = -1;
d->setTo(nullptr);
if (fenceFd >= 0) {
native_handle_t *handle = native_handle_create(1, 0);
if (!handle) {
- return Status::NO_MEMORY;
+ LOG(ERROR) << "Failed to create a native handle.";
+ return false;
}
handle->data[0] = fenceFd;
d->setTo(handle, true /* owns */);
}
- return Status::OK;
+ return true;
}
// C2ConstLinearBlock -> Block
// Note: Native handles are not duplicated. The original handles must not be
// closed before the transaction is complete.
-Status objcpy(Block* d, const C2ConstLinearBlock& s,
+bool objcpy(Block* d, const C2ConstLinearBlock& s,
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
std::shared_ptr<const _C2BlockPoolData> bpData =
_C2BlockFactory::GetLinearBlockPoolData(s);
- Status status = addBaseBlock(&d->index, s.handle(), bpData,
- bufferPoolSender, baseBlocks, baseBlockIndices);
- if (status != Status::OK) {
- return status;
+ if (!addBaseBlock(&d->index, s.handle(), bpData,
+ bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid block data in C2ConstLinearBlock.";
+ return false;
}
// Create the metadata.
C2Hidl_RangeInfo dRangeInfo;
dRangeInfo.offset = static_cast<uint32_t>(s.offset());
dRangeInfo.length = static_cast<uint32_t>(s.size());
- status = createParamsBlob(&d->meta,
- std::vector<C2Param*>{ &dRangeInfo });
- if (status != Status::OK) {
- return Status::BAD_VALUE;
+ if (!createParamsBlob(&d->meta, std::vector<C2Param*>{ &dRangeInfo })) {
+ LOG(ERROR) << "Invalid range info in C2ConstLinearBlock.";
+ return false;
}
// Copy the fence
- return objcpy(&d->fence, s.fence());
+ if (!objcpy(&d->fence, s.fence())) {
+ LOG(ERROR) << "Invalid C2ConstLinearBlock::fence.";
+ return false;
+ }
+ return true;
}
// C2ConstGraphicBlock -> Block
// Note: Native handles are not duplicated. The original handles must not be
// closed before the transaction is complete.
-Status objcpy(Block* d, const C2ConstGraphicBlock& s,
+bool objcpy(Block* d, const C2ConstGraphicBlock& s,
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
std::shared_ptr<const _C2BlockPoolData> bpData =
_C2BlockFactory::GetGraphicBlockPoolData(s);
- Status status = addBaseBlock(&d->index, s.handle(), bpData,
- bufferPoolSender, baseBlocks, baseBlockIndices);
+ if (!addBaseBlock(&d->index, s.handle(), bpData,
+ bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid block data in C2ConstGraphicBlock.";
+ return false;
+ }
// Create the metadata.
C2Hidl_RectInfo dRectInfo;
@@ -733,62 +828,70 @@
dRectInfo.top = static_cast<uint32_t>(sRect.top);
dRectInfo.width = static_cast<uint32_t>(sRect.width);
dRectInfo.height = static_cast<uint32_t>(sRect.height);
- status = createParamsBlob(&d->meta,
- std::vector<C2Param*>{ &dRectInfo });
- if (status != Status::OK) {
- return Status::BAD_VALUE;
+ if (!createParamsBlob(&d->meta, std::vector<C2Param*>{ &dRectInfo })) {
+ LOG(ERROR) << "Invalid rect info in C2ConstGraphicBlock.";
+ return false;
}
// Copy the fence
- return objcpy(&d->fence, s.fence());
+ if (!objcpy(&d->fence, s.fence())) {
+ LOG(ERROR) << "Invalid C2ConstGraphicBlock::fence.";
+ return false;
+ }
+ return true;
}
// C2BufferData -> Buffer
// This function only fills in d->blocks.
-Status objcpy(Buffer* d, const C2BufferData& s,
+bool objcpy(Buffer* d, const C2BufferData& s,
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
- Status status;
d->blocks.resize(
s.linearBlocks().size() +
s.graphicBlocks().size());
size_t i = 0;
for (const C2ConstLinearBlock& linearBlock : s.linearBlocks()) {
Block& dBlock = d->blocks[i++];
- status = objcpy(
+ if (!objcpy(
&dBlock, linearBlock,
- bufferPoolSender, baseBlocks, baseBlockIndices);
- if (status != Status::OK) {
- return status;
+ bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2BufferData::linearBlocks. "
+ << "(Destination index = " << i - 1 << ".)";
+ return false;
}
}
for (const C2ConstGraphicBlock& graphicBlock : s.graphicBlocks()) {
Block& dBlock = d->blocks[i++];
- status = objcpy(
+ if (!objcpy(
&dBlock, graphicBlock,
- bufferPoolSender, baseBlocks, baseBlockIndices);
- if (status != Status::OK) {
- return status;
+ bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2BufferData::graphicBlocks. "
+ << "(Destination index = " << i - 1 << ".)";
+ return false;
}
}
- return Status::OK;
+ return true;
}
// C2Buffer -> Buffer
-Status objcpy(Buffer* d, const C2Buffer& s,
+bool objcpy(Buffer* d, const C2Buffer& s,
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
- Status status = createParamsBlob(&d->info, s.info());
- if (status != Status::OK) {
- return status;
+ if (!createParamsBlob(&d->info, s.info())) {
+ LOG(ERROR) << "Invalid C2Buffer::info.";
+ return false;
}
- return objcpy(d, s.data(), bufferPoolSender, baseBlocks, baseBlockIndices);
+ if (!objcpy(d, s.data(), bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2Buffer::data.";
+ return false;
+ }
+ return true;
}
// C2InfoBuffer -> InfoBuffer
-Status objcpy(InfoBuffer* d, const C2InfoBuffer& s,
+bool objcpy(InfoBuffer* d, const C2InfoBuffer& s,
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
@@ -798,24 +901,21 @@
(void)bufferPoolSender;
(void)baseBlocks;
(void)baseBlockIndices;
- return Status::OK;
- /*
- // Stub implementation that may work in the future.
- d->index = static_cast<uint32_t>(s.index());
- d->buffer.info.resize(0);
- return objcpy(&d->buffer, s.data(), baseBlocks, baseBlockIndices);
- */
+ LOG(INFO) << "InfoBuffer not implemented.";
+ return true;
}
// C2FrameData -> FrameData
-Status objcpy(FrameData* d, const C2FrameData& s,
+bool objcpy(FrameData* d, const C2FrameData& s,
BufferPoolSender* bufferPoolSender,
std::list<BaseBlock>* baseBlocks,
std::map<const void*, uint32_t>* baseBlockIndices) {
d->flags = static_cast<hidl_bitfield<FrameData::Flags>>(s.flags);
- objcpy(&d->ordinal, s.ordinal);
+ if (!objcpy(&d->ordinal, s.ordinal)) {
+ LOG(ERROR) << "Invalid C2FrameData::ordinal.";
+ return false;
+ }
- Status status;
d->buffers.resize(s.buffers.size());
size_t i = 0;
for (const std::shared_ptr<C2Buffer>& sBuffer : s.buffers) {
@@ -827,17 +927,18 @@
dBuffer.blocks.resize(0);
continue;
}
- status = objcpy(
+ if (!objcpy(
&dBuffer, *sBuffer,
- bufferPoolSender, baseBlocks, baseBlockIndices);
- if (status != Status::OK) {
- return status;
+ bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2FrameData::buffers["
+ << i - 1 << "].";
+ return false;
}
}
- status = createParamsBlob(&d->configUpdate, s.configUpdate);
- if (status != Status::OK) {
- return status;
+ if (!createParamsBlob(&d->configUpdate, s.configUpdate)) {
+ LOG(ERROR) << "Invalid C2FrameData::configUpdate.";
+ return false;
}
d->infoBuffers.resize(s.infoBuffers.size());
@@ -845,17 +946,19 @@
for (const std::shared_ptr<C2InfoBuffer>& sInfoBuffer : s.infoBuffers) {
InfoBuffer& dInfoBuffer = d->infoBuffers[i++];
if (!sInfoBuffer) {
- ALOGE("Null C2InfoBuffer");
- return Status::BAD_VALUE;
+ LOG(ERROR) << "Null C2FrameData::infoBuffers["
+ << i - 1 << "].";
+ return false;
}
- status = objcpy(&dInfoBuffer, *sInfoBuffer,
- bufferPoolSender, baseBlocks, baseBlockIndices);
- if (status != Status::OK) {
- return status;
+ if (!objcpy(&dInfoBuffer, *sInfoBuffer,
+ bufferPoolSender, baseBlocks, baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2FrameData::infoBuffers["
+ << i - 1 << "].";
+ return false;
}
}
- return status;
+ return true;
}
} // unnamed namespace
@@ -885,7 +988,7 @@
const std::shared_ptr<BufferPoolData>& bpData,
BufferStatusMessage* bpMessage) {
if (!mReceiverManager) {
- ALOGE("No access to receiver's BufferPool.");
+ LOG(ERROR) << "No access to receiver's BufferPool.";
return ResultStatus::NOT_FOUND;
}
ResultStatus rs;
@@ -893,7 +996,7 @@
if (!mSenderManager) {
mSenderManager = ClientManager::getInstance();
if (!mSenderManager) {
- ALOGE("Failed to retrieve local BufferPool ClientManager.");
+ LOG(ERROR) << "Failed to retrieve local BufferPool ClientManager.";
return ResultStatus::CRITICAL_ERROR;
}
}
@@ -915,11 +1018,11 @@
connectionId,
&receiverConnectionId);
if ((rs != ResultStatus::OK) && (rs != ResultStatus::ALREADY_EXISTS)) {
- ALOGW("registerSender -- returned error: %d.",
- static_cast<int>(rs));
+ LOG(WARNING) << "registerSender -- returned error: "
+ << static_cast<int32_t>(rs)
+ << ".";
return rs;
} else {
- ALOGV("registerSender -- succeeded.");
mReceiverConnectionId = receiverConnectionId;
}
}
@@ -929,12 +1032,13 @@
rs = mSenderManager->postSend(
mReceiverConnectionId, bpData, &transactionId, &timestampUs);
if (rs != ResultStatus::OK) {
- ALOGE("ClientManager::postSend -- returned error: %d.",
- static_cast<int>(rs));
+ LOG(ERROR) << "ClientManager::postSend -- returned error: "
+ << static_cast<int32_t>(rs)
+ << ".";
return rs;
}
if (!bpMessage) {
- ALOGE("Null output parameter for BufferStatusMessage.");
+ LOG(ERROR) << "Null output parameter for BufferStatusMessage.";
return ResultStatus::CRITICAL_ERROR;
}
bpMessage->connectionId = mReceiverConnectionId;
@@ -946,12 +1050,10 @@
}
// std::list<std::unique_ptr<C2Work>> -> WorkBundle
-Status objcpy(
+bool objcpy(
WorkBundle* d,
const std::list<std::unique_ptr<C2Work>>& s,
BufferPoolSender* bufferPoolSender) {
- Status status = Status::OK;
-
// baseBlocks holds a list of BaseBlock objects that Blocks can refer to.
std::list<BaseBlock> baseBlocks;
@@ -971,63 +1073,80 @@
for (const std::unique_ptr<C2Work>& sWork : s) {
Work &dWork = d->works[i++];
if (!sWork) {
- ALOGW("Null C2Work encountered.");
+ LOG(WARNING) << "Null C2Work encountered.";
continue;
}
- status = objcpy(&dWork.input, sWork->input,
- bufferPoolSender, &baseBlocks, &baseBlockIndices);
- if (status != Status::OK) {
- return status;
+
+ // chain info is not in use currently.
+
+ // input
+ if (!objcpy(&dWork.input, sWork->input,
+ bufferPoolSender, &baseBlocks, &baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2Work::input.";
+ return false;
}
+
+ // worklets
if (sWork->worklets.size() == 0) {
- ALOGW("Work with no worklets.");
+ LOG(DEBUG) << "Work with no worklets.";
} else {
- if (sWork->worklets.size() > 1) {
- ALOGW("Work with multiple worklets. "
- "Only the first worklet will be marshalled.");
- }
- if (!sWork->worklets.front()) {
- ALOGE("Null worklet encountered.");
- return Status::BAD_VALUE;
- }
-
- // Parcel the first worklet.
- const C2Worklet &sWorklet = *sWork->worklets.front();
- Worklet &dWorklet = dWork.worklet;
-
- dWorklet.tunings.resize(sWorklet.tunings.size());
+ // Parcel the worklets.
+ hidl_vec<Worklet> &dWorklets = dWork.worklets;
+ dWorklets.resize(sWork->worklets.size());
size_t j = 0;
- for (const std::unique_ptr<C2Tuning>& sTuning : sWorklet.tunings) {
- status = createParamsBlob(
- &dWorklet.tunings[j++],
- std::vector<C2Param*>
- { reinterpret_cast<C2Param*>(sTuning.get()) });
- if (status != Status::OK) {
- return status;
+ for (const std::unique_ptr<C2Worklet>& sWorklet : sWork->worklets)
+ {
+ if (!sWorklet) {
+ LOG(WARNING) << "Null C2Work::worklets["
+ << j << "].";
+ continue;
}
- }
+ Worklet &dWorklet = dWorklets[j++];
- dWorklet.failures.resize(sWorklet.failures.size());
- j = 0;
- for (const std::unique_ptr<C2SettingResult>& sFailure :
- sWorklet.failures) {
- if (!sFailure) {
- ALOGE("Null C2SettingResult");
- return Status::BAD_VALUE;
- }
- status = objcpy(&dWorklet.failures[j++], *sFailure);
- if (status != Status::OK) {
- return status;
- }
- }
+ // component id
+ dWorklet.componentId = static_cast<uint32_t>(
+ sWorklet->component);
- status = objcpy(&dWorklet.output, sWorklet.output,
- bufferPoolSender, &baseBlocks, &baseBlockIndices);
- if (status != Status::OK) {
- return status;
+ // tunings
+ if (!createParamsBlob(&dWorklet.tunings, sWorklet->tunings)) {
+ LOG(ERROR) << "Invalid C2Work::worklets["
+ << j - 1 << "]->tunings.";
+ return false;
+ }
+
+ // failures
+ dWorklet.failures.resize(sWorklet->failures.size());
+ size_t k = 0;
+ for (const std::unique_ptr<C2SettingResult>& sFailure :
+ sWorklet->failures) {
+ if (!sFailure) {
+ LOG(WARNING) << "Null C2Work::worklets["
+ << j - 1 << "]->failures["
+ << k << "].";
+ continue;
+ }
+ if (!objcpy(&dWorklet.failures[k++], *sFailure)) {
+ LOG(ERROR) << "Invalid C2Work::worklets["
+ << j - 1 << "]->failures["
+ << k - 1 << "].";
+ return false;
+ }
+ }
+
+ // output
+ if (!objcpy(&dWorklet.output, sWorklet->output,
+ bufferPoolSender, &baseBlocks, &baseBlockIndices)) {
+ LOG(ERROR) << "Invalid C2Work::worklets["
+ << j - 1 << "]->output.";
+ return false;
+ }
}
}
- dWork.workletProcessed = sWork->workletsProcessed > 0;
+
+ // worklets processed
+ dWork.workletsProcessed = sWork->workletsProcessed;
+
+ // result
dWork.result = static_cast<Status>(sWork->result);
}
@@ -1040,7 +1159,7 @@
}
}
- return Status::OK;
+ return true;
}
namespace /* unnamed */ {
@@ -1058,15 +1177,15 @@
// hidl_handle -> C2Fence
// Note: File descriptors are not duplicated. The original file descriptor must
// not be closed before the transaction is complete.
-c2_status_t objcpy(C2Fence* d, const hidl_handle& s) {
+bool objcpy(C2Fence* d, const hidl_handle& s) {
// TODO: Implement.
(void)s;
*d = C2Fence();
- return C2_OK;
+ return true;
}
// C2LinearBlock, vector<C2Param*>, C2Fence -> C2Buffer
-c2_status_t createLinearBuffer(
+bool createLinearBuffer(
std::shared_ptr<C2Buffer>* buffer,
const std::shared_ptr<C2LinearBlock>& block,
const std::vector<C2Param*>& meta,
@@ -1074,12 +1193,12 @@
// Check the block meta. It should have exactly 1 C2Info:
// C2Hidl_RangeInfo.
if ((meta.size() != 1) || !meta[0]) {
- ALOGE("Invalid block metadata for ion block.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid C2LinearBlock::meta.";
+ return false;
}
if (meta[0]->size() != sizeof(C2Hidl_RangeInfo)) {
- ALOGE("Invalid block metadata for ion block: range.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid range info in C2LinearBlock.";
+ return false;
}
C2Hidl_RangeInfo *rangeInfo =
reinterpret_cast<C2Hidl_RangeInfo*>(meta[0]);
@@ -1089,14 +1208,14 @@
rangeInfo->offset, rangeInfo->length,
fence));
if (!(*buffer)) {
- ALOGE("Cannot create a linear buffer.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "CreateLinearBuffer failed.";
+ return false;
}
- return C2_OK;
+ return true;
}
// C2GraphicBlock, vector<C2Param*>, C2Fence -> C2Buffer
-c2_status_t createGraphicBuffer(
+bool createGraphicBuffer(
std::shared_ptr<C2Buffer>* buffer,
const std::shared_ptr<C2GraphicBlock>& block,
const std::vector<C2Param*>& meta,
@@ -1104,12 +1223,12 @@
// Check the block meta. It should have exactly 1 C2Info:
// C2Hidl_RectInfo.
if ((meta.size() != 1) || !meta[0]) {
- ALOGE("Invalid block metadata for graphic block.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid C2GraphicBlock::meta.";
+ return false;
}
if (meta[0]->size() != sizeof(C2Hidl_RectInfo)) {
- ALOGE("Invalid block metadata for graphic block: crop rect.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid rect info in C2GraphicBlock.";
+ return false;
}
C2Hidl_RectInfo *rectInfo =
reinterpret_cast<C2Hidl_RectInfo*>(meta[0]);
@@ -1120,136 +1239,144 @@
at(rectInfo->left, rectInfo->top),
fence));
if (!(*buffer)) {
- ALOGE("Cannot create a graphic buffer.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "CreateGraphicBuffer failed.";
+ return false;
}
- return C2_OK;
+ return true;
}
// Buffer -> C2Buffer
// Note: The native handles will be cloned.
-c2_status_t objcpy(std::shared_ptr<C2Buffer>* d, const Buffer& s,
+bool objcpy(std::shared_ptr<C2Buffer>* d, const Buffer& s,
const std::vector<C2BaseBlock>& baseBlocks) {
- c2_status_t status;
*d = nullptr;
// Currently, a non-null C2Buffer must contain exactly 1 block.
if (s.blocks.size() == 0) {
- return C2_OK;
+ return true;
} else if (s.blocks.size() != 1) {
- ALOGE("Currently, a C2Buffer must contain exactly 1 block.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid Buffer: "
+ "Currently, a C2Buffer must contain exactly 1 block.";
+ return false;
}
const Block &sBlock = s.blocks[0];
if (sBlock.index >= baseBlocks.size()) {
- ALOGE("Index into baseBlocks is out of range.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid Buffer::blocks[0].index: "
+ "Array index out of range.";
+ return false;
}
const C2BaseBlock &baseBlock = baseBlocks[sBlock.index];
// Parse meta.
std::vector<C2Param*> sBlockMeta;
- status = parseParamsBlob(&sBlockMeta, sBlock.meta);
- if (status != C2_OK) {
- ALOGE("Invalid block params blob.");
- return C2_BAD_VALUE;
+ if (!parseParamsBlob(&sBlockMeta, sBlock.meta)) {
+ LOG(ERROR) << "Invalid Buffer::blocks[0].meta.";
+ return false;
}
// Copy fence.
C2Fence dFence;
- status = objcpy(&dFence, sBlock.fence);
+ if (!objcpy(&dFence, sBlock.fence)) {
+ LOG(ERROR) << "Invalid Buffer::blocks[0].fence.";
+ return false;
+ }
// Construct a block.
switch (baseBlock.type) {
case C2BaseBlock::LINEAR:
- status = createLinearBuffer(d, baseBlock.linear, sBlockMeta, dFence);
+ if (!createLinearBuffer(d, baseBlock.linear, sBlockMeta, dFence)) {
+ LOG(ERROR) << "Invalid C2BaseBlock::linear.";
+ return false;
+ }
break;
case C2BaseBlock::GRAPHIC:
- status = createGraphicBuffer(d, baseBlock.graphic, sBlockMeta, dFence);
+ if (!createGraphicBuffer(d, baseBlock.graphic, sBlockMeta, dFence)) {
+ LOG(ERROR) << "Invalid C2BaseBlock::graphic.";
+ return false;
+ }
break;
default:
- ALOGE("Invalid BaseBlock type.");
- return C2_BAD_VALUE;
- }
- if (status != C2_OK) {
- return status;
+ LOG(ERROR) << "Invalid C2BaseBlock::type.";
+ return false;
}
// Parse info
std::vector<C2Param*> params;
- status = parseParamsBlob(&params, s.info);
- if (status != C2_OK) {
- ALOGE("Invalid buffer params blob.");
- return status;
+ if (!parseParamsBlob(&params, s.info)) {
+ LOG(ERROR) << "Invalid Buffer::info.";
+ return false;
}
for (C2Param* param : params) {
if (param == nullptr) {
- ALOGE("Null buffer param encountered.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Null param in Buffer::info.";
+ return false;
}
- std::shared_ptr<C2Param> c2param(
- C2Param::Copy(*param).release());
+ std::shared_ptr<C2Param> c2param{
+ C2Param::Copy(*param).release()};
if (!c2param) {
- ALOGE("Invalid buffer param inside a blob.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Invalid param in Buffer::info.";
+ return false;
}
- status = (*d)->setInfo(std::static_pointer_cast<C2Info>(c2param));
+ c2_status_t status =
+ (*d)->setInfo(std::static_pointer_cast<C2Info>(c2param));
if (status != C2_OK) {
- ALOGE("C2Buffer::setInfo failed().");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "C2Buffer::setInfo failed.";
+ return false;
}
}
- return C2_OK;
+ return true;
}
// FrameData -> C2FrameData
-c2_status_t objcpy(C2FrameData* d, const FrameData& s,
+bool objcpy(C2FrameData* d, const FrameData& s,
const std::vector<C2BaseBlock>& baseBlocks) {
- c2_status_t status;
d->flags = static_cast<C2FrameData::flags_t>(s.flags);
- objcpy(&d->ordinal, s.ordinal);
+ if (!objcpy(&d->ordinal, s.ordinal)) {
+ LOG(ERROR) << "Invalid FrameData::ordinal.";
+ return false;
+ }
d->buffers.clear();
d->buffers.reserve(s.buffers.size());
for (const Buffer& sBuffer : s.buffers) {
std::shared_ptr<C2Buffer> dBuffer;
- status = objcpy(&dBuffer, sBuffer, baseBlocks);
- if (status != C2_OK) {
- return status;
+ if (!objcpy(&dBuffer, sBuffer, baseBlocks)) {
+ LOG(ERROR) << "Invalid FrameData::buffers.";
+ return false;
}
d->buffers.emplace_back(dBuffer);
}
std::vector<C2Param*> params;
- status = parseParamsBlob(&params, s.configUpdate);
- if (status != C2_OK) {
- ALOGE("Failed to parse frame data params.");
- return status;
+ if (!parseParamsBlob(&params, s.configUpdate)) {
+ LOG(ERROR) << "Invalid FrameData::configUpdate.";
+ return false;
}
d->configUpdate.clear();
for (C2Param* param : params) {
d->configUpdate.emplace_back(C2Param::Copy(*param));
if (!d->configUpdate.back()) {
- ALOGE("Unexpected error while parsing frame data params.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Unexpected error while parsing "
+ "FrameData::configUpdate.";
+ return false;
}
}
// TODO: Implement this once C2InfoBuffer has constructors.
d->infoBuffers.clear();
- return C2_OK;
+ return true;
}
// BaseBlock -> C2BaseBlock
-c2_status_t objcpy(C2BaseBlock* d, const BaseBlock& s) {
- switch (s.type) {
- case BaseBlock::Type::NATIVE: {
+bool objcpy(C2BaseBlock* d, const BaseBlock& s) {
+ switch (s.getDiscriminator()) {
+ case BaseBlock::hidl_discriminator::nativeBlock: {
native_handle_t* sHandle =
- native_handle_clone(s.nativeBlock);
+ native_handle_clone(s.nativeBlock());
if (sHandle == nullptr) {
- ALOGE("Null native handle in a block.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Null BaseBlock::nativeBlock.";
+ return false;
}
const C2Handle *sC2Handle =
reinterpret_cast<const C2Handle*>(sHandle);
@@ -1257,25 +1384,25 @@
d->linear = _C2BlockFactory::CreateLinearBlock(sC2Handle);
if (d->linear) {
d->type = C2BaseBlock::LINEAR;
- return C2_OK;
+ return true;
}
d->graphic = _C2BlockFactory::CreateGraphicBlock(sC2Handle);
if (d->graphic) {
d->type = C2BaseBlock::GRAPHIC;
- return C2_OK;
+ return true;
}
- ALOGE("Unknown handle type in native BaseBlock.");
+ LOG(ERROR) << "Unknown handle type in BaseBlock::nativeBlock.";
if (sHandle) {
native_handle_close(sHandle);
native_handle_delete(sHandle);
}
- return C2_BAD_VALUE;
+ return false;
}
- case BaseBlock::Type::POOLED: {
+ case BaseBlock::hidl_discriminator::pooledBlock: {
const BufferStatusMessage &bpMessage =
- s.pooledBlock;
+ s.pooledBlock();
sp<ClientManager> bp = ClientManager::getInstance();
std::shared_ptr<BufferPoolData> bpData;
native_handle_t *cHandle;
@@ -1287,48 +1414,49 @@
&cHandle,
&bpData);
if (bpStatus != ResultStatus::OK) {
- ALOGE("Failed to receive buffer from bufferpool -- "
- "resultStatus = %d",
- static_cast<int>(bpStatus));
- return toC2Status(bpStatus);
+ LOG(ERROR) << "Failed to receive buffer from bufferpool -- "
+ << "resultStatus = " << underlying_value(bpStatus)
+ << ".";
+ return false;
} else if (!bpData) {
- ALOGE("No data in bufferpool transaction.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "No data in bufferpool transaction.";
+ return false;
}
d->linear = _C2BlockFactory::CreateLinearBlock(cHandle, bpData);
if (d->linear) {
d->type = C2BaseBlock::LINEAR;
- return C2_OK;
+ return true;
}
d->graphic = _C2BlockFactory::CreateGraphicBlock(cHandle, bpData);
if (d->graphic) {
d->type = C2BaseBlock::GRAPHIC;
- return C2_OK;
+ return true;
}
- ALOGE("Unknown handle type in pooled BaseBlock.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Unknown handle type in BaseBlock::pooledBlock.";
+ return false;
}
default:
- ALOGE("Corrupted BaseBlock type: %d", static_cast<int>(s.type));
- return C2_BAD_VALUE;
+ LOG(ERROR) << "Unrecognized BaseBlock's discriminator with "
+ << "underlying value "
+ << underlying_value(s.getDiscriminator()) << ".";
+ return false;
}
}
} // unnamed namespace
// WorkBundle -> std::list<std::unique_ptr<C2Work>>
-c2_status_t objcpy(std::list<std::unique_ptr<C2Work>>* d, const WorkBundle& s) {
- c2_status_t status;
-
+bool objcpy(std::list<std::unique_ptr<C2Work>>* d, const WorkBundle& s) {
// Convert BaseBlocks to C2BaseBlocks.
std::vector<C2BaseBlock> dBaseBlocks(s.baseBlocks.size());
for (size_t i = 0; i < s.baseBlocks.size(); ++i) {
- status = objcpy(&dBaseBlocks[i], s.baseBlocks[i]);
- if (status != C2_OK) {
- return status;
+ if (!objcpy(&dBaseBlocks[i], s.baseBlocks[i])) {
+ LOG(ERROR) << "Invalid WorkBundle::baseBlocks["
+ << i << "].";
+ return false;
}
}
@@ -1337,74 +1465,58 @@
d->emplace_back(std::make_unique<C2Work>());
C2Work& dWork = *d->back();
+ // chain info is not in use currently.
+
// input
- status = objcpy(&dWork.input, sWork.input, dBaseBlocks);
- if (status != C2_OK) {
- ALOGE("Error constructing C2Work's input.");
- return C2_BAD_VALUE;
+ if (!objcpy(&dWork.input, sWork.input, dBaseBlocks)) {
+ LOG(ERROR) << "Invalid Work::input.";
+ return false;
}
// worklet(s)
dWork.worklets.clear();
- // TODO: Currently, tunneling is not supported.
- if (sWork.workletProcessed) {
- dWork.workletsProcessed = 1;
-
- const Worklet &sWorklet = sWork.worklet;
+ for (const Worklet& sWorklet : sWork.worklets) {
std::unique_ptr<C2Worklet> dWorklet = std::make_unique<C2Worklet>();
+ // component id
+ dWorklet->component = static_cast<c2_node_id_t>(
+ sWorklet.componentId);
+
// tunings
- dWorklet->tunings.clear();
- dWorklet->tunings.reserve(sWorklet.tunings.size());
- for (const Params& sTuning : sWorklet.tunings) {
- std::vector<C2Param*> dParams;
- status = parseParamsBlob(&dParams, sTuning);
- if (status != C2_OK) {
- ALOGE("Failed to parse C2Tuning in C2Worklet.");
- return C2_BAD_VALUE;
- }
- for (C2Param* param : dParams) {
- std::unique_ptr<C2Param> dParam = C2Param::Copy(*param);
- if (!dParam) {
- ALOGE("Null C2Tuning encountered while "
- "parsing C2Worklet.");
- return C2_BAD_VALUE;
- }
- dWorklet->tunings.emplace_back(
- std::unique_ptr<C2Tuning>(
- reinterpret_cast<C2Tuning*>(
- dParam.release())));
- }
+ if (!copyParamsFromBlob(&dWorklet->tunings, sWorklet.tunings)) {
+ LOG(ERROR) << "Invalid Worklet::tunings";
+ return false;
}
+
// failures
dWorklet->failures.clear();
dWorklet->failures.reserve(sWorklet.failures.size());
for (const SettingResult& sFailure : sWorklet.failures) {
std::unique_ptr<C2SettingResult> dFailure;
- status = objcpy(&dFailure, sFailure);
- if (status != C2_OK) {
- ALOGE("Failed to create C2SettingResult in C2Worklet.");
- return C2_BAD_VALUE;
+ if (!objcpy(&dFailure, sFailure)) {
+ LOG(ERROR) << "Invalid Worklet::failures.";
+ return false;
}
dWorklet->failures.emplace_back(std::move(dFailure));
}
+
// output
- status = objcpy(&dWorklet->output, sWorklet.output, dBaseBlocks);
- if (status != C2_OK) {
- ALOGE("Failed to create output C2FrameData.");
- return C2_BAD_VALUE;
+ if (!objcpy(&dWorklet->output, sWorklet.output, dBaseBlocks)) {
+ LOG(ERROR) << "Invalid Worklet::output.";
+ return false;
}
+
dWork.worklets.emplace_back(std::move(dWorklet));
- } else {
- dWork.worklets.emplace_back(std::make_unique<C2Worklet>());
- dWork.workletsProcessed = 0;
}
+ // workletsProcessed
+ dWork.workletsProcessed = sWork.workletsProcessed;
+
// result
dWork.result = static_cast<c2_status_t>(sWork.result);
}
- return C2_OK;
+ return true;
}
constexpr size_t PARAMS_ALIGNMENT = 8; // 64-bit alignment
@@ -1413,7 +1525,7 @@
static_assert(PARAMS_ALIGNMENT % alignof(C2Tuning) == 0, "C2Param alignment mismatch");
// Params -> std::vector<C2Param*>
-c2_status_t parseParamsBlob(std::vector<C2Param*> *params, const hidl_vec<uint8_t> &blob) {
+bool parseParamsBlob(std::vector<C2Param*> *params, const hidl_vec<uint8_t> &blob) {
// assuming blob is const here
size_t size = blob.size();
size_t ix = 0;
@@ -1429,21 +1541,27 @@
}
} while (p);
- return ix == size ? C2_OK : C2_BAD_VALUE;
+ if (ix != size) {
+ LOG(ERROR) << "parseParamsBlob -- inconsistent sizes.";
+ return false;
+ }
+ return true;
}
namespace /* unnamed */ {
/**
- * Concatenates a list of C2Params into a params blob.
+ * Concatenates a list of C2Params into a params blob. T is a container type
+ * whose member type is compatible with C2Param*.
+ *
* \param[out] blob target blob
* \param[in] params parameters to concatenate
* \retval C2_OK if the blob was successfully created
- * \retval C2_BAD_VALUE if the blob was not successful (this only happens if the parameters were
- * not const)
+ * \retval C2_BAD_VALUE if the blob was not successfully created (this only
+ * happens if the parameters were not const)
*/
-template<typename T>
-Status _createParamsBlob(hidl_vec<uint8_t> *blob, const T &params) {
+template <typename T>
+bool _createParamsBlob(hidl_vec<uint8_t> *blob, const T &params) {
// assuming the parameter values are const
size_t size = 0;
for (const auto &p : params) {
@@ -1469,77 +1587,106 @@
ix = align(ix, PARAMS_ALIGNMENT);
}
blob->resize(ix);
- return ix == size ? Status::OK : Status::CORRUPTED;
+ if (ix != size) {
+ LOG(ERROR) << "createParamsBlob -- inconsistent sizes.";
+ return false;
+ }
+ return true;
+}
+
+/**
+ * Parses a params blob and creates a vector of new T objects that contain copies
+ * of the params in the blob. T is C2Param or its compatible derived class.
+ *
+ * \param[out] params the resulting vector
+ * \param[in] blob parameter blob to parse
+ * \retval C2_OK if the full blob was parsed and params was constructed
+ * \retval C2_BAD_VALUE otherwise
+ */
+template <typename T>
+bool _copyParamsFromBlob(
+ std::vector<std::unique_ptr<T>>* params,
+ Params blob) {
+
+ std::vector<C2Param*> paramPointers;
+ if (!parseParamsBlob(&paramPointers, blob)) {
+ LOG(ERROR) << "copyParamsFromBlob -- failed to parse.";
+ return false;
+ }
+
+ params->resize(paramPointers.size());
+ size_t i = 0;
+ for (C2Param* const& paramPointer : paramPointers) {
+ if (!paramPointer) {
+ LOG(ERROR) << "copyParamsFromBlob -- null paramPointer.";
+ return false;
+ }
+ (*params)[i++].reset(reinterpret_cast<T*>(
+ C2Param::Copy(*paramPointer).release()));
+ }
+ return true;
}
} // unnamed namespace
// std::vector<const C2Param*> -> Params
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<const C2Param*> &params) {
return _createParamsBlob(blob, params);
}
// std::vector<C2Param*> -> Params
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<C2Param*> &params) {
return _createParamsBlob(blob, params);
}
// std::vector<std::unique_ptr<C2Param>> -> Params
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<std::unique_ptr<C2Param>> &params) {
return _createParamsBlob(blob, params);
}
// std::vector<std::unique_ptr<C2Tuning>> -> Params
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<std::unique_ptr<C2Tuning>> &params) {
return _createParamsBlob(blob, params);
}
// std::vector<std::shared_ptr<const C2Info>> -> Params
-Status createParamsBlob(
+bool createParamsBlob(
hidl_vec<uint8_t> *blob,
const std::vector<std::shared_ptr<const C2Info>> &params) {
return _createParamsBlob(blob, params);
}
// Params -> std::vector<std::unique_ptr<C2Param>>
-c2_status_t copyParamsFromBlob(
+bool copyParamsFromBlob(
std::vector<std::unique_ptr<C2Param>>* params,
Params blob) {
- std::vector<C2Param*> paramPointers;
- c2_status_t status = parseParamsBlob(&paramPointers, blob);
- if (status != C2_OK) {
- ALOGE("copyParamsFromBlob -- blob parsing failed.");
- return status;
- }
- params->resize(paramPointers.size());
- size_t i = 0;
- for (C2Param* const& paramPointer : paramPointers) {
- if (!paramPointer) {
- ALOGE("copyParamsFromBlob -- corrupted params blob.");
- return C2_BAD_VALUE;
- }
- (*params)[i++] = C2Param::Copy(*paramPointer);
- }
- return C2_OK;
+ return _copyParamsFromBlob(params, blob);
+}
+
+// Params -> std::vector<std::unique_ptr<C2Tuning>>
+bool copyParamsFromBlob(
+ std::vector<std::unique_ptr<C2Tuning>>* params,
+ Params blob) {
+ return _copyParamsFromBlob(params, blob);
}
// Params -> update std::vector<std::unique_ptr<C2Param>>
-c2_status_t updateParamsFromBlob(
+bool updateParamsFromBlob(
const std::vector<C2Param*>& params,
const Params& blob) {
std::unordered_map<uint32_t, C2Param*> index2param;
for (C2Param* const& param : params) {
if (!param) {
- ALOGE("updateParamsFromBlob -- corrupted input params.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "updateParamsFromBlob -- null output param.";
+ return false;
}
if (index2param.find(param->index()) == index2param.end()) {
index2param.emplace(param->index(), param);
@@ -1547,33 +1694,31 @@
}
std::vector<C2Param*> paramPointers;
- c2_status_t status = parseParamsBlob(&paramPointers, blob);
- if (status != C2_OK) {
- ALOGE("updateParamsFromBlob -- blob parsing failed.");
- return status;
+ if (!parseParamsBlob(&paramPointers, blob)) {
+ LOG(ERROR) << "updateParamsFromBlob -- failed to parse.";
+ return false;
}
for (C2Param* const& paramPointer : paramPointers) {
if (!paramPointer) {
- ALOGE("updateParamsFromBlob -- corrupted param in blob.");
- return C2_BAD_VALUE;
+ LOG(ERROR) << "updateParamsFromBlob -- null input param.";
+ return false;
}
decltype(index2param)::iterator i = index2param.find(
paramPointer->index());
if (i == index2param.end()) {
- ALOGW("updateParamsFromBlob -- unseen param index.");
+ LOG(DEBUG) << "updateParamsFromBlob -- index "
+ << paramPointer->index() << " not found. Skipping...";
continue;
}
if (!i->second->updateFrom(*paramPointer)) {
- ALOGE("updateParamsFromBlob -- mismatching sizes: "
- "%u vs %u (index = %u).",
- static_cast<unsigned>(params.size()),
- static_cast<unsigned>(paramPointer->size()),
- static_cast<unsigned>(i->first));
- return C2_BAD_VALUE;
+ LOG(ERROR) << "updateParamsFromBlob -- size mismatch: "
+ << params.size() << " vs " << paramPointer->size()
+ << " (index = " << i->first << ").";
+ return false;
}
}
- return C2_OK;
+ return true;
}
// Convert BufferPool ResultStatus to c2_status_t.
@@ -1590,7 +1735,8 @@
case ResultStatus::CRITICAL_ERROR:
return C2_CORRUPTED;
default:
- ALOGW("Unrecognized BufferPool ResultStatus: %d", static_cast<int>(rs));
+ LOG(WARNING) << "Unrecognized BufferPool ResultStatus: "
+ << static_cast<int32_t>(rs) << ".";
return C2_CORRUPTED;
}
}
@@ -1657,7 +1803,8 @@
}
sp<HGraphicBufferProducer> getHgbp(const sp<IGraphicBufferProducer>& igbp) {
- sp<HGraphicBufferProducer> hgbp = igbp->getHalInterface();
+ sp<HGraphicBufferProducer> hgbp =
+ igbp->getHalInterface<HGraphicBufferProducer>();
return hgbp ? hgbp :
new TWGraphicBufferProducer<HGraphicBufferProducer>(igbp);
}
@@ -1669,35 +1816,34 @@
uint32_t generation,
int32_t* bqSlot) {
if (!igbp) {
- ALOGW("attachToBufferQueue -- null producer.");
+ LOG(WARNING) << "attachToBufferQueue -- null producer.";
return NO_INIT;
}
sp<GraphicBuffer> graphicBuffer = createGraphicBuffer(block);
graphicBuffer->setGenerationNumber(generation);
- ALOGV("attachToBufferQueue -- attaching buffer: "
- "block dimension %ux%u, "
- "graphicBuffer dimension %ux%u, "
- "format %#x, usage %#llx, stride %u, generation %u.",
- static_cast<unsigned>(block.width()),
- static_cast<unsigned>(block.height()),
- static_cast<unsigned>(graphicBuffer->getWidth()),
- static_cast<unsigned>(graphicBuffer->getHeight()),
- static_cast<unsigned>(graphicBuffer->getPixelFormat()),
- static_cast<unsigned long long>(graphicBuffer->getUsage()),
- static_cast<unsigned>(graphicBuffer->getStride()),
- static_cast<unsigned>(graphicBuffer->getGenerationNumber()));
+ LOG(VERBOSE) << "attachToBufferQueue -- attaching buffer:"
+ << " block dimension " << block.width() << "x"
+ << block.height()
+ << ", graphicBuffer dimension " << graphicBuffer->getWidth() << "x"
+ << graphicBuffer->getHeight()
+ << std::hex << std::setfill('0')
+ << ", format 0x" << std::setw(8) << graphicBuffer->getPixelFormat()
+ << ", usage 0x" << std::setw(16) << graphicBuffer->getUsage()
+ << std::dec << std::setfill(' ')
+ << ", stride " << graphicBuffer->getStride()
+ << ", generation " << graphicBuffer->getGenerationNumber();
status_t result = igbp->attachBuffer(bqSlot, graphicBuffer);
if (result != OK) {
- ALOGW("attachToBufferQueue -- attachBuffer failed. Error code = %d",
- static_cast<int>(result));
- return false;
+ LOG(WARNING) << "attachToBufferQueue -- attachBuffer failed: "
+ "status = " << result << ".";
+ return result;
}
- ALOGV("attachToBufferQueue -- attachBuffer returned slot %d",
- static_cast<int>(*bqSlot));
- return true;
+ LOG(VERBOSE) << "attachToBufferQueue -- attachBuffer returned slot #"
+ << *bqSlot << ".";
+ return OK;
}
bool getBufferQueueAssignment(const C2ConstGraphicBlock& block,
@@ -1747,11 +1893,11 @@
// If the block's bqId is the same as the desired bqId, just hold.
if ((oldId == bqId) && (oldGeneration == generation)) {
- ALOGV("holdBufferQueueBlock -- import without attaching: "
- "bqId %llu, bqSlot %d, generation %u.",
- static_cast<long long unsigned>(oldId),
- static_cast<int>(oldSlot),
- static_cast<unsigned>(generation));
+ LOG(VERBOSE) << "holdBufferQueueBlock -- import without attaching:"
+ << " bqId " << oldId
+ << ", bqSlot " << oldSlot
+ << ", generation " << generation
+ << ".";
_C2BlockFactory::HoldBlockFromBufferQueue(data, getHgbp(igbp));
return true;
}
@@ -1765,21 +1911,20 @@
status_t result = attachToBufferQueue(block, igbp, generation, &bqSlot);
if (result != OK) {
- ALOGE("holdBufferQueueBlock -- fail to attach: "
- "target bqId %llu, generation %u.",
- static_cast<long long unsigned>(bqId),
- static_cast<unsigned>(generation));
-
+ LOG(ERROR) << "holdBufferQueueBlock -- fail to attach:"
+ << " target bqId " << bqId
+ << ", generation " << generation
+ << ".";
return false;
}
- ALOGV("holdBufferQueueBlock -- attached: "
- "bqId %llu, bqSlot %d, generation %u.",
- static_cast<long long unsigned>(bqId),
- static_cast<int>(bqSlot),
- static_cast<unsigned>(generation));
+ LOG(VERBOSE) << "holdBufferQueueBlock -- attached:"
+ << " bqId " << bqId
+ << ", bqSlot " << bqSlot
+ << ", generation " << generation
+ << ".";
_C2BlockFactory::AssignBlockToBufferQueue(
- data, getHgbp(igbp), bqId, bqSlot, true);
+ data, getHgbp(igbp), generation, bqId, bqSlot, true);
return true;
}
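For reference only, not part of the patch: a minimal sketch of how the bool-returning param-blob helpers introduced above compose into a round trip. The concrete param type (C2StreamSampleRateInfo) and its values are illustrative assumptions, and Params is taken to be the hidl_vec<uint8_t> alias used throughout this file.

// Illustrative sketch: serialize one C2Param into a blob with
// createParamsBlob(), then recover owned copies with copyParamsFromBlob().
// The sample-rate param and its values are example choices only.
bool roundTripParamsExample() {
    C2StreamSampleRateInfo::output sampleRate(0u /* stream */, 48000);
    ::android::hardware::hidl_vec<uint8_t> blob;
    if (!createParamsBlob(&blob, std::vector<C2Param*>{ &sampleRate })) {
        LOG(ERROR) << "createParamsBlob failed.";
        return false;
    }
    std::vector<std::unique_ptr<C2Param>> copies;
    if (!copyParamsFromBlob(&copies, blob)) {
        LOG(ERROR) << "copyParamsFromBlob failed.";
        return false;
    }
    // Expect exactly one non-null copy back.
    return copies.size() == 1 && copies[0] != nullptr;
}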
diff --git a/media/codec2/hidl/1.0/vts/OWNERS b/media/codec2/hidl/1.0/vts/OWNERS
new file mode 100644
index 0000000..6733e0c
--- /dev/null
+++ b/media/codec2/hidl/1.0/vts/OWNERS
@@ -0,0 +1,9 @@
+# Media team
+lajos@google.com
+pawin@google.com
+taklee@google.com
+wonsik@google.com
+
+# VTS team
+yim@google.com
+zhuoyao@google.com
diff --git a/media/codec2/hidl/1.0/vts/audio/Android.bp b/media/codec2/hidl/1.0/vts/functional/audio/Android.bp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/audio/Android.bp
rename to media/codec2/hidl/1.0/vts/functional/audio/Android.bp
diff --git a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
similarity index 96%
rename from media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
index d4b973f..d3b37d7 100644
--- a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
@@ -272,7 +272,7 @@
}
// Set Default config param.
-void setupConfigParam(
+bool setupConfigParam(
const std::shared_ptr<android::Codec2Client::Component>& component,
int32_t* bitStreamInfo) {
std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -282,8 +282,8 @@
std::vector<C2Param*> configParam{&sampleRateInfo, &channelCountInfo};
c2_status_t status =
component->config(configParam, C2_DONT_BLOCK, &failures);
- ASSERT_EQ(failures.size(), 0u);
- ASSERT_EQ(status, C2_OK);
+ if (status == C2_OK && failures.size() == 0u) return true;
+ return false;
}
// In decoder components, often the input parameters get updated upon
@@ -557,7 +557,11 @@
ASSERT_NO_FATAL_FAILURE(
getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
}
- setupConfigParam(mComponent, bitStreamInfo);
+ if (!setupConfigParam(mComponent, bitStreamInfo)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
ALOGV("mURL : %s", mURL);
eleStream.open(mURL, std::ifstream::binary);
ASSERT_EQ(eleStream.is_open(), true);
@@ -613,7 +617,6 @@
description("Test Request for thumbnail");
if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
char mURL[512], info[512];
std::ifstream eleStream, eleInfo;
@@ -642,7 +645,11 @@
ASSERT_NO_FATAL_FAILURE(
getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
}
- setupConfigParam(mComponent, bitStreamInfo);
+ if (!setupConfigParam(mComponent, bitStreamInfo)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
ALOGV("mURL : %s", mURL);
// request EOS for thumbnail
@@ -711,7 +718,6 @@
description("Tests Flush calls");
if (mDisableTest) return;
typedef std::unique_lock<std::mutex> ULock;
- ASSERT_EQ(mComponent->start(), C2_OK);
char mURL[512], info[512];
std::ifstream eleStream, eleInfo;
@@ -741,7 +747,11 @@
ASSERT_NO_FATAL_FAILURE(
getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
}
- setupConfigParam(mComponent, bitStreamInfo);
+ if (!setupConfigParam(mComponent, bitStreamInfo)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
ALOGV("mURL : %s", mURL);
eleStream.open(mURL, std::ifstream::binary);
ASSERT_EQ(eleStream.is_open(), true);
@@ -833,8 +843,6 @@
description("Decode with multiple empty input frames");
if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
-
char mURL[512], info[512];
std::ifstream eleStream, eleInfo;
@@ -868,7 +876,19 @@
frameId++;
}
eleInfo.close();
-
+ int32_t bitStreamInfo[2] = {0};
+ if (mCompName == raw) {
+ bitStreamInfo[0] = 8000;
+ bitStreamInfo[1] = 1;
+ } else {
+ ASSERT_NO_FATAL_FAILURE(
+ getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
+ }
+ if (!setupConfigParam(mComponent, bitStreamInfo)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
ALOGV("mURL : %s", mURL);
eleStream.open(mURL, std::ifstream::binary);
ASSERT_EQ(eleStream.is_open(), true);
diff --git a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
similarity index 97%
rename from media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
index 5d66ee5..a74d43e 100644
--- a/media/codec2/hidl/1.0/vts/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
@@ -219,7 +219,7 @@
}
// Set Default config param.
-void setupConfigParam(
+bool setupConfigParam(
const std::shared_ptr<android::Codec2Client::Component>& component,
int32_t nChannels, int32_t nSampleRate) {
std::vector<std::unique_ptr<C2SettingResult>> failures;
@@ -229,8 +229,8 @@
std::vector<C2Param*> configParam{&sampleRateInfo, &channelCountInfo};
c2_status_t status =
component->config(configParam, C2_DONT_BLOCK, &failures);
- ASSERT_EQ(failures.size(), 0u);
- ASSERT_EQ(status, C2_OK);
+ if (status == C2_OK && failures.size() == 0u) return true;
+ return false;
}
// LookUpTable of clips and metadata for component testing
@@ -358,7 +358,6 @@
TEST_F(Codec2AudioEncHidlTest, EncodeTest) {
ALOGV("EncodeTest");
if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
char mURL[512];
strcpy(mURL, gEnv->getRes().c_str());
GetURLForComponent(mCompName, mURL);
@@ -396,7 +395,11 @@
default:
ASSERT_TRUE(false);
}
- setupConfigParam(mComponent, nChannels, nSampleRate);
+ if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
std::ifstream eleStream;
uint32_t numFrames = 128;
eleStream.open(mURL, std::ifstream::binary);
@@ -469,7 +472,6 @@
TEST_F(Codec2AudioEncHidlTest, FlushTest) {
description("Test Request for flush");
if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
typedef std::unique_lock<std::mutex> ULock;
char mURL[512];
@@ -510,7 +512,13 @@
default:
ASSERT_TRUE(false);
}
- setupConfigParam(mComponent, nChannels, nSampleRate);
+
+ if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
+
std::ifstream eleStream;
uint32_t numFramesFlushed = 30;
uint32_t numFrames = 128;
diff --git a/media/codec2/hidl/1.0/vts/audio/media_c2_audio_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/audio/media_c2_audio_hidl_test_common.h
similarity index 100%
rename from media/codec2/hidl/1.0/vts/audio/media_c2_audio_hidl_test_common.h
rename to media/codec2/hidl/1.0/vts/functional/audio/media_c2_audio_hidl_test_common.h
diff --git a/media/codec2/hidl/1.0/vts/common/Android.bp b/media/codec2/hidl/1.0/vts/functional/common/Android.bp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/common/Android.bp
rename to media/codec2/hidl/1.0/vts/functional/common/Android.bp
diff --git a/media/codec2/hidl/1.0/vts/common/README.md b/media/codec2/hidl/1.0/vts/functional/common/README.md
similarity index 100%
rename from media/codec2/hidl/1.0/vts/common/README.md
rename to media/codec2/hidl/1.0/vts/functional/common/README.md
diff --git a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
similarity index 79%
rename from media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.cpp
rename to media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index 64a458c..1f36270 100644
--- a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -93,36 +93,38 @@
std::vector<std::unique_ptr<C2SettingResult>> failures;
for (size_t i = 0; i < updates.size(); ++i) {
C2Param* param = updates[i].get();
- if (param->index() == C2StreamCsdInfo::output::PARAM_TYPE) {
+ if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
csd = true;
} else if ((param->index() ==
C2StreamSampleRateInfo::output::PARAM_TYPE) ||
(param->index() ==
C2StreamChannelCountInfo::output::PARAM_TYPE) ||
(param->index() ==
- C2VideoSizeStreamInfo::output::PARAM_TYPE)) {
+ C2StreamPictureSizeInfo::output::PARAM_TYPE)) {
configParam.push_back(param);
}
}
component->config(configParam, C2_DONT_BLOCK, &failures);
ASSERT_EQ(failures.size(), 0u);
}
- framesReceived++;
- eos = (work->worklets.front()->output.flags &
- C2FrameData::FLAG_END_OF_STREAM) != 0;
- auto frameIndexIt = std::find(flushedIndices.begin(), flushedIndices.end(),
- work->input.ordinal.frameIndex.peeku());
- ALOGV("WorkDone: frameID received %d",
- (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
- work->input.buffers.clear();
- work->worklets.clear();
- {
- typedef std::unique_lock<std::mutex> ULock;
- ULock l(queueLock);
- workQueue.push_back(std::move(work));
- if (!flushedIndices.empty()) {
- flushedIndices.erase(frameIndexIt);
+ if (work->worklets.front()->output.flags != C2FrameData::FLAG_INCOMPLETE) {
+ framesReceived++;
+ eos = (work->worklets.front()->output.flags &
+ C2FrameData::FLAG_END_OF_STREAM) != 0;
+ auto frameIndexIt = std::find(flushedIndices.begin(), flushedIndices.end(),
+ work->input.ordinal.frameIndex.peeku());
+ ALOGV("WorkDone: frameID received %d",
+ (int)work->worklets.front()->output.ordinal.frameIndex.peeku());
+ work->input.buffers.clear();
+ work->worklets.clear();
+ {
+ typedef std::unique_lock<std::mutex> ULock;
+ ULock l(queueLock);
+ workQueue.push_back(std::move(work));
+ if (!flushedIndices.empty()) {
+ flushedIndices.erase(frameIndexIt);
+ }
+ queueCondition.notify_all();
}
- queueCondition.notify_all();
}
}
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
similarity index 93%
rename from media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.h
rename to media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index a688530..fca2902 100644
--- a/media/codec2/hidl/1.0/vts/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -55,12 +55,10 @@
: callBack(fn) {}
virtual void onWorkDone(
const std::weak_ptr<android::Codec2Client::Component>& comp,
- std::list<std::unique_ptr<C2Work>>& workItems,
- size_t numDiscardedInputBuffers) override {
+ std::list<std::unique_ptr<C2Work>>& workItems) override {
/* TODO */
ALOGD("onWorkDone called");
(void)comp;
- (void)numDiscardedInputBuffers;
if (callBack) callBack(workItems);
}
@@ -89,15 +87,20 @@
}
virtual void onInputBufferDone(
- const std::shared_ptr<C2Buffer>& buffer) override {
+ uint64_t frameIndex, size_t arrayIndex) override {
/* TODO */
- (void)buffer;
+ (void)frameIndex;
+ (void)arrayIndex;
}
- virtual void onFramesRendered(
- const std::vector<RenderedFrame>& renderedFrames) override {
+ virtual void onFrameRendered(
+ uint64_t bufferQueueId,
+ int32_t slotId,
+ int64_t timestampNs) override {
/* TODO */
- (void)renderedFrames;
+ (void)bufferQueueId;
+ (void)slotId;
+ (void)timestampNs;
}
// std::mutex mQueueLock;
// std::condition_variable mQueueCondition;
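For context only, not part of the patch: a hedged sketch of how a test listener might provide the three callbacks whose signatures are updated above. Only the callbacks touched by this change are shown; the remaining Codec2Client::Listener callbacks, which this diff leaves alone, are assumed to be implemented elsewhere in the class, and the class name is hypothetical.

// Sketch of overrides matching the updated listener interface; other
// required Listener overrides are assumed to exist in the real class.
struct ExampleListener : public android::Codec2Client::Listener {
    void onWorkDone(
            const std::weak_ptr<android::Codec2Client::Component>& comp,
            std::list<std::unique_ptr<C2Work>>& workItems) override {
        (void)comp;
        (void)workItems;  // Drain or validate finished work here.
    }
    void onInputBufferDone(uint64_t frameIndex, size_t arrayIndex) override {
        (void)frameIndex;  // Identifies the C2Work that consumed the buffer.
        (void)arrayIndex;  // Index into that work's input.buffers array.
    }
    void onFrameRendered(uint64_t bufferQueueId, int32_t slotId,
                         int64_t timestampNs) override {
        (void)bufferQueueId;
        (void)slotId;
        (void)timestampNs;  // Render timestamp in nanoseconds.
    }
    // Other Listener callbacks unchanged by this patch are omitted here.
};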
diff --git a/media/codec2/hidl/1.0/vts/component/Android.bp b/media/codec2/hidl/1.0/vts/functional/component/Android.bp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/component/Android.bp
rename to media/codec2/hidl/1.0/vts/functional/component/Android.bp
diff --git a/media/codec2/hidl/1.0/vts/component/VtsHidlC2V1_0TargetComponentTest.cpp b/media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/component/VtsHidlC2V1_0TargetComponentTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/component/VtsHidlC2V1_0TargetComponentTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/master/Android.bp b/media/codec2/hidl/1.0/vts/functional/master/Android.bp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/master/Android.bp
rename to media/codec2/hidl/1.0/vts/functional/master/Android.bp
diff --git a/media/codec2/hidl/1.0/vts/master/VtsHidlC2V1_0TargetMasterTest.cpp b/media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/master/VtsHidlC2V1_0TargetMasterTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_352x288_420p_30fps_32frames.yuv b/media/codec2/hidl/1.0/vts/functional/res/bbb_352x288_420p_30fps_32frames.yuv
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_352x288_420p_30fps_32frames.yuv
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_352x288_420p_30fps_32frames.yuv
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz.aac b/media/codec2/hidl/1.0/vts/functional/res/bbb_aac_stereo_128kbps_48000hz.aac
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz.aac
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_aac_stereo_128kbps_48000hz.aac
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_aac_stereo_128kbps_48000hz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_aac_stereo_128kbps_48000hz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_aac_stereo_128kbps_48000hz_multi_frame.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_aac_stereo_128kbps_48000hz_multi_frame.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_aac_stereo_128kbps_48000hz_multi_frame.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz.amrwb b/media/codec2/hidl/1.0/vts/functional/res/bbb_amrwb_1ch_14kbps_16000hz.amrwb
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz.amrwb
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_amrwb_1ch_14kbps_16000hz.amrwb
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_amrwb_1ch_14kbps_16000hz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_amrwb_1ch_14kbps_16000hz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_amrwb_1ch_14kbps_16000hz_multi_frame.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.av1 b/media/codec2/hidl/1.0/vts/functional/res/bbb_av1_176_144.av1
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.av1
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_av1_176_144.av1
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_av1_176_144.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_av1_176_144.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_av1_176_144.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.av1 b/media/codec2/hidl/1.0/vts/functional/res/bbb_av1_640_360.av1
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.av1
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_av1_640_360.av1
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_av1_640_360.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_av1_640_360.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_av1_640_360.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_avc_176x144_300kbps_60fps.h264 b/media/codec2/hidl/1.0/vts/functional/res/bbb_avc_176x144_300kbps_60fps.h264
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_avc_176x144_300kbps_60fps.h264
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_avc_176x144_300kbps_60fps.h264
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_avc_176x144_300kbps_60fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_avc_176x144_300kbps_60fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_avc_176x144_300kbps_60fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_avc_176x144_300kbps_60fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_avc_640x360_768kbps_30fps.h264 b/media/codec2/hidl/1.0/vts/functional/res/bbb_avc_640x360_768kbps_30fps.h264
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_avc_640x360_768kbps_30fps.h264
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_avc_640x360_768kbps_30fps.h264
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_avc_640x360_768kbps_30fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_avc_640x360_768kbps_30fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_avc_640x360_768kbps_30fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_avc_640x360_768kbps_30fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_flac_stereo_680kbps_48000hz.flac b/media/codec2/hidl/1.0/vts/functional/res/bbb_flac_stereo_680kbps_48000hz.flac
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_flac_stereo_680kbps_48000hz.flac
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_flac_stereo_680kbps_48000hz.flac
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_flac_stereo_680kbps_48000hz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_flac_stereo_680kbps_48000hz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_flac_stereo_680kbps_48000hz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_flac_stereo_680kbps_48000hz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_g711alaw_1ch_8khz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_g711alaw_1ch_8khz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_g711alaw_1ch_8khz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_g711alaw_1ch_8khz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_g711alaw_1ch_8khz.raw b/media/codec2/hidl/1.0/vts/functional/res/bbb_g711alaw_1ch_8khz.raw
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_g711alaw_1ch_8khz.raw
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_g711alaw_1ch_8khz.raw
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_g711mulaw_1ch_8khz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_g711mulaw_1ch_8khz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_g711mulaw_1ch_8khz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_g711mulaw_1ch_8khz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_g711mulaw_1ch_8khz.raw b/media/codec2/hidl/1.0/vts/functional/res/bbb_g711mulaw_1ch_8khz.raw
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_g711mulaw_1ch_8khz.raw
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_g711mulaw_1ch_8khz.raw
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_gsm_1ch_8khz_13kbps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_gsm_1ch_8khz_13kbps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_gsm_1ch_8khz_13kbps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_gsm_1ch_8khz_13kbps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_gsm_1ch_8khz_13kbps.raw b/media/codec2/hidl/1.0/vts/functional/res/bbb_gsm_1ch_8khz_13kbps.raw
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_gsm_1ch_8khz_13kbps.raw
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_gsm_1ch_8khz_13kbps.raw
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_h263_352x288_300kbps_12fps.h263 b/media/codec2/hidl/1.0/vts/functional/res/bbb_h263_352x288_300kbps_12fps.h263
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_h263_352x288_300kbps_12fps.h263
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_h263_352x288_300kbps_12fps.h263
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_h263_352x288_300kbps_12fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_h263_352x288_300kbps_12fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_h263_352x288_300kbps_12fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_h263_352x288_300kbps_12fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_hevc_176x144_176kbps_60fps.hevc b/media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_176x144_176kbps_60fps.hevc
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_hevc_176x144_176kbps_60fps.hevc
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_176x144_176kbps_60fps.hevc
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_hevc_176x144_176kbps_60fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_176x144_176kbps_60fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_hevc_176x144_176kbps_60fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_176x144_176kbps_60fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_hevc_640x360_1600kbps_30fps.hevc b/media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_640x360_1600kbps_30fps.hevc
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_hevc_640x360_1600kbps_30fps.hevc
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_640x360_1600kbps_30fps.hevc
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_hevc_640x360_1600kbps_30fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_640x360_1600kbps_30fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_hevc_640x360_1600kbps_30fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_hevc_640x360_1600kbps_30fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_mp3_stereo_192kbps_48000hz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mp3_stereo_192kbps_48000hz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz.mp3 b/media/codec2/hidl/1.0/vts/functional/res/bbb_mp3_stereo_192kbps_48000hz.mp3
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz.mp3
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mp3_stereo_192kbps_48000hz.mp3
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_mp3_stereo_192kbps_48000hz_multi_frame.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mp3_stereo_192kbps_48000hz_multi_frame.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mp3_stereo_192kbps_48000hz_multi_frame.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mpeg2_176x144_105kbps_25fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_176x144_105kbps_25fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mpeg2_176x144_105kbps_25fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_176x144_105kbps_25fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mpeg2_176x144_105kbps_25fps.m2v b/media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_176x144_105kbps_25fps.m2v
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mpeg2_176x144_105kbps_25fps.m2v
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_176x144_105kbps_25fps.m2v
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mpeg2_352x288_1mbps_60fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_352x288_1mbps_60fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mpeg2_352x288_1mbps_60fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_352x288_1mbps_60fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mpeg2_352x288_1mbps_60fps.m2v b/media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_352x288_1mbps_60fps.m2v
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mpeg2_352x288_1mbps_60fps.m2v
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg2_352x288_1mbps_60fps.m2v
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mpeg4_352x288_512kbps_30fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg4_352x288_512kbps_30fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mpeg4_352x288_512kbps_30fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg4_352x288_512kbps_30fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_mpeg4_352x288_512kbps_30fps.m4v b/media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg4_352x288_512kbps_30fps.m4v
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_mpeg4_352x288_512kbps_30fps.m4v
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_mpeg4_352x288_512kbps_30fps.m4v
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_opus_stereo_128kbps_48000hz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_opus_stereo_128kbps_48000hz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_opus_stereo_128kbps_48000hz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_opus_stereo_128kbps_48000hz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_opus_stereo_128kbps_48000hz.opus b/media/codec2/hidl/1.0/vts/functional/res/bbb_opus_stereo_128kbps_48000hz.opus
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_opus_stereo_128kbps_48000hz.opus
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_opus_stereo_128kbps_48000hz.opus
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_16khz_s16le.raw b/media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_16khz_s16le.raw
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_16khz_s16le.raw
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_16khz_s16le.raw
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_8khz_s16le.raw b/media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_8khz_s16le.raw
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_8khz_s16le.raw
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_8khz_s16le.raw
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_8khz_s32le.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_8khz_s32le.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_8khz_s32le.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_8khz_s32le.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_8khz_s32le.raw b/media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_8khz_s32le.raw
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_raw_1ch_8khz_s32le.raw
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_raw_1ch_8khz_s32le.raw
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_raw_2ch_48khz_s16le.raw b/media/codec2/hidl/1.0/vts/functional/res/bbb_raw_2ch_48khz_s16le.raw
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_raw_2ch_48khz_s16le.raw
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_raw_2ch_48khz_s16le.raw
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vorbis_stereo_128kbps_48000hz.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_vorbis_stereo_128kbps_48000hz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vorbis_stereo_128kbps_48000hz.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vorbis_stereo_128kbps_48000hz.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vorbis_stereo_128kbps_48000hz.vorbis b/media/codec2/hidl/1.0/vts/functional/res/bbb_vorbis_stereo_128kbps_48000hz.vorbis
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vorbis_stereo_128kbps_48000hz.vorbis
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vorbis_stereo_128kbps_48000hz.vorbis
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp8_176x144_240kbps_60fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_176x144_240kbps_60fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp8_176x144_240kbps_60fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_176x144_240kbps_60fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp8_176x144_240kbps_60fps.vp8 b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_176x144_240kbps_60fps.vp8
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp8_176x144_240kbps_60fps.vp8
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_176x144_240kbps_60fps.vp8
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp8_640x360_2mbps_30fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_640x360_2mbps_30fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp8_640x360_2mbps_30fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_640x360_2mbps_30fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp8_640x360_2mbps_30fps.vp8 b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_640x360_2mbps_30fps.vp8
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp8_640x360_2mbps_30fps.vp8
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp8_640x360_2mbps_30fps.vp8
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp9_176x144_285kbps_60fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_176x144_285kbps_60fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp9_176x144_285kbps_60fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_176x144_285kbps_60fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp9_176x144_285kbps_60fps.vp9 b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_176x144_285kbps_60fps.vp9
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp9_176x144_285kbps_60fps.vp9
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_176x144_285kbps_60fps.vp9
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp9_640x360_1600kbps_30fps.info b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_640x360_1600kbps_30fps.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp9_640x360_1600kbps_30fps.info
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_640x360_1600kbps_30fps.info
diff --git a/media/codec2/hidl/1.0/vts/res/bbb_vp9_640x360_1600kbps_30fps.vp9 b/media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_640x360_1600kbps_30fps.vp9
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/bbb_vp9_640x360_1600kbps_30fps.vp9
rename to media/codec2/hidl/1.0/vts/functional/res/bbb_vp9_640x360_1600kbps_30fps.vp9
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz.amrnb b/media/codec2/hidl/1.0/vts/functional/res/sine_amrnb_1ch_12kbps_8000hz.amrnb
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz.amrnb
rename to media/codec2/hidl/1.0/vts/functional/res/sine_amrnb_1ch_12kbps_8000hz.amrnb
Binary files differ
diff --git a/media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz.info b/media/codec2/hidl/1.0/vts/functional/res/sine_amrnb_1ch_12kbps_8000hz.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz.info
rename to media/codec2/hidl/1.0/vts/functional/res/sine_amrnb_1ch_12kbps_8000hz.info
diff --git a/media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz_multi_frame.info b/media/codec2/hidl/1.0/vts/functional/res/sine_amrnb_1ch_12kbps_8000hz_multi_frame.info
similarity index 100%
rename from media/codec2/hidl/1.0/vts/res/sine_amrnb_1ch_12kbps_8000hz_multi_frame.info
rename to media/codec2/hidl/1.0/vts/functional/res/sine_amrnb_1ch_12kbps_8000hz_multi_frame.info
diff --git a/media/codec2/hidl/1.0/vts/video/Android.bp b/media/codec2/hidl/1.0/vts/functional/video/Android.bp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/video/Android.bp
rename to media/codec2/hidl/1.0/vts/functional/video/Android.bp
diff --git a/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
similarity index 100%
rename from media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
diff --git a/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
similarity index 96%
rename from media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
rename to media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
index 8585c87..7db41c0 100644
--- a/media/codec2/hidl/1.0/vts/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
@@ -118,7 +118,6 @@
}
mEos = false;
mCsd = false;
- mConfig = false;
mFramesReceived = 0;
mFailedWorkReceived = 0;
if (mCompName == unknown_comp) mDisableTest = true;
@@ -134,7 +133,7 @@
Super::TearDown();
}
- void setupConfigParam(int32_t nWidth, int32_t nHeight);
+ bool setupConfigParam(int32_t nWidth, int32_t nHeight);
// callback function to process onWorkDone received by Listener
void handleWorkDone(std::list<std::unique_ptr<C2Work>>& workItems) {
@@ -228,14 +227,14 @@
}
// Set Default config param.
-void Codec2VideoEncHidlTest::setupConfigParam(int32_t nWidth, int32_t nHeight) {
+bool Codec2VideoEncHidlTest::setupConfigParam(int32_t nWidth, int32_t nHeight) {
std::vector<std::unique_ptr<C2SettingResult>> failures;
- C2VideoSizeStreamTuning::input inputSize(0u, nWidth, nHeight);
+ C2StreamPictureSizeInfo::input inputSize(0u, nWidth, nHeight);
std::vector<C2Param*> configParam{&inputSize};
c2_status_t status =
mComponent->config(configParam, C2_DONT_BLOCK, &failures);
- if (failures.size() == 0u ) mConfig = true;
- ASSERT_EQ(status, C2_OK);
+ if (status == C2_OK && failures.size() == 0u) return true;
+ return false;
}
// LookUpTable of clips for component testing
@@ -360,8 +359,7 @@
ASSERT_EQ(eleStream.is_open(), true) << mURL << " file not found";
ALOGV("mURL : %s", mURL);
- setupConfigParam(nWidth, nHeight);
- if (!mConfig) {
+ if (!setupConfigParam(nWidth, nHeight)) {
std::cout << "[ WARN ] Test Skipped \n";
return;
}
@@ -439,7 +437,6 @@
TEST_F(Codec2VideoEncHidlTest, FlushTest) {
description("Test Request for flush");
if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
typedef std::unique_lock<std::mutex> ULock;
char mURL[512];
@@ -447,7 +444,12 @@
int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
strcpy(mURL, gEnv->getRes().c_str());
GetURLForComponent(mURL);
- setupConfigParam(nWidth, nHeight);
+
+ if (!setupConfigParam(nWidth, nHeight)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
// Setting default configuration
mFlushedIndices.clear();
@@ -522,12 +524,16 @@
TEST_F(Codec2VideoEncHidlTest, InvalidBufferTest) {
description("Tests feeding larger/smaller input buffer");
if (mDisableTest) return;
- ASSERT_EQ(mComponent->start(), C2_OK);
std::ifstream eleStream;
int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH / 2;
int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT / 2;
- setupConfigParam(nWidth, nHeight);
+
+ if (!setupConfigParam(nWidth, nHeight)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
+ ASSERT_EQ(mComponent->start(), C2_OK);
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
@@ -579,10 +585,12 @@
int32_t nWidth = GetParam().first;
int32_t nHeight = GetParam().second;
ALOGD("Trying encode for width %d height %d", nWidth, nHeight);
- mConfig = false;
mEos = false;
- setupConfigParam(nWidth, nHeight);
- if (!mConfig) return;
+
+ if (!setupConfigParam(nWidth, nHeight)) {
+ std::cout << "[ WARN ] Test Skipped \n";
+ return;
+ }
ASSERT_EQ(mComponent->start(), C2_OK);
ASSERT_NO_FATAL_FAILURE(
diff --git a/media/codec2/hidl/1.0/vts/video/media_c2_video_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
similarity index 100%
rename from media/codec2/hidl/1.0/vts/video/media_c2_video_hidl_test_common.h
rename to media/codec2/hidl/1.0/vts/functional/video/media_c2_video_hidl_test_common.h
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index f5cc9ff..7a2e549 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "Codec2Client"
-#include <log/log.h>
+#include <android-base/logging.h>
#include <codec2/hidl/client.h>
@@ -32,7 +32,6 @@
#include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
#include <hidl/HidlSupport.h>
#include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
-#undef LOG
#include <android/hardware/media/bufferpool/2.0/IClientManager.h>
#include <android/hardware/media/c2/1.0/IComponent.h>
@@ -99,19 +98,17 @@
return mName;
}
-Codec2ConfigurableClient::Base* Codec2ConfigurableClient::base() const {
- return static_cast<Base*>(mBase.get());
-}
-
Codec2ConfigurableClient::Codec2ConfigurableClient(
- const sp<Codec2ConfigurableClient::Base>& base) : mBase(base) {
- Return<void> transStatus = base->getName(
- [this](const hidl_string& name) {
- mName = name.c_str();
- });
- if (!transStatus.isOk()) {
- ALOGE("Cannot obtain name from IConfigurable.");
- }
+ const sp<IConfigurable>& base)
+ : mBase{base},
+ mName{[base]() -> C2String {
+ C2String outName;
+ Return<void> transStatus = base->getName(
+ [&outName](const hidl_string& name) {
+ outName = name.c_str();
+ });
+ return transStatus.isOk() ? outName : "";
+ }()} {
}
c2_status_t Codec2ConfigurableClient::query(
@@ -124,7 +121,7 @@
size_t numIndices = 0;
for (C2Param* const& stackParam : stackParams) {
if (!stackParam) {
- ALOGW("query -- null stack param encountered.");
+ LOG(WARNING) << "query -- null stack param encountered.";
continue;
}
indices[numIndices++] = static_cast<ParamIndex>(stackParam->index());
@@ -139,32 +136,31 @@
heapParams->reserve(heapParams->size() + numIndices);
}
c2_status_t status;
- Return<void> transStatus = base()->query(
+ Return<void> transStatus = mBase->query(
indices,
mayBlock == C2_MAY_BLOCK,
[&status, &numStackIndices, &stackParams, heapParams](
Status s, const Params& p) {
status = static_cast<c2_status_t>(s);
if (status != C2_OK && status != C2_BAD_INDEX) {
- ALOGE("query -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "query -- call failed: "
+ << status << ".";
return;
}
std::vector<C2Param*> paramPointers;
- c2_status_t parseStatus = parseParamsBlob(&paramPointers, p);
- if (parseStatus != C2_OK) {
- ALOGE("query -- error while parsing params. "
- "Error code = %d", static_cast<int>(status));
- status = parseStatus;
+ if (!parseParamsBlob(&paramPointers, p)) {
+ LOG(ERROR) << "query -- error while parsing params.";
+ status = C2_CORRUPTED;
return;
}
size_t i = 0;
- for (auto it = paramPointers.begin(); it != paramPointers.end(); ) {
+ for (auto it = paramPointers.begin();
+ it != paramPointers.end(); ) {
C2Param* paramPointer = *it;
if (numStackIndices > 0) {
--numStackIndices;
if (!paramPointer) {
- ALOGW("query -- null stack param.");
+ LOG(WARNING) << "query -- null stack param.";
++it;
continue;
}
@@ -172,37 +168,41 @@
++i;
}
if (i >= stackParams.size()) {
- ALOGE("query -- unexpected error.");
+ LOG(ERROR) << "query -- unexpected error.";
status = C2_CORRUPTED;
return;
}
if (stackParams[i]->index() != paramPointer->index()) {
- ALOGW("query -- param skipped. index = %d",
- static_cast<int>(stackParams[i]->index()));
+ LOG(WARNING) << "query -- param skipped: "
+ "index = "
+ << stackParams[i]->index() << ".";
stackParams[i++]->invalidate();
continue;
}
if (!stackParams[i++]->updateFrom(*paramPointer)) {
- ALOGW("query -- param update failed. index = %d",
- static_cast<int>(paramPointer->index()));
+ LOG(WARNING) << "query -- param update failed: "
+ "index = "
+ << paramPointer->index() << ".";
}
} else {
if (!paramPointer) {
- ALOGW("query -- null heap param.");
+ LOG(WARNING) << "query -- null heap param.";
++it;
continue;
}
if (!heapParams) {
- ALOGW("query -- unexpected extra stack param.");
+ LOG(WARNING) << "query -- "
+ "unexpected extra stack param.";
} else {
- heapParams->emplace_back(C2Param::Copy(*paramPointer));
+ heapParams->emplace_back(
+ C2Param::Copy(*paramPointer));
}
}
++it;
}
});
if (!transStatus.isOk()) {
- ALOGE("query -- transaction failed.");
+ LOG(ERROR) << "query -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
return status;
@@ -213,13 +213,12 @@
c2_blocking_t mayBlock,
std::vector<std::unique_ptr<C2SettingResult>>* const failures) {
Params hidlParams;
- Status hidlStatus = createParamsBlob(&hidlParams, params);
- if (hidlStatus != Status::OK) {
- ALOGE("config -- bad input.");
+ if (!createParamsBlob(&hidlParams, params)) {
+ LOG(ERROR) << "config -- bad input.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status;
- Return<void> transStatus = base()->config(
+ Return<void> transStatus = mBase->config(
hidlParams,
mayBlock == C2_MAY_BLOCK,
[&status, &params, failures](
@@ -227,24 +226,27 @@
const hidl_vec<SettingResult> f,
const Params& o) {
status = static_cast<c2_status_t>(s);
- if (status != C2_OK) {
- ALOGD("config -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ if (status != C2_OK && status != C2_BAD_INDEX) {
+ LOG(DEBUG) << "config -- call failed: "
+ << status << ".";
}
size_t i = failures->size();
failures->resize(i + f.size());
for (const SettingResult& sf : f) {
- status = objcpy(&(*failures)[i++], sf);
- if (status != C2_OK) {
- ALOGE("config -- invalid returned SettingResult. "
- "Error code = %d", static_cast<int>(status));
+ if (!objcpy(&(*failures)[i++], sf)) {
+ LOG(ERROR) << "config -- "
+ << "invalid SettingResult returned.";
return;
}
}
- status = updateParamsFromBlob(params, o);
+ if (!updateParamsFromBlob(params, o)) {
+ LOG(ERROR) << "config -- "
+ << "failed to parse returned params.";
+ status = C2_CORRUPTED;
+ }
});
if (!transStatus.isOk()) {
- ALOGE("config -- transaction failed.");
+ LOG(ERROR) << "config -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
return status;
@@ -254,7 +256,7 @@
std::vector<std::shared_ptr<C2ParamDescriptor>>* const params) const {
// TODO: Cache and query properly!
c2_status_t status;
- Return<void> transStatus = base()->querySupportedParams(
+ Return<void> transStatus = mBase->querySupportedParams(
std::numeric_limits<uint32_t>::min(),
std::numeric_limits<uint32_t>::max(),
[&status, params](
@@ -262,24 +264,22 @@
const hidl_vec<ParamDescriptor>& p) {
status = static_cast<c2_status_t>(s);
if (status != C2_OK) {
- ALOGE("querySupportedParams -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "querySupportedParams -- call failed: "
+ << status << ".";
return;
}
size_t i = params->size();
params->resize(i + p.size());
for (const ParamDescriptor& sp : p) {
- status = objcpy(&(*params)[i++], sp);
- if (status != C2_OK) {
- ALOGE("querySupportedParams -- "
- "invalid returned ParamDescriptor. "
- "Error code = %d", static_cast<int>(status));
+ if (!objcpy(&(*params)[i++], sp)) {
+ LOG(ERROR) << "querySupportedParams -- "
+ << "invalid returned ParamDescriptor.";
return;
}
}
});
if (!transStatus.isOk()) {
- ALOGE("querySupportedParams -- transaction failed.");
+ LOG(ERROR) << "querySupportedParams -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
return status;
@@ -290,15 +290,14 @@
c2_blocking_t mayBlock) const {
hidl_vec<FieldSupportedValuesQuery> inFields(fields.size());
for (size_t i = 0; i < fields.size(); ++i) {
- Status hidlStatus = objcpy(&inFields[i], fields[i]);
- if (hidlStatus != Status::OK) {
- ALOGE("querySupportedValues -- bad input");
+ if (!objcpy(&inFields[i], fields[i])) {
+ LOG(ERROR) << "querySupportedValues -- bad input";
return C2_TRANSACTION_FAILED;
}
}
c2_status_t status;
- Return<void> transStatus = base()->querySupportedValues(
+ Return<void> transStatus = mBase->querySupportedValues(
inFields,
mayBlock == C2_MAY_BLOCK,
[&status, &inFields, &fields](
@@ -306,27 +305,28 @@
const hidl_vec<FieldSupportedValuesQueryResult>& r) {
status = static_cast<c2_status_t>(s);
if (status != C2_OK) {
- ALOGE("querySupportedValues -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "querySupportedValues -- call failed: "
+ << status << ".";
return;
}
if (r.size() != fields.size()) {
- ALOGE("querySupportedValues -- input and output lists "
- "have different sizes.");
+ LOG(ERROR) << "querySupportedValues -- "
+ "input and output lists "
+ "have different sizes.";
status = C2_CORRUPTED;
return;
}
for (size_t i = 0; i < fields.size(); ++i) {
- status = objcpy(&fields[i], inFields[i], r[i]);
- if (status != C2_OK) {
- ALOGE("querySupportedValues -- invalid returned value. "
- "Error code = %d", static_cast<int>(status));
+ if (!objcpy(&fields[i], inFields[i], r[i])) {
+ LOG(ERROR) << "querySupportedValues -- "
+ "invalid returned value.";
+ status = C2_CORRUPTED;
return;
}
}
});
if (!transStatus.isOk()) {
- ALOGE("querySupportedValues -- transaction failed.");
+ LOG(ERROR) << "querySupportedValues -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
return status;
@@ -339,22 +339,20 @@
virtual Return<void> onWorkDone(const WorkBundle& workBundle) override {
std::list<std::unique_ptr<C2Work>> workItems;
- c2_status_t status = objcpy(&workItems, workBundle);
- if (status != C2_OK) {
- ALOGI("onWorkDone -- received corrupted WorkBundle. "
- "status = %d.", static_cast<int>(status));
+ if (!objcpy(&workItems, workBundle)) {
+ LOG(DEBUG) << "onWorkDone -- received corrupted WorkBundle.";
return Void();
}
// release input buffers potentially held by the component from queue
- size_t numDiscardedInputBuffers = 0;
- std::shared_ptr<Codec2Client::Component> strongComponent = component.lock();
+ std::shared_ptr<Codec2Client::Component> strongComponent =
+ component.lock();
if (strongComponent) {
- numDiscardedInputBuffers = strongComponent->handleOnWorkDone(workItems);
+ strongComponent->handleOnWorkDone(workItems);
}
if (std::shared_ptr<Codec2Client::Listener> listener = base.lock()) {
- listener->onWorkDone(component, workItems, numDiscardedInputBuffers);
+ listener->onWorkDone(component, workItems);
} else {
- ALOGD("onWorkDone -- listener died.");
+ LOG(DEBUG) << "onWorkDone -- listener died.";
}
return Void();
}
@@ -363,13 +361,10 @@
const hidl_vec<SettingResult>& settingResults) override {
std::vector<std::shared_ptr<C2SettingResult>> c2SettingResults(
settingResults.size());
- c2_status_t status;
for (size_t i = 0; i < settingResults.size(); ++i) {
std::unique_ptr<C2SettingResult> c2SettingResult;
- status = objcpy(&c2SettingResult, settingResults[i]);
- if (status != C2_OK) {
- ALOGI("onTripped -- received corrupted SettingResult. "
- "status = %d.", static_cast<int>(status));
+ if (!objcpy(&c2SettingResult, settingResults[i])) {
+ LOG(DEBUG) << "onTripped -- received corrupted SettingResult.";
return Void();
}
c2SettingResults[i] = std::move(c2SettingResult);
@@ -377,20 +372,21 @@
if (std::shared_ptr<Codec2Client::Listener> listener = base.lock()) {
listener->onTripped(component, c2SettingResults);
} else {
- ALOGD("onTripped -- listener died.");
+ LOG(DEBUG) << "onTripped -- listener died.";
}
return Void();
}
virtual Return<void> onError(Status s, uint32_t errorCode) override {
- ALOGD("onError -- status = %d, errorCode = %u.",
- static_cast<int>(s),
- static_cast<unsigned>(errorCode));
+ LOG(DEBUG) << "onError --"
+ << " status = " << s
+ << ", errorCode = " << errorCode
+ << ".";
if (std::shared_ptr<Listener> listener = base.lock()) {
listener->onError(component, s == Status::OK ?
errorCode : static_cast<c2_status_t>(s));
} else {
- ALOGD("onError -- listener died.");
+ LOG(DEBUG) << "onError -- listener died.";
}
return Void();
}
@@ -398,55 +394,59 @@
virtual Return<void> onFramesRendered(
const hidl_vec<RenderedFrame>& renderedFrames) override {
std::shared_ptr<Listener> listener = base.lock();
- std::vector<Codec2Client::Listener::RenderedFrame> rfs;
- rfs.reserve(renderedFrames.size());
- for (const RenderedFrame& rf : renderedFrames) {
- if (rf.slotId >= 0) {
- if (listener) {
- rfs.emplace_back(rf.bufferQueueId,
- rf.slotId,
- rf.timestampNs);
- }
- } else {
- std::shared_ptr<Codec2Client::Component> strongComponent =
- component.lock();
- if (strongComponent) {
- uint64_t frameIndex = rf.bufferQueueId;
- size_t bufferIndex = static_cast<size_t>(~rf.slotId);
- ALOGV("Received death notification of input buffer: "
- "frameIndex = %llu, bufferIndex = %zu.",
- static_cast<long long unsigned>(frameIndex),
- bufferIndex);
- std::shared_ptr<C2Buffer> buffer =
- strongComponent->freeInputBuffer(
- frameIndex, bufferIndex);
- if (buffer) {
- listener->onInputBufferDone(buffer);
- }
- }
- }
+ if (!listener) {
+ LOG(DEBUG) << "onFramesRendered -- listener died.";
+ return Void();
}
- if (!rfs.empty()) {
- if (listener) {
- listener->onFramesRendered(rfs);
- } else {
- ALOGD("onFramesRendered -- listener died.");
- }
+ for (const RenderedFrame& renderedFrame : renderedFrames) {
+ listener->onFrameRendered(
+ renderedFrame.bufferQueueId,
+ renderedFrame.slotId,
+ renderedFrame.timestampNs);
}
return Void();
}
+
+ virtual Return<void> onInputBuffersReleased(
+ const hidl_vec<InputBuffer>& inputBuffers) override {
+ std::shared_ptr<Listener> listener = base.lock();
+ if (!listener) {
+ LOG(DEBUG) << "onInputBuffersReleased -- listener died.";
+ return Void();
+ }
+ for (const InputBuffer& inputBuffer : inputBuffers) {
+ LOG(VERBOSE) << "onInputBuffersReleased --"
+ " received death notification of"
+ " input buffer:"
+ " frameIndex = " << inputBuffer.frameIndex
+ << ", bufferIndex = " << inputBuffer.arrayIndex
+ << ".";
+ listener->onInputBufferDone(
+ inputBuffer.frameIndex, inputBuffer.arrayIndex);
+ }
+ return Void();
+ }
+
};
// Codec2Client
-Codec2Client::Base* Codec2Client::base() const {
- return static_cast<Base*>(mBase.get());
-}
-
-Codec2Client::Codec2Client(const sp<Codec2Client::Base>& base, std::string instanceName) :
- Codec2ConfigurableClient(base), mListed(false), mInstanceName(instanceName) {
+Codec2Client::Codec2Client(const sp<IComponentStore>& base,
+ std::string serviceName)
+ : Configurable{
+ [base]() -> sp<IConfigurable> {
+ Return<sp<IConfigurable>> transResult =
+ base->getConfigurable();
+ return transResult.isOk() ?
+ static_cast<sp<IConfigurable>>(transResult) :
+ nullptr;
+ }()
+ },
+ mBase{base},
+ mListed{false},
+ mServiceName{serviceName} {
Return<sp<IClientManager>> transResult = base->getPoolClientManager();
if (!transResult.isOk()) {
- ALOGE("getPoolClientManager -- failed transaction.");
+ LOG(ERROR) << "getPoolClientManager -- transaction failed.";
} else {
mHostPoolManager = static_cast<sp<IClientManager>>(transResult);
}
@@ -457,13 +457,10 @@
const std::shared_ptr<Codec2Client::Listener>& listener,
std::shared_ptr<Codec2Client::Component>* const component) {
- // TODO: Add support for Bufferpool
-
-
c2_status_t status;
- sp<Component::HidlListener> hidlListener = new Component::HidlListener();
+ sp<Component::HidlListener> hidlListener = new Component::HidlListener{};
hidlListener->base = listener;
- Return<void> transStatus = base()->createComponent(
+ Return<void> transStatus = mBase->createComponent(
name,
hidlListener,
ClientManager::getInstance(),
@@ -478,23 +475,24 @@
hidlListener->component = *component;
});
if (!transStatus.isOk()) {
- ALOGE("createComponent -- failed transaction.");
+ LOG(ERROR) << "createComponent(" << name.c_str()
+ << ") -- transaction failed.";
return C2_TRANSACTION_FAILED;
- }
-
- if (status != C2_OK) {
+ } else if (status != C2_OK) {
+ LOG(ERROR) << "createComponent(" << name.c_str()
+ << ") -- call failed: " << status << ".";
return status;
- }
-
- if (!*component) {
- ALOGE("createComponent -- null component.");
+ } else if (!*component) {
+ LOG(ERROR) << "createComponent(" << name.c_str()
+ << ") -- null component.";
return C2_CORRUPTED;
}
status = (*component)->setDeathListener(*component, listener);
if (status != C2_OK) {
- ALOGE("createComponent -- setDeathListener returned error: %d.",
- static_cast<int>(status));
+ LOG(ERROR) << "createComponent(" << name.c_str()
+ << ") -- failed to set up death listener: "
+ << status << ".";
}
(*component)->mBufferPoolSender.setReceiver(mHostPoolManager);
@@ -505,44 +503,51 @@
const C2String& name,
std::shared_ptr<Codec2Client::Interface>* const interface) {
c2_status_t status;
- Return<void> transStatus = base()->createInterface(
+ Return<void> transStatus = mBase->createInterface(
name,
[&status, interface](
Status s,
const sp<IComponentInterface>& i) {
status = static_cast<c2_status_t>(s);
if (status != C2_OK) {
- ALOGE("createInterface -- call failed. "
- "Error code = %d", static_cast<int>(status));
return;
}
- *interface = std::make_shared<Codec2Client::Interface>(i);
+ *interface = std::make_shared<Interface>(i);
});
if (!transStatus.isOk()) {
- ALOGE("createInterface -- failed transaction.");
+ LOG(ERROR) << "createInterface(" << name.c_str()
+ << ") -- transaction failed.";
return C2_TRANSACTION_FAILED;
+ } else if (status != C2_OK) {
+ LOG(ERROR) << "createComponent(" << name.c_str()
+ << ") -- call failed: " << status << ".";
+ return status;
}
+
return status;
}
c2_status_t Codec2Client::createInputSurface(
- std::shared_ptr<Codec2Client::InputSurface>* const inputSurface) {
- Return<sp<IInputSurface>> transResult = base()->createInputSurface();
- if (!transResult.isOk()) {
- ALOGE("createInputSurface -- failed transaction.");
+ std::shared_ptr<InputSurface>* const inputSurface) {
+ c2_status_t status;
+ Return<void> transStatus = mBase->createInputSurface(
+ [&status, inputSurface](
+ Status s,
+ const sp<IInputSurface>& i) {
+ status = static_cast<c2_status_t>(s);
+ if (status != C2_OK) {
+ return;
+ }
+ *inputSurface = std::make_shared<InputSurface>(i);
+ });
+ if (!transStatus.isOk()) {
+ LOG(ERROR) << "createInputSurface -- transaction failed.";
return C2_TRANSACTION_FAILED;
+ } else if (status != C2_OK) {
+ LOG(DEBUG) << "createInputSurface -- call failed: "
+ << status << ".";
}
- sp<IInputSurface> result = static_cast<sp<IInputSurface>>(transResult);
- if (!result) {
- *inputSurface = nullptr;
- return C2_OK;
- }
- *inputSurface = std::make_shared<InputSurface>(result);
- if (!*inputSurface) {
- ALOGE("createInputSurface -- unknown error.");
- return C2_CORRUPTED;
- }
- return C2_OK;
+ return status;
}
const std::vector<C2Component::Traits>& Codec2Client::listComponents() const {
@@ -550,22 +555,25 @@
if (mListed) {
return mTraitsList;
}
- Return<void> transStatus = base()->listComponents(
- [this](const hidl_vec<IComponentStore::ComponentTraits>& t) {
+ Return<void> transStatus = mBase->listComponents(
+ [this](Status s,
+ const hidl_vec<IComponentStore::ComponentTraits>& t) {
+ if (s != Status::OK) {
+ LOG(DEBUG) << "listComponents -- call failed: "
+ << static_cast<c2_status_t>(s) << ".";
+ return;
+ }
mTraitsList.resize(t.size());
- mAliasesBuffer.resize(t.size());
for (size_t i = 0; i < t.size(); ++i) {
- c2_status_t status = objcpy(
- &mTraitsList[i], &mAliasesBuffer[i], t[i]);
- mTraitsList[i].owner = mInstanceName;
- if (status != C2_OK) {
- ALOGE("listComponents -- corrupted output.");
+ if (!objcpy(&mTraitsList[i], t[i])) {
+ LOG(ERROR) << "listComponents -- corrupted output.";
return;
}
+ mTraitsList[i].owner = mServiceName;
}
});
if (!transStatus.isOk()) {
- ALOGE("listComponents -- failed transaction.");
+ LOG(ERROR) << "listComponents -- transaction failed.";
}
mListed = true;
return mTraitsList;
@@ -577,7 +585,7 @@
// TODO: Implement?
(void)src;
(void)dst;
- ALOGE("copyBuffer not implemented");
+ LOG(ERROR) << "copyBuffer not implemented";
return C2_OMITTED;
}
@@ -597,21 +605,25 @@
const hidl_vec<StructDescriptor>& sd) {
c2_status_t status = static_cast<c2_status_t>(s);
if (status != C2_OK) {
- ALOGE("getStructDescriptors -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "SimpleParamReflector -- "
+ "getStructDescriptors() failed: "
+ << status << ".";
descriptor.reset();
return;
}
if (sd.size() != 1) {
- ALOGD("getStructDescriptors -- returned vector of size %zu.",
- sd.size());
+ LOG(DEBUG) << "SimpleParamReflector -- "
+ "getStructDescriptors() "
+ "returned vector of size "
+ << sd.size() << ". "
+ "It should be 1.";
descriptor.reset();
return;
}
- status = objcpy(&descriptor, sd[0]);
- if (status != C2_OK) {
- ALOGD("getStructDescriptors -- failed to convert. "
- "Error code = %d", static_cast<int>(status));
+ if (!objcpy(&descriptor, sd[0])) {
+ LOG(DEBUG) << "SimpleParamReflector -- "
+ "getStructDescriptors() returned "
+ "corrupted data.";
descriptor.reset();
return;
}
@@ -625,44 +637,44 @@
sp<Base> mBase;
};
- return std::make_shared<SimpleParamReflector>(base());
+ return std::make_shared<SimpleParamReflector>(mBase);
};
std::shared_ptr<Codec2Client> Codec2Client::CreateFromService(
- const char* instanceName, bool waitForService) {
- if (!instanceName) {
+ const char* serviceName, bool waitForService) {
+ if (!serviceName) {
return nullptr;
}
sp<Base> baseStore = waitForService ?
- Base::getService(instanceName) :
- Base::tryGetService(instanceName);
+ Base::getService(serviceName) :
+ Base::tryGetService(serviceName);
if (!baseStore) {
if (waitForService) {
- ALOGW("Codec2.0 service \"%s\" inaccessible. "
- "Check the device manifest.",
- instanceName);
+ LOG(WARNING) << "Codec2.0 service \"" << serviceName << "\""
+ " inaccessible. Check the device manifest.";
} else {
- ALOGD("Codec2.0 service \"%s\" unavailable right now. "
- "Try again later.",
- instanceName);
+ LOG(DEBUG) << "Codec2.0 service \"" << serviceName << "\""
+ " unavailable at the moment. "
+ " Wait or check the device manifest.";
}
return nullptr;
}
- return std::make_shared<Codec2Client>(baseStore, instanceName);
+ return std::make_shared<Codec2Client>(baseStore, serviceName);
}
c2_status_t Codec2Client::ForAllStores(
const std::string &key,
- std::function<c2_status_t(const std::shared_ptr<Codec2Client>&)> predicate) {
+ std::function<c2_status_t(const std::shared_ptr<Codec2Client>&)>
+ predicate) {
c2_status_t status = C2_NO_INIT; // no IComponentStores present
// Cache the mapping key -> index of Codec2Client in getClient().
static std::mutex key2IndexMutex;
static std::map<std::string, size_t> key2Index;
- // By default try all stores. However, try the last known client first. If the last known
- // client fails, retry once. We do this by pushing the last known client in front of the
- // list of all clients.
+ // By default try all stores. However, try the last known client first. If
+ // the last known client fails, retry once. We do this by pushing the last
+ // known client in front of the list of all clients.
std::deque<size_t> indices;
for (size_t index = kNumClients; index > 0; ) {
indices.push_front(--index);
@@ -688,7 +700,8 @@
}
}
if (wasMapped) {
- ALOGI("Could not find '%s' in last instance. Retrying...", key.c_str());
+ LOG(INFO) << "Could not find \"" << key << "\""
+ " in the last instance. Retrying...";
wasMapped = false;
}
}
@@ -704,20 +717,27 @@
c2_status_t status = ForAllStores(
componentName,
[owner, &component, componentName, &listener](
- const std::shared_ptr<Codec2Client> &client) -> c2_status_t {
- c2_status_t status = client->createComponent(componentName, listener, &component);
+ const std::shared_ptr<Codec2Client> &client)
+ -> c2_status_t {
+ c2_status_t status = client->createComponent(componentName,
+ listener,
+ &component);
if (status == C2_OK) {
if (owner) {
*owner = client;
}
} else if (status != C2_NOT_FOUND) {
- ALOGD("IComponentStore(%s)::createComponent('%s') returned %s",
- client->getInstanceName().c_str(), componentName, asString(status));
+ LOG(DEBUG) << "IComponentStore("
+ << client->getServiceName()
+ << ")::createComponent(\"" << componentName
+ << "\") returned status = "
+ << status << ".";
}
return status;
});
if (status != C2_OK) {
- ALOGI("Could not create component '%s' (%s)", componentName, asString(status));
+ LOG(DEBUG) << "Could not create component \"" << componentName << "\". "
+ "Status = " << status << ".";
}
return component;
}
@@ -730,20 +750,26 @@
c2_status_t status = ForAllStores(
interfaceName,
[owner, &interface, interfaceName](
- const std::shared_ptr<Codec2Client> &client) -> c2_status_t {
- c2_status_t status = client->createInterface(interfaceName, &interface);
+ const std::shared_ptr<Codec2Client> &client)
+ -> c2_status_t {
+ c2_status_t status = client->createInterface(interfaceName,
+ &interface);
if (status == C2_OK) {
if (owner) {
*owner = client;
}
} else if (status != C2_NOT_FOUND) {
- ALOGD("IComponentStore(%s)::createInterface('%s') returned %s",
- client->getInstanceName().c_str(), interfaceName, asString(status));
+ LOG(DEBUG) << "IComponentStore("
+ << client->getServiceName()
+ << ")::createInterface(\"" << interfaceName
+ << "\") returned status = "
+ << status << ".";
}
return status;
});
if (status != C2_OK) {
- ALOGI("Could not create interface '%s' (%s)", interfaceName, asString(status));
+ LOG(DEBUG) << "Could not create interface \"" << interfaceName << "\". "
+ "Status = " << status << ".";
}
return interface;
}
@@ -762,7 +788,8 @@
}
}
}
- ALOGW("Could not create an input surface from any Codec2.0 services.");
+ LOG(INFO) << "Could not create an input surface "
+ "from any Codec2.0 services.";
return nullptr;
}
@@ -798,15 +825,39 @@
Codec2Client::Listener::~Listener() {
}
-// Codec2Client::Component
-
-Codec2Client::Component::Base* Codec2Client::Component::base() const {
- return static_cast<Base*>(mBase.get());
+// Codec2Client::Interface
+Codec2Client::Interface::Interface(const sp<Base>& base)
+ : Configurable{
+ [base]() -> sp<IConfigurable> {
+ Return<sp<IConfigurable>> transResult =
+ base->getConfigurable();
+ return transResult.isOk() ?
+ static_cast<sp<IConfigurable>>(transResult) :
+ nullptr;
+ }()
+ },
+ mBase{base} {
}
-Codec2Client::Component::Component(const sp<Codec2Client::Component::Base>& base) :
- Codec2Client::Configurable(base),
- mBufferPoolSender(nullptr) {
+// Codec2Client::Component
+Codec2Client::Component::Component(const sp<Base>& base)
+ : Configurable{
+ [base]() -> sp<IConfigurable> {
+ Return<sp<IComponentInterface>> transResult1 =
+ base->getInterface();
+ if (!transResult1.isOk()) {
+ return nullptr;
+ }
+ Return<sp<IConfigurable>> transResult2 =
+ static_cast<sp<IComponentInterface>>(transResult1)->
+ getConfigurable();
+ return transResult2.isOk() ?
+ static_cast<sp<IConfigurable>>(transResult2) :
+ nullptr;
+ }()
+ },
+ mBase{base},
+ mBufferPoolSender{nullptr} {
}
Codec2Client::Component::~Component() {
@@ -817,7 +868,7 @@
C2BlockPool::local_id_t* blockPoolId,
std::shared_ptr<Codec2Client::Configurable>* configurable) {
c2_status_t status;
- Return<void> transStatus = base()->createBlockPool(
+ Return<void> transStatus = mBase->createBlockPool(
static_cast<uint32_t>(id),
[&status, blockPoolId, configurable](
Status s,
@@ -826,15 +877,15 @@
status = static_cast<c2_status_t>(s);
configurable->reset();
if (status != C2_OK) {
- ALOGE("createBlockPool -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "createBlockPool -- call failed: "
+ << status << ".";
return;
}
*blockPoolId = static_cast<C2BlockPool::local_id_t>(pId);
- *configurable = std::make_shared<Codec2Client::Configurable>(c);
+ *configurable = std::make_shared<Configurable>(c);
});
if (!transStatus.isOk()) {
- ALOGE("createBlockPool -- transaction failed.");
+ LOG(ERROR) << "createBlockPool -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
return status;
@@ -842,50 +893,17 @@
c2_status_t Codec2Client::Component::destroyBlockPool(
C2BlockPool::local_id_t localId) {
- Return<Status> transResult = base()->destroyBlockPool(
+ Return<Status> transResult = mBase->destroyBlockPool(
static_cast<uint64_t>(localId));
if (!transResult.isOk()) {
- ALOGE("destroyBlockPool -- transaction failed.");
+ LOG(ERROR) << "destroyBlockPool -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
return static_cast<c2_status_t>(static_cast<Status>(transResult));
}
-size_t Codec2Client::Component::handleOnWorkDone(
+void Codec2Client::Component::handleOnWorkDone(
const std::list<std::unique_ptr<C2Work>> &workItems) {
- // Input buffers' lifetime management
- std::vector<uint64_t> inputDone;
- for (const std::unique_ptr<C2Work> &work : workItems) {
- if (work) {
- if (work->worklets.empty()
- || !work->worklets.back()
- || (work->worklets.back()->output.flags & C2FrameData::FLAG_INCOMPLETE) == 0) {
- // input is complete
- inputDone.emplace_back(work->input.ordinal.frameIndex.peeku());
- }
- }
- }
-
- size_t numDiscardedInputBuffers = 0;
- {
- std::lock_guard<std::mutex> lock(mInputBuffersMutex);
- for (uint64_t inputIndex : inputDone) {
- auto it = mInputBuffers.find(inputIndex);
- if (it == mInputBuffers.end()) {
- ALOGV("onWorkDone -- returned consumed/unknown "
- "input frame: index %llu",
- (long long)inputIndex);
- } else {
- ALOGV("onWorkDone -- processed input frame: "
- "index %llu (containing %zu buffers)",
- (long long)inputIndex, it->second.size());
- mInputBuffers.erase(it);
- mInputBufferCount.erase(inputIndex);
- ++numDiscardedInputBuffers;
- }
- }
- }
-
// Output bufferqueue-based blocks' lifetime management
mOutputBufferQueueMutex.lock();
sp<IGraphicBufferProducer> igbp = mOutputIgbp;
@@ -896,86 +914,24 @@
if (igbp) {
holdBufferQueueBlocks(workItems, igbp, bqId, generation);
}
- return numDiscardedInputBuffers;
-}
-
-std::shared_ptr<C2Buffer> Codec2Client::Component::freeInputBuffer(
- uint64_t frameIndex,
- size_t bufferIndex) {
- std::shared_ptr<C2Buffer> buffer;
- std::lock_guard<std::mutex> lock(mInputBuffersMutex);
- auto it = mInputBuffers.find(frameIndex);
- if (it == mInputBuffers.end()) {
- ALOGI("freeInputBuffer -- Unrecognized input frame index %llu.",
- static_cast<long long unsigned>(frameIndex));
- return nullptr;
- }
- if (bufferIndex >= it->second.size()) {
- ALOGI("freeInputBuffer -- Input buffer no. %zu is invalid in "
- "input frame index %llu.",
- bufferIndex, static_cast<long long unsigned>(frameIndex));
- return nullptr;
- }
- buffer = it->second[bufferIndex];
- if (!buffer) {
- ALOGI("freeInputBuffer -- Input buffer no. %zu in "
- "input frame index %llu has already been freed.",
- bufferIndex, static_cast<long long unsigned>(frameIndex));
- return nullptr;
- }
- it->second[bufferIndex] = nullptr;
- if (--mInputBufferCount[frameIndex] == 0) {
- mInputBuffers.erase(it);
- mInputBufferCount.erase(frameIndex);
- }
- return buffer;
}
c2_status_t Codec2Client::Component::queue(
std::list<std::unique_ptr<C2Work>>* const items) {
- // remember input buffers queued to hold reference to them
- {
- std::lock_guard<std::mutex> lock(mInputBuffersMutex);
- for (const std::unique_ptr<C2Work> &work : *items) {
- if (!work) {
- continue;
- }
- if (work->input.buffers.size() == 0) {
- continue;
- }
-
- uint64_t inputIndex = work->input.ordinal.frameIndex.peeku();
- auto res = mInputBuffers.emplace(inputIndex, work->input.buffers);
- if (!res.second) {
- // TODO: append? - for now we are replacing
- res.first->second = work->input.buffers;
- ALOGI("queue -- duplicate input frame: index %llu. "
- "Discarding the old input frame...",
- (long long)inputIndex);
- }
- mInputBufferCount[inputIndex] = work->input.buffers.size();
- ALOGV("queue -- queueing input frame: "
- "index %llu (containing %zu buffers)",
- (long long)inputIndex, work->input.buffers.size());
- }
- }
-
WorkBundle workBundle;
- Status hidlStatus = objcpy(&workBundle, *items, &mBufferPoolSender);
- if (hidlStatus != Status::OK) {
- ALOGE("queue -- bad input.");
+ if (!objcpy(&workBundle, *items, &mBufferPoolSender)) {
+ LOG(ERROR) << "queue -- bad input.";
return C2_TRANSACTION_FAILED;
}
- Return<Status> transStatus = base()->queue(workBundle);
+ Return<Status> transStatus = mBase->queue(workBundle);
if (!transStatus.isOk()) {
- ALOGE("queue -- transaction failed.");
+ LOG(ERROR) << "queue -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("queue -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "queue -- call failed: " << status << ".";
}
return status;
}
@@ -985,19 +941,22 @@
std::list<std::unique_ptr<C2Work>>* const flushedWork) {
(void)mode; // Flush mode isn't supported in HIDL yet.
c2_status_t status;
- Return<void> transStatus = base()->flush(
+ Return<void> transStatus = mBase->flush(
[&status, flushedWork](
Status s, const WorkBundle& wb) {
status = static_cast<c2_status_t>(s);
if (status != C2_OK) {
- ALOGE("flush -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "flush -- call failed: " << status << ".";
return;
}
- status = objcpy(flushedWork, wb);
+ if (!objcpy(flushedWork, wb)) {
+ status = C2_CORRUPTED;
+ } else {
+ status = C2_OK;
+ }
});
if (!transStatus.isOk()) {
- ALOGE("flush -- transaction failed.");
+ LOG(ERROR) << "flush -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
@@ -1016,23 +975,6 @@
}
}
- // Input buffers' lifetime management
- for (uint64_t flushedIndex : flushedIndices) {
- std::lock_guard<std::mutex> lock(mInputBuffersMutex);
- auto it = mInputBuffers.find(flushedIndex);
- if (it == mInputBuffers.end()) {
- ALOGV("flush -- returned consumed/unknown input frame: "
- "index %llu",
- (long long)flushedIndex);
- } else {
- ALOGV("flush -- returned unprocessed input frame: "
- "index %llu (containing %zu buffers)",
- (long long)flushedIndex, mInputBufferCount[flushedIndex]);
- mInputBuffers.erase(it);
- mInputBufferCount.erase(flushedIndex);
- }
- }
-
// Output bufferqueue-based blocks' lifetime management
mOutputBufferQueueMutex.lock();
sp<IGraphicBufferProducer> igbp = mOutputIgbp;
@@ -1048,90 +990,73 @@
}
c2_status_t Codec2Client::Component::drain(C2Component::drain_mode_t mode) {
- Return<Status> transStatus = base()->drain(
+ Return<Status> transStatus = mBase->drain(
mode == C2Component::DRAIN_COMPONENT_WITH_EOS);
if (!transStatus.isOk()) {
- ALOGE("drain -- transaction failed.");
+ LOG(ERROR) << "drain -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("drain -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "drain -- call failed: " << status << ".";
}
return status;
}
c2_status_t Codec2Client::Component::start() {
- Return<Status> transStatus = base()->start();
+ Return<Status> transStatus = mBase->start();
if (!transStatus.isOk()) {
- ALOGE("start -- transaction failed.");
+ LOG(ERROR) << "start -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("start -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "start -- call failed: " << status << ".";
}
return status;
}
c2_status_t Codec2Client::Component::stop() {
- Return<Status> transStatus = base()->stop();
+ Return<Status> transStatus = mBase->stop();
if (!transStatus.isOk()) {
- ALOGE("stop -- transaction failed.");
+ LOG(ERROR) << "stop -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("stop -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "stop -- call failed: " << status << ".";
}
- mInputBuffersMutex.lock();
- mInputBuffers.clear();
- mInputBufferCount.clear();
- mInputBuffersMutex.unlock();
return status;
}
c2_status_t Codec2Client::Component::reset() {
- Return<Status> transStatus = base()->reset();
+ Return<Status> transStatus = mBase->reset();
if (!transStatus.isOk()) {
- ALOGE("reset -- transaction failed.");
+ LOG(ERROR) << "reset -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("reset -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "reset -- call failed: " << status << ".";
}
- mInputBuffersMutex.lock();
- mInputBuffers.clear();
- mInputBufferCount.clear();
- mInputBuffersMutex.unlock();
return status;
}
c2_status_t Codec2Client::Component::release() {
- Return<Status> transStatus = base()->release();
+ Return<Status> transStatus = mBase->release();
if (!transStatus.isOk()) {
- ALOGE("release -- transaction failed.");
+ LOG(ERROR) << "release -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("release -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "release -- call failed: " << status << ".";
}
- mInputBuffersMutex.lock();
- mInputBuffers.clear();
- mInputBufferCount.clear();
- mInputBuffersMutex.unlock();
return status;
}
@@ -1139,22 +1064,23 @@
C2BlockPool::local_id_t blockPoolId,
const sp<IGraphicBufferProducer>& surface,
uint32_t generation) {
- sp<HGraphicBufferProducer> igbp = surface->getHalInterface();
+ sp<HGraphicBufferProducer> igbp =
+ surface->getHalInterface<HGraphicBufferProducer>();
+
if (!igbp) {
igbp = new TWGraphicBufferProducer<HGraphicBufferProducer>(surface);
}
- Return<Status> transStatus = base()->setOutputSurface(
+ Return<Status> transStatus = mBase->setOutputSurface(
static_cast<uint64_t>(blockPoolId), igbp);
if (!transStatus.isOk()) {
- ALOGE("setOutputSurface -- transaction failed.");
+ LOG(ERROR) << "setOutputSurface -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("setOutputSurface -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "setOutputSurface -- call failed: " << status << ".";
} else {
std::lock_guard<std::mutex> lock(mOutputBufferQueueMutex);
if (mOutputIgbp != surface) {
@@ -1162,7 +1088,8 @@
if (!surface) {
mOutputBqId = 0;
} else if (surface->getUniqueId(&mOutputBqId) != OK) {
- ALOGE("setOutputSurface -- cannot obtain bufferqueue id.");
+ LOG(ERROR) << "setOutputSurface -- "
+ "cannot obtain bufferqueue id.";
}
}
mOutputGeneration = generation;
@@ -1186,22 +1113,21 @@
uint32_t outputGeneration = mOutputGeneration;
mOutputBufferQueueMutex.unlock();
- status_t status = !attachToBufferQueue(block,
+ status_t status = attachToBufferQueue(block,
outputIgbp,
outputGeneration,
&bqSlot);
if (status != OK) {
- ALOGW("queueToOutputSurface -- attaching failed.");
+ LOG(WARNING) << "queueToOutputSurface -- attaching failed.";
return INVALID_OPERATION;
}
status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
input, output);
if (status != OK) {
- ALOGE("queueToOutputSurface -- queueBuffer() failed "
- "on non-bufferqueue-based block. "
- "Error code = %d.",
- static_cast<int>(status));
+ LOG(ERROR) << "queueToOutputSurface -- queueBuffer() failed "
+ "on non-bufferqueue-based block. "
+ "Error = " << status << ".";
return status;
}
return OK;
@@ -1214,66 +1140,95 @@
mOutputBufferQueueMutex.unlock();
if (!outputIgbp) {
- ALOGV("queueToOutputSurface -- output surface is null.");
+ LOG(VERBOSE) << "queueToOutputSurface -- output surface is null.";
return NO_INIT;
}
- if (bqId != outputBqId) {
- ALOGV("queueToOutputSurface -- bufferqueue ids mismatch.");
- return DEAD_OBJECT;
- }
-
- if (generation != outputGeneration) {
- ALOGV("queueToOutputSurface -- generation numbers mismatch.");
- return DEAD_OBJECT;
+ if (bqId != outputBqId || generation != outputGeneration) {
+ if (!holdBufferQueueBlock(block, mOutputIgbp, mOutputBqId, mOutputGeneration)) {
+ LOG(ERROR) << "queueToOutputSurface -- migration failed.";
+ return DEAD_OBJECT;
+ }
+ if (!getBufferQueueAssignment(block, &generation, &bqId, &bqSlot)) {
+ LOG(ERROR) << "queueToOutputSurface -- corrupted bufferqueue assignment.";
+ return UNKNOWN_ERROR;
+ }
}
status_t status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
input, output);
if (status != OK) {
- ALOGD("queueToOutputSurface -- queueBuffer() failed "
- "on bufferqueue-based block. "
- "Error code = %d.",
- static_cast<int>(status));
+ LOG(DEBUG) << "queueToOutputSurface -- queueBuffer() failed "
+ "on bufferqueue-based block. "
+ "Error = " << status << ".";
return status;
}
if (!yieldBufferQueueBlock(block)) {
- ALOGD("queueToOutputSurface -- cannot yield bufferqueue-based block "
- "to the bufferqueue.");
+ LOG(DEBUG) << "queueToOutputSurface -- cannot yield "
+ "bufferqueue-based block to the bufferqueue.";
return UNKNOWN_ERROR;
}
return OK;
}
-c2_status_t Codec2Client::Component::connectToOmxInputSurface(
- const sp<HGraphicBufferProducer>& producer,
- const sp<HGraphicBufferSource>& source) {
- Return<Status> transStatus = base()->connectToOmxInputSurface(
- producer, source);
+c2_status_t Codec2Client::Component::connectToInputSurface(
+ const std::shared_ptr<InputSurface>& inputSurface,
+ std::shared_ptr<InputSurfaceConnection>* connection) {
+ c2_status_t status;
+ Return<void> transStatus = mBase->connectToInputSurface(
+ inputSurface->mBase,
+ [&status, connection](
+ Status s, const sp<IInputSurfaceConnection>& c) {
+ status = static_cast<c2_status_t>(s);
+ if (status != C2_OK) {
+ LOG(DEBUG) << "connectToInputSurface -- call failed: "
+ << status << ".";
+ return;
+ }
+ *connection = std::make_shared<InputSurfaceConnection>(c);
+ });
if (!transStatus.isOk()) {
- ALOGE("connectToOmxInputSurface -- transaction failed.");
+ LOG(ERROR) << "connectToInputSurface -- transaction failed";
return C2_TRANSACTION_FAILED;
}
- c2_status_t status =
- static_cast<c2_status_t>(static_cast<Status>(transStatus));
- if (status != C2_OK) {
- ALOGE("connectToOmxInputSurface -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ return status;
+}
+
+c2_status_t Codec2Client::Component::connectToOmxInputSurface(
+ const sp<HGraphicBufferProducer>& producer,
+ const sp<HGraphicBufferSource>& source,
+ std::shared_ptr<InputSurfaceConnection>* connection) {
+ c2_status_t status;
+ Return<void> transStatus = mBase->connectToOmxInputSurface(
+ producer, source,
+ [&status, connection](
+ Status s, const sp<IInputSurfaceConnection>& c) {
+ status = static_cast<c2_status_t>(s);
+ if (status != C2_OK) {
+ LOG(DEBUG) << "connectToOmxInputSurface -- call failed: "
+ << status << ".";
+ return;
+ }
+ *connection = std::make_shared<InputSurfaceConnection>(c);
+ });
+ if (!transStatus.isOk()) {
+ LOG(ERROR) << "connectToOmxInputSurface -- transaction failed.";
+ return C2_TRANSACTION_FAILED;
}
return status;
}
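The two connectTo*InputSurface() methods above use the HIDL convention for calls that return both a status and an interface: the proxy method returns Return<void> and delivers its results through a callback, so a transport failure (transStatus.isOk() == false) is reported separately from a call-level failure (the Status handed to the lambda). A reduced sketch of that convention, with hypothetical IExample/IThing names standing in for the real interfaces:

    // Sketch only -- IExample, getThing() and IThing are illustrative names,
    // not part of the Codec2 HAL.
    c2_status_t queryThing(const sp<IExample>& iface, sp<IThing>* thing) {
        c2_status_t status = C2_CORRUPTED;  // overwritten by the callback on success
        Return<void> transStatus = iface->getThing(
                [&status, thing](Status s, const sp<IThing>& t) {
                    status = static_cast<c2_status_t>(s);
                    if (status == C2_OK) {
                        *thing = t;
                    }
                });
        if (!transStatus.isOk()) {
            return C2_TRANSACTION_FAILED;   // binder/transport-level failure
        }
        return status;                      // HAL-reported call status
    }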
c2_status_t Codec2Client::Component::disconnectFromInputSurface() {
- Return<Status> transStatus = base()->disconnectFromInputSurface();
+ Return<Status> transStatus = mBase->disconnectFromInputSurface();
if (!transStatus.isOk()) {
- ALOGE("disconnectToInputSurface -- transaction failed.");
+ LOG(ERROR) << "disconnectToInputSurface -- transaction failed.";
return C2_TRANSACTION_FAILED;
}
c2_status_t status =
static_cast<c2_status_t>(static_cast<Status>(transStatus));
if (status != C2_OK) {
- ALOGE("disconnectFromInputSurface -- call failed. "
- "Error code = %d", static_cast<int>(status));
+ LOG(DEBUG) << "disconnectFromInputSurface -- call failed: "
+ << status << ".";
}
return status;
}
@@ -1293,7 +1248,7 @@
if (std::shared_ptr<Codec2Client::Listener> listener = base.lock()) {
listener->onDeath(component);
} else {
- ALOGW("onDeath -- listener died.");
+ LOG(DEBUG) << "onDeath -- listener died.";
}
}
};
@@ -1303,93 +1258,68 @@
deathRecipient->component = component;
component->mDeathRecipient = deathRecipient;
- Return<bool> transResult = component->base()->linkToDeath(
+ Return<bool> transResult = component->mBase->linkToDeath(
component->mDeathRecipient, 0);
if (!transResult.isOk()) {
- ALOGE("setDeathListener -- failed transaction: linkToDeath.");
+ LOG(ERROR) << "setDeathListener -- linkToDeath() transaction failed.";
return C2_TRANSACTION_FAILED;
}
if (!static_cast<bool>(transResult)) {
- ALOGE("setDeathListener -- linkToDeath call failed.");
+ LOG(DEBUG) << "setDeathListener -- linkToDeath() call failed.";
return C2_CORRUPTED;
}
return C2_OK;
}
// Codec2Client::InputSurface
-
-Codec2Client::InputSurface::Base* Codec2Client::InputSurface::base() const {
- return static_cast<Base*>(mBase.get());
-}
-
-Codec2Client::InputSurface::InputSurface(const sp<IInputSurface>& base) :
- mBase(base),
- mGraphicBufferProducer(new
+Codec2Client::InputSurface::InputSurface(const sp<IInputSurface>& base)
+ : Configurable{
+ [base]() -> sp<IConfigurable> {
+ Return<sp<IConfigurable>> transResult =
+ base->getConfigurable();
+ return transResult.isOk() ?
+ static_cast<sp<IConfigurable>>(transResult) :
+ nullptr;
+ }()
+ },
+ mBase{base},
+ mGraphicBufferProducer{new
::android::hardware::graphics::bufferqueue::V1_0::utils::
- H2BGraphicBufferProducer(base)) {
+ H2BGraphicBufferProducer([base]() -> sp<HGraphicBufferProducer> {
+ Return<sp<HGraphicBufferProducer>> transResult =
+ base->getGraphicBufferProducer();
+ return transResult.isOk() ?
+ static_cast<sp<HGraphicBufferProducer>>(transResult) :
+ nullptr;
+ }())} {
}
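The constructor above has to hand an IConfigurable to the Configurable base and an HGraphicBufferProducer to the H2B wrapper, and both come from HIDL getters that can fail; the immediately-invoked lambdas keep those round trips inside the member-initializer list and turn a failed transaction into a null pointer. The same idiom in isolation (ConfigurableBase/IRemote are placeholder names):

    // Sketch: initialize a base class from a HIDL getter inside the
    // member-initializer list; a failed transaction degrades to nullptr.
    struct Wrapper : ConfigurableBase {
        explicit Wrapper(const sp<IRemote>& base)
            : ConfigurableBase{
                  [base]() -> sp<IConfigurable> {
                      Return<sp<IConfigurable>> r = base->getConfigurable();
                      return r.isOk() ?
                              static_cast<sp<IConfigurable>>(r) : nullptr;
                  }()},
              mBase{base} {}

        sp<IRemote> mBase;
    };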
-c2_status_t Codec2Client::InputSurface::connectToComponent(
- const std::shared_ptr<Codec2Client::Component>& component,
- std::shared_ptr<Connection>* connection) {
- c2_status_t status;
- Return<void> transStatus = base()->connectToComponent(
- component->base(),
- [&status, connection](
- Status s,
- const sp<IInputSurfaceConnection>& c) {
- status = static_cast<c2_status_t>(s);
- if (status != C2_OK) {
- ALOGE("connectToComponent -- call failed. "
- "Error code = %d", static_cast<int>(status));
- return;
- }
- *connection = std::make_shared<Connection>(c);
- });
- if (!transStatus.isOk()) {
- ALOGE("connect -- transaction failed.");
- return C2_TRANSACTION_FAILED;
- }
- return status;
-}
-
-std::shared_ptr<Codec2Client::Configurable>
- Codec2Client::InputSurface::getConfigurable() const {
- Return<sp<IConfigurable>> transResult = base()->getConfigurable();
- if (!transResult.isOk()) {
- ALOGW("getConfigurable -- transaction failed.");
- return nullptr;
- }
- if (!static_cast<sp<IConfigurable>>(transResult)) {
- ALOGW("getConfigurable -- null pointer.");
- return nullptr;
- }
- return std::make_shared<Configurable>(transResult);
-}
-
-const sp<IGraphicBufferProducer>&
+sp<IGraphicBufferProducer>
Codec2Client::InputSurface::getGraphicBufferProducer() const {
return mGraphicBufferProducer;
}
-const sp<IInputSurface>& Codec2Client::InputSurface::getHalInterface() const {
+sp<IInputSurface> Codec2Client::InputSurface::getHalInterface() const {
return mBase;
}
// Codec2Client::InputSurfaceConnection
-
-Codec2Client::InputSurfaceConnection::Base*
- Codec2Client::InputSurfaceConnection::base() const {
- return static_cast<Base*>(mBase.get());
-}
-
Codec2Client::InputSurfaceConnection::InputSurfaceConnection(
- const sp<Codec2Client::InputSurfaceConnection::Base>& base) :
- mBase(base) {
+ const sp<IInputSurfaceConnection>& base)
+ : Configurable{
+ [base]() -> sp<IConfigurable> {
+ Return<sp<IConfigurable>> transResult =
+ base->getConfigurable();
+ return transResult.isOk() ?
+ static_cast<sp<IConfigurable>>(transResult) :
+ nullptr;
+ }()
+ },
+ mBase{base} {
}
c2_status_t Codec2Client::InputSurfaceConnection::disconnect() {
- Return<Status> transResult = base()->disconnect();
+ Return<Status> transResult = mBase->disconnect();
return static_cast<c2_status_t>(static_cast<Status>(transResult));
}
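Because InputSurface and InputSurfaceConnection now derive from Codec2Client::Configurable, callers configure the surface directly instead of fetching a separate Configurable handle first; the CCodec change further down does exactly this for the end-of-stream tuning. A minimal usage sketch (error handling elided), assuming a connected InputSurface named surface:

    C2InputSurfaceEosTuning eos(true);
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    // Previously: surface->getConfigurable()->config(...)
    c2_status_t err = surface->config({&eos}, C2_MAY_BLOCK, &failures);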
diff --git a/media/codec2/hidl/client/include/codec2/hidl/client.h b/media/codec2/hidl/client/include/codec2/hidl/client.h
index c48bf0c..478ce6e 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/client.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/client.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef CODEC2_HIDL_CLIENT_H_
-#define CODEC2_HIDL_CLIENT_H_
+#ifndef CODEC2_HIDL_CLIENT_H
+#define CODEC2_HIDL_CLIENT_H
#include <gui/IGraphicBufferProducer.h>
#include <codec2/hidl/1.0/types.h>
@@ -70,8 +70,8 @@
namespace c2 {
namespace V1_0 {
struct IConfigurable;
-struct IComponentInterface;
struct IComponent;
+struct IComponentInterface;
struct IComponentStore;
struct IInputSurface;
struct IInputSurfaceConnection;
@@ -146,10 +146,8 @@
Codec2ConfigurableClient(const sp<Base>& base);
protected:
- C2String mName;
sp<Base> mBase;
-
- Base* base() const;
+ C2String mName;
friend struct Codec2Client;
};
@@ -162,17 +160,17 @@
typedef Codec2ConfigurableClient Configurable;
- typedef Configurable Interface; // These two types may diverge in the future.
-
struct Component;
+ struct Interface;
+
struct InputSurface;
struct InputSurfaceConnection;
typedef Codec2Client Store;
- std::string getInstanceName() const { return mInstanceName; }
+ std::string getServiceName() const { return mServiceName; }
c2_status_t createComponent(
const C2String& name,
@@ -195,7 +193,7 @@
std::shared_ptr<C2ParamReflector> getParamReflector();
static std::shared_ptr<Codec2Client> CreateFromService(
- const char* instanceName,
+ const char* serviceName,
bool waitForService = true);
// Try to create a component with a given name from all known
@@ -218,10 +216,10 @@
static std::shared_ptr<InputSurface> CreateInputSurface();
// base cannot be null.
- Codec2Client(const sp<Base>& base, std::string instanceName);
+ Codec2Client(const sp<Base>& base, std::string serviceName);
protected:
- Base* base() const;
+ sp<Base> mBase;
// Finds the first store where the predicate returns OK, and returns the last
// predicate result. Uses key to remember the last store found, and if cached,
@@ -232,28 +230,29 @@
mutable std::mutex mMutex;
mutable bool mListed;
- std::string mInstanceName;
+ std::string mServiceName;
mutable std::vector<C2Component::Traits> mTraitsList;
- mutable std::vector<std::unique_ptr<std::vector<std::string>>>
- mAliasesBuffer;
sp<::android::hardware::media::bufferpool::V2_0::IClientManager>
mHostPoolManager;
};
+struct Codec2Client::Interface : public Codec2Client::Configurable {
+
+ typedef ::android::hardware::media::c2::V1_0::IComponentInterface Base;
+
+ Interface(const sp<Base>& base);
+
+protected:
+ sp<Base> mBase;
+};
+
struct Codec2Client::Listener {
// This is called when the component produces some output.
- //
- // numDiscardedInputBuffers is the number of input buffers contained in
- // workItems that have just become unused. Note that workItems may contain
- // more input buffers than numDiscardedInputBuffers because buffers that
- // have been previously reported by onInputBufferDone() are not counted
- // towards numDiscardedInputBuffers, but may still show up in workItems.
virtual void onWorkDone(
const std::weak_ptr<Component>& comp,
- std::list<std::unique_ptr<C2Work>>& workItems,
- size_t numDiscardedInputBuffers) = 0;
+ std::list<std::unique_ptr<C2Work>>& workItems) = 0;
// This is called when the component goes into a tripped state.
virtual void onTripped(
@@ -275,30 +274,14 @@
// Input buffers that have been returned by onWorkDone() or flush() will not
// trigger a call to this function.
virtual void onInputBufferDone(
- const std::shared_ptr<C2Buffer>& buffer) = 0;
+ uint64_t frameIndex, size_t arrayIndex) = 0;
- // This structure is used for transporting onFramesRendered() event to the
- // client in the case where the output buffers are obtained from a
- // bufferqueue.
- struct RenderedFrame {
- // The id of the bufferqueue.
- uint64_t bufferQueueId;
- // The slot of the buffer inside the bufferqueue.
- int32_t slotId;
- // The timestamp.
- int64_t timestampNs;
-
- RenderedFrame(uint64_t bufferQueueId, int32_t slotId,
- int64_t timestampNs)
- : bufferQueueId(bufferQueueId),
- slotId(slotId),
- timestampNs(timestampNs) {}
- RenderedFrame(const RenderedFrame&) = default;
- };
-
- // This is called when the component becomes aware of frames being rendered.
- virtual void onFramesRendered(
- const std::vector<RenderedFrame>& renderedFrames) = 0;
+ // This is called when the component becomes aware of a frame being
+ // rendered.
+ virtual void onFrameRendered(
+ uint64_t bufferQueueId,
+ int32_t slotId,
+ int64_t timestampNs) = 0;
virtual ~Listener();
@@ -373,9 +356,15 @@
const QueueBufferInput& input,
QueueBufferOutput* output);
+ // Connect to a given InputSurface.
+ c2_status_t connectToInputSurface(
+ const std::shared_ptr<InputSurface>& inputSurface,
+ std::shared_ptr<InputSurfaceConnection>* connection);
+
c2_status_t connectToOmxInputSurface(
const sp<HGraphicBufferProducer>& producer,
- const sp<HGraphicBufferSource>& source);
+ const sp<HGraphicBufferSource>& source,
+ std::shared_ptr<InputSurfaceConnection>* connection);
c2_status_t disconnectFromInputSurface();
@@ -385,25 +374,7 @@
~Component();
protected:
- Base* base() const;
-
- // Mutex for mInputBuffers and mInputBufferCount.
- mutable std::mutex mInputBuffersMutex;
-
- // Map: frameIndex -> vector of bufferIndices
- //
- // mInputBuffers[frameIndex][bufferIndex] may be null if the buffer in that
- // slot has been freed.
- mutable std::map<uint64_t, std::vector<std::shared_ptr<C2Buffer>>>
- mInputBuffers;
-
- // Map: frameIndex -> number of bufferIndices that have not been freed
- //
- // mInputBufferCount[frameIndex] keeps track of the number of non-null
- // elements in mInputBuffers[frameIndex]. When mInputBufferCount[frameIndex]
- // decreases to 0, frameIndex can be removed from both mInputBuffers and
- // mInputBufferCount.
- mutable std::map<uint64_t, size_t> mInputBufferCount;
+ sp<Base> mBase;
::android::hardware::media::c2::V1_0::utils::DefaultBufferPoolSender
mBufferPoolSender;
@@ -421,14 +392,11 @@
friend struct Codec2Client;
struct HidlListener;
- // Return the number of input buffers that should be discarded.
- size_t handleOnWorkDone(const std::list<std::unique_ptr<C2Work>> &workItems);
- // Remove an input buffer from mInputBuffers and return it.
- std::shared_ptr<C2Buffer> freeInputBuffer(uint64_t frameIndex, size_t bufferIndex);
+ void handleOnWorkDone(const std::list<std::unique_ptr<C2Work>> &workItems);
};
-struct Codec2Client::InputSurface {
+struct Codec2Client::InputSurface : public Codec2Client::Configurable {
public:
typedef ::android::hardware::media::c2::V1_0::IInputSurface Base;
@@ -439,22 +407,15 @@
typedef ::android::IGraphicBufferProducer IGraphicBufferProducer;
- c2_status_t connectToComponent(
- const std::shared_ptr<Component>& component,
- std::shared_ptr<Connection>* connection);
-
- std::shared_ptr<Configurable> getConfigurable() const;
-
- const sp<IGraphicBufferProducer>& getGraphicBufferProducer() const;
+ sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
// Return the underlying IInputSurface.
- const sp<Base>& getHalInterface() const;
+ sp<Base> getHalInterface() const;
// base cannot be null.
InputSurface(const sp<Base>& base);
protected:
- Base* base() const;
sp<Base> mBase;
sp<IGraphicBufferProducer> mGraphicBufferProducer;
@@ -463,7 +424,7 @@
friend struct Component;
};
-struct Codec2Client::InputSurfaceConnection {
+struct Codec2Client::InputSurfaceConnection : public Codec2Client::Configurable {
typedef ::android::hardware::media::c2::V1_0::IInputSurfaceConnection Base;
@@ -473,7 +434,6 @@
InputSurfaceConnection(const sp<Base>& base);
protected:
- Base* base() const;
sp<Base> mBase;
friend struct Codec2Client::InputSurface;
@@ -481,5 +441,5 @@
} // namespace android
-#endif // CODEC2_HIDL_CLIENT_H_
+#endif // CODEC2_HIDL_CLIENT_H
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 2870d39..a212651 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -8,6 +8,7 @@
"CCodecConfig.cpp",
"Codec2Buffer.cpp",
"Codec2InfoBuilder.cpp",
+ "PipelineWatcher.cpp",
"ReflectedParamUpdater.cpp",
"SkipCutBuffer.cpp",
],
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index 749fd7a..03d859a 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -226,7 +226,7 @@
&& omxBuf.mGraphicBuffer != nullptr) {
std::shared_ptr<C2GraphicAllocation> alloc;
handle = WrapNativeCodec2GrallocHandle(
- native_handle_clone(omxBuf.mGraphicBuffer->handle),
+ omxBuf.mGraphicBuffer->handle,
omxBuf.mGraphicBuffer->width,
omxBuf.mGraphicBuffer->height,
omxBuf.mGraphicBuffer->format,
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 852d6d6..2d10c67 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -143,8 +143,7 @@
if (mConnection != nullptr) {
return ALREADY_EXISTS;
}
- return toStatusT(mSurface->connectToComponent(comp, &mConnection),
- C2_OPERATION_InputSurface_connectToComponent);
+ return toStatusT(comp->connectToInputSurface(mSurface, &mConnection));
}
void disconnect() override {
@@ -162,7 +161,7 @@
status_t signalEndOfInputStream() override {
C2InputSurfaceEosTuning eos(true);
std::vector<std::unique_ptr<C2SettingResult>> failures;
- c2_status_t err = mSurface->getConfigurable()->config({&eos}, C2_MAY_BLOCK, &failures);
+ c2_status_t err = mSurface->config({&eos}, C2_MAY_BLOCK, &failures);
if (err != C2_OK) {
return UNKNOWN_ERROR;
}
@@ -449,14 +448,13 @@
virtual void onWorkDone(
const std::weak_ptr<Codec2Client::Component>& component,
- std::list<std::unique_ptr<C2Work>>& workItems,
- size_t numDiscardedInputBuffers) override {
+ std::list<std::unique_ptr<C2Work>>& workItems) override {
(void)component;
sp<CCodec> codec(mCodec.promote());
if (!codec) {
return;
}
- codec->onWorkDone(workItems, numDiscardedInputBuffers);
+ codec->onWorkDone(workItems);
}
virtual void onTripped(
@@ -495,17 +493,20 @@
codec->mCallback->onError(DEAD_OBJECT, ACTION_CODE_FATAL);
}
- virtual void onFramesRendered(
- const std::vector<RenderedFrame>& renderedFrames) override {
- // TODO
- (void)renderedFrames;
+ virtual void onFrameRendered(uint64_t bufferQueueId,
+ int32_t slotId,
+ int64_t timestampNs) override {
+ // TODO: implement
+ (void)bufferQueueId;
+ (void)slotId;
+ (void)timestampNs;
}
virtual void onInputBufferDone(
- const std::shared_ptr<C2Buffer>& buffer) override {
+ uint64_t frameIndex, size_t arrayIndex) override {
sp<CCodec> codec(mCodec.promote());
if (codec) {
- codec->onInputBufferDone(buffer);
+ codec->onInputBufferDone(frameIndex, arrayIndex);
}
}
@@ -529,10 +530,6 @@
{RenderedFrameInfo(mediaTimeUs, renderTimeNs)});
}
- void onWorkQueued(bool eos) override {
- mCodec->onWorkQueued(eos);
- }
-
void onOutputBuffersChanged() override {
mCodec->mCallback->onOutputBuffersChanged();
}
@@ -544,8 +541,7 @@
// CCodec
CCodec::CCodec()
- : mChannel(new CCodecBufferChannel(std::make_shared<CCodecCallbackImpl>(this))),
- mQueuedWorkCount(0) {
+ : mChannel(new CCodecBufferChannel(std::make_shared<CCodecCallbackImpl>(this))) {
}
CCodec::~CCodec() {
@@ -599,7 +595,7 @@
// set up preferred component store to access vendor store parameters
client = Codec2Client::CreateFromService("default", false);
if (client) {
- ALOGI("setting up '%s' as default (vendor) store", client->getInstanceName().c_str());
+ ALOGI("setting up '%s' as default (vendor) store", client->getServiceName().c_str());
SetPreferredCodec2ComponentStore(
std::make_shared<Codec2ClientInterfaceWrapper>(client));
}
@@ -713,6 +709,49 @@
Mutexed<Config>::Locked config(mConfig);
config->mUsingSurface = surface != nullptr;
+ // Enforce required parameters
+ int32_t i32;
+ float flt;
+ if (config->mDomain & Config::IS_AUDIO) {
+ if (!msg->findInt32(KEY_SAMPLE_RATE, &i32)) {
+ ALOGD("sample rate is missing, which is required for audio components.");
+ return BAD_VALUE;
+ }
+ if (!msg->findInt32(KEY_CHANNEL_COUNT, &i32)) {
+ ALOGD("channel count is missing, which is required for audio components.");
+ return BAD_VALUE;
+ }
+ if ((config->mDomain & Config::IS_ENCODER)
+ && !mime.equalsIgnoreCase(MEDIA_MIMETYPE_AUDIO_FLAC)
+ && !msg->findInt32(KEY_BIT_RATE, &i32)
+ && !msg->findFloat(KEY_BIT_RATE, &flt)) {
+ ALOGD("bitrate is missing, which is required for audio encoders.");
+ return BAD_VALUE;
+ }
+ }
+ if (config->mDomain & (Config::IS_IMAGE | Config::IS_VIDEO)) {
+ if (!msg->findInt32(KEY_WIDTH, &i32)) {
+ ALOGD("width is missing, which is required for image/video components.");
+ return BAD_VALUE;
+ }
+ if (!msg->findInt32(KEY_HEIGHT, &i32)) {
+ ALOGD("height is missing, which is required for image/video components.");
+ return BAD_VALUE;
+ }
+ if ((config->mDomain & Config::IS_ENCODER) && (config->mDomain & Config::IS_VIDEO)) {
+ if (!msg->findInt32(KEY_BIT_RATE, &i32)
+ && !msg->findFloat(KEY_BIT_RATE, &flt)) {
+ ALOGD("bitrate is missing, which is required for video encoders.");
+ return BAD_VALUE;
+ }
+ if (!msg->findInt32(KEY_I_FRAME_INTERVAL, &i32)
+ && !msg->findFloat(KEY_I_FRAME_INTERVAL, &flt)) {
+ ALOGD("I frame interval is missing, which is required for video encoders.");
+ return BAD_VALUE;
+ }
+ }
+ }
+
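The checks above make configure() fail early with BAD_VALUE when mandatory keys are missing from the client's format. As a sketch, a video-encoder format that passes these checks needs at least the following entries (keys shown by their string values; a real session sets more, e.g. frame rate and color format):

    sp<AMessage> format = new AMessage;
    format->setString("mime", "video/avc");
    format->setInt32("width", 1280);           // KEY_WIDTH
    format->setInt32("height", 720);           // KEY_HEIGHT
    format->setInt32("bitrate", 2000000);      // KEY_BIT_RATE
    format->setInt32("i-frame-interval", 1);   // KEY_I_FRAME_INTERVAL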
/*
* Handle input surface configuration
*/
@@ -722,13 +761,14 @@
{
config->mISConfig->mMinFps = 0;
int64_t value;
- if (msg->findInt64("repeat-previous-frame-after", &value) && value > 0) {
+ if (msg->findInt64(KEY_REPEAT_PREVIOUS_FRAME_AFTER, &value) && value > 0) {
config->mISConfig->mMinFps = 1e6 / value;
}
- (void)msg->findFloat("max-fps-to-encoder", &config->mISConfig->mMaxFps);
+ (void)msg->findFloat(
+ KEY_MAX_FPS_TO_ENCODER, &config->mISConfig->mMaxFps);
config->mISConfig->mMinAdjustedFps = 0;
config->mISConfig->mFixedAdjustedFps = 0;
- if (msg->findInt64("max-pts-gap-to-encoder", &value)) {
+ if (msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &value)) {
if (value < 0 && value >= INT32_MIN) {
config->mISConfig->mFixedAdjustedFps = -1e6 / value;
} else if (value > 0 && value <= INT32_MAX) {
@@ -749,7 +789,7 @@
config->mISConfig->mSuspended = false;
config->mISConfig->mSuspendAtUs = -1;
int32_t value;
- if (msg->findInt32("create-input-buffers-suspended", &value) && value) {
+ if (msg->findInt32(KEY_CREATE_INPUT_SURFACE_SUSPENDED, &value) && value) {
config->mISConfig->mSuspended = true;
}
}
@@ -776,8 +816,16 @@
}
std::vector<std::unique_ptr<C2Param>> configUpdate;
+ // NOTE: We used to ignore "video-bitrate" at configure; replicate
+ // the behavior here.
+ sp<AMessage> sdkParams = msg;
+ int32_t videoBitrate;
+ if (sdkParams->findInt32(PARAMETER_KEY_VIDEO_BITRATE, &videoBitrate)) {
+ sdkParams = msg->dup();
+ sdkParams->removeEntryAt(sdkParams->findEntryByName(PARAMETER_KEY_VIDEO_BITRATE));
+ }
status_t err = config->getConfigUpdateFromSdkParams(
- comp, msg, Config::IS_CONFIG, C2_DONT_BLOCK, &configUpdate);
+ comp, sdkParams, Config::IS_CONFIG, C2_DONT_BLOCK, &configUpdate);
if (err != OK) {
ALOGW("failed to convert configuration to c2 params");
}
@@ -941,6 +989,47 @@
(new AMessage(kWhatCreateInputSurface, this))->post();
}
+sp<PersistentSurface> CCodec::CreateOmxInputSurface() {
+ using namespace android::hardware::media::omx::V1_0;
+ using namespace android::hardware::media::omx::V1_0::utils;
+ using namespace android::hardware::graphics::bufferqueue::V1_0::utils;
+ typedef android::hardware::media::omx::V1_0::Status OmxStatus;
+ android::sp<IOmx> omx = IOmx::getService();
+ typedef android::hardware::graphics::bufferqueue::V1_0::
+ IGraphicBufferProducer HGraphicBufferProducer;
+ typedef android::hardware::media::omx::V1_0::
+ IGraphicBufferSource HGraphicBufferSource;
+ OmxStatus s;
+ android::sp<HGraphicBufferProducer> gbp;
+ android::sp<HGraphicBufferSource> gbs;
+ android::Return<void> transStatus = omx->createInputSurface(
+ [&s, &gbp, &gbs](
+ OmxStatus status,
+ const android::sp<HGraphicBufferProducer>& producer,
+ const android::sp<HGraphicBufferSource>& source) {
+ s = status;
+ gbp = producer;
+ gbs = source;
+ });
+ if (transStatus.isOk() && s == OmxStatus::OK) {
+ return new PersistentSurface(
+ new H2BGraphicBufferProducer(gbp),
+ sp<::android::IGraphicBufferSource>(new LWGraphicBufferSource(gbs)));
+ }
+
+ return nullptr;
+}
+
+sp<PersistentSurface> CCodec::CreateCompatibleInputSurface() {
+ sp<PersistentSurface> surface(CreateInputSurface());
+
+ if (surface == nullptr) {
+ surface = CreateOmxInputSurface();
+ }
+
+ return surface;
+}
+
void CCodec::createInputSurface() {
status_t err;
sp<IGraphicBufferProducer> bufferProducer;
@@ -953,19 +1042,21 @@
outputFormat = config->mOutputFormat;
}
- std::shared_ptr<PersistentSurface> persistentSurface(CreateInputSurface());
+ sp<PersistentSurface> persistentSurface = CreateCompatibleInputSurface();
if (persistentSurface->getHidlTarget()) {
- sp<IInputSurface> inputSurface = IInputSurface::castFrom(
+ sp<IInputSurface> hidlInputSurface = IInputSurface::castFrom(
persistentSurface->getHidlTarget());
- if (!inputSurface) {
+ if (!hidlInputSurface) {
ALOGE("Corrupted input surface");
mCallback->onInputSurfaceCreationFailed(UNKNOWN_ERROR);
return;
}
+ std::shared_ptr<Codec2Client::InputSurface> inputSurface =
+ std::make_shared<Codec2Client::InputSurface>(hidlInputSurface);
err = setupInputSurface(std::make_shared<C2InputSurfaceWrapper>(
- std::make_shared<Codec2Client::InputSurface>(inputSurface)));
- bufferProducer = new H2BGraphicBufferProducer(inputSurface);
+ inputSurface));
+ bufferProducer = inputSurface->getGraphicBufferProducer();
} else {
int32_t width = 0;
(void)outputFormat->findInt32("width", &width);
@@ -1020,7 +1111,7 @@
ALOGD("ISConfig: no configuration");
}
- return surface->start();
+ return OK;
}
void CCodec::initiateSetInputSurface(const sp<PersistentSurface> &surface) {
@@ -1107,12 +1198,20 @@
}
sp<AMessage> inputFormat;
sp<AMessage> outputFormat;
+ status_t err2 = OK;
{
Mutexed<Config>::Locked config(mConfig);
inputFormat = config->mInputFormat;
outputFormat = config->mOutputFormat;
+ if (config->mInputSurface) {
+ err2 = config->mInputSurface->start();
+ }
}
- status_t err2 = mChannel->start(inputFormat, outputFormat);
+ if (err2 != OK) {
+ mCallback->onError(err2, ACTION_CODE_FATAL);
+ return;
+ }
+ err2 = mChannel->start(inputFormat, outputFormat);
if (err2 != OK) {
mCallback->onError(err2, ACTION_CODE_FATAL);
return;
@@ -1187,6 +1286,13 @@
}
{
+ Mutexed<Config>::Locked config(mConfig);
+ if (config->mInputSurface) {
+ config->mInputSurface->disconnect();
+ config->mInputSurface = nullptr;
+ }
+ }
+ {
Mutexed<State>::Locked state(mState);
if (state->get() == STOPPING) {
state->set(ALLOCATED);
@@ -1196,6 +1302,7 @@
}
void CCodec::initiateRelease(bool sendCallback /* = true */) {
+ bool clearInputSurfaceIfNeeded = false;
{
Mutexed<State>::Locked state(mState);
if (state->get() == RELEASED || state->get() == RELEASING) {
@@ -1217,9 +1324,23 @@
}
return;
}
+ if (state->get() == STARTING
+ || state->get() == RUNNING
+ || state->get() == STOPPING) {
+ // Input surface may have been started, so cleanup is needed.
+ clearInputSurfaceIfNeeded = true;
+ }
state->set(RELEASING);
}
+ if (clearInputSurfaceIfNeeded) {
+ Mutexed<Config>::Locked config(mConfig);
+ if (config->mInputSurface) {
+ config->mInputSurface->disconnect();
+ config->mInputSurface = nullptr;
+ }
+ }
+
mChannel->stop();
// thiz holds strong ref to this while the thread is running.
sp<CCodec> thiz(this);
@@ -1309,7 +1430,6 @@
}
mChannel->flush(flushedWork);
- subQueuedWorkCount(flushedWork.size());
{
Mutexed<State>::Locked state(mState);
@@ -1347,11 +1467,7 @@
(void)mChannel->requestInitialInputBuffers();
}
-void CCodec::signalSetParameters(const sp<AMessage> &params) {
- setParameters(params);
-}
-
-void CCodec::setParameters(const sp<AMessage> &params) {
+void CCodec::signalSetParameters(const sp<AMessage> &msg) {
std::shared_ptr<Codec2Client::Component> comp;
auto checkState = [this, &comp] {
Mutexed<State>::Locked state(mState);
@@ -1365,6 +1481,15 @@
return;
}
+ // NOTE: We used to ignore "bitrate" at setParameters; replicate
+ // the behavior here.
+ sp<AMessage> params = msg;
+ int32_t bitrate;
+ if (params->findInt32(KEY_BIT_RATE, &bitrate)) {
+ params = msg->dup();
+ params->removeEntryAt(params->findEntryByName(KEY_BIT_RATE));
+ }
+
Mutexed<Config>::Locked config(mConfig);
/**
@@ -1372,7 +1497,7 @@
*/
if ((config->mDomain & (Config::IS_VIDEO | Config::IS_IMAGE))
&& (config->mDomain & Config::IS_ENCODER) && config->mInputSurface && config->mISConfig) {
- (void)params->findInt64("time-offset-us", &config->mISConfig->mTimeOffsetUs);
+ (void)params->findInt64(PARAMETER_KEY_OFFSET_TIME, &config->mISConfig->mTimeOffsetUs);
if (params->findInt64("skip-frames-before", &config->mISConfig->mStartAtUs)) {
config->mISConfig->mStopped = false;
@@ -1381,10 +1506,10 @@
}
int32_t value;
- if (params->findInt32("drop-input-frames", &value)) {
+ if (params->findInt32(PARAMETER_KEY_SUSPEND, &value)) {
config->mISConfig->mSuspended = value;
config->mISConfig->mSuspendAtUs = -1;
- (void)params->findInt64("drop-start-time-us", &config->mISConfig->mSuspendAtUs);
+ (void)params->findInt64(PARAMETER_KEY_SUSPEND_TIME, &config->mISConfig->mSuspendAtUs);
}
(void)config->mInputSurface->configure(*config->mISConfig);
@@ -1431,28 +1556,16 @@
config->setParameters(comp, params, C2_MAY_BLOCK);
}
-void CCodec::onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems,
- size_t numDiscardedInputBuffers) {
+void CCodec::onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems) {
if (!workItems.empty()) {
- {
- Mutexed<std::list<size_t>>::Locked numDiscardedInputBuffersQueue(
- mNumDiscardedInputBuffersQueue);
- numDiscardedInputBuffersQueue->insert(
- numDiscardedInputBuffersQueue->end(),
- workItems.size() - 1, 0);
- numDiscardedInputBuffersQueue->emplace_back(
- numDiscardedInputBuffers);
- }
- {
- Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
- queue->splice(queue->end(), workItems);
- }
+ Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
+ queue->splice(queue->end(), workItems);
}
(new AMessage(kWhatWorkDone, this))->post();
}
-void CCodec::onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer) {
- mChannel->onInputBufferDone(buffer);
+void CCodec::onInputBufferDone(uint64_t frameIndex, size_t arrayIndex) {
+ mChannel->onInputBufferDone(frameIndex, arrayIndex);
}
void CCodec::onMessageReceived(const sp<AMessage> &msg) {
@@ -1478,7 +1591,6 @@
case kWhatStart: {
// C2Component::start() should return within 500ms.
setDeadline(now, 550ms, "start");
- mQueuedWorkCount = 0;
start();
break;
}
@@ -1486,10 +1598,6 @@
// C2Component::stop() should return within 500ms.
setDeadline(now, 550ms, "stop");
stop();
-
- mQueuedWorkCount = 0;
- Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
- deadline->set(TimePoint::max(), "none");
break;
}
case kWhatFlush: {
@@ -1515,7 +1623,6 @@
}
case kWhatWorkDone: {
std::unique_ptr<C2Work> work;
- size_t numDiscardedInputBuffers;
bool shouldPost = false;
{
Mutexed<std::list<std::unique_ptr<C2Work>>>::Locked queue(mWorkDoneQueue);
@@ -1526,24 +1633,10 @@
queue->pop_front();
shouldPost = !queue->empty();
}
- {
- Mutexed<std::list<size_t>>::Locked numDiscardedInputBuffersQueue(
- mNumDiscardedInputBuffersQueue);
- if (numDiscardedInputBuffersQueue->empty()) {
- numDiscardedInputBuffers = 0;
- } else {
- numDiscardedInputBuffers = numDiscardedInputBuffersQueue->front();
- numDiscardedInputBuffersQueue->pop_front();
- }
- }
if (shouldPost) {
(new AMessage(kWhatWorkDone, this))->post();
}
- if (work->worklets.empty()
- || !(work->worklets.front()->output.flags & C2FrameData::FLAG_INCOMPLETE)) {
- subQueuedWorkCount(1);
- }
// handle configuration changes in work done
Mutexed<Config>::Locked config(mConfig);
bool changed = false;
@@ -1607,8 +1700,7 @@
}
mChannel->onWorkDone(
std::move(work), changed ? config->mOutputFormat : nullptr,
- initData.hasChanged() ? initData.update().get() : nullptr,
- numDiscardedInputBuffers);
+ initData.hasChanged() ? initData.update().get() : nullptr);
break;
}
case kWhatWatch: {
@@ -1635,17 +1727,26 @@
void CCodec::initiateReleaseIfStuck() {
std::string name;
bool pendingDeadline = false;
- for (Mutexed<NamedTimePoint> *deadlinePtr : { &mDeadline, &mQueueDeadline, &mEosDeadline }) {
- Mutexed<NamedTimePoint>::Locked deadline(*deadlinePtr);
+ {
+ Mutexed<NamedTimePoint>::Locked deadline(mDeadline);
if (deadline->get() < std::chrono::steady_clock::now()) {
name = deadline->getName();
- break;
}
if (deadline->get() != TimePoint::max()) {
pendingDeadline = true;
}
}
if (name.empty()) {
+ constexpr std::chrono::steady_clock::duration kWorkDurationThreshold = 3s;
+ std::chrono::steady_clock::duration elapsed = mChannel->elapsed();
+ if (elapsed >= kWorkDurationThreshold) {
+ name = "queue";
+ }
+ if (elapsed > 0s) {
+ pendingDeadline = true;
+ }
+ }
+ if (name.empty()) {
// We're not stuck.
if (pendingDeadline) {
// If we are not stuck yet but still has deadline coming up,
@@ -1660,79 +1761,23 @@
mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
}
-void CCodec::onWorkQueued(bool eos) {
- ALOGV("queued work count +1 from %d", mQueuedWorkCount.load());
- int32_t count = ++mQueuedWorkCount;
- if (eos) {
- CCodecWatchdog::getInstance()->watch(this);
- Mutexed<NamedTimePoint>::Locked deadline(mEosDeadline);
- deadline->set(std::chrono::steady_clock::now() + 3s, "eos");
- }
- // TODO: query and use input/pipeline/output delay combined
- if (count >= 4) {
- CCodecWatchdog::getInstance()->watch(this);
- Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
- deadline->set(std::chrono::steady_clock::now() + 3s, "queue");
- }
-}
-
-void CCodec::subQueuedWorkCount(uint32_t count) {
- ALOGV("queued work count -%u from %d", count, mQueuedWorkCount.load());
- int32_t currentCount = (mQueuedWorkCount -= count);
- if (currentCount == 0) {
- Mutexed<NamedTimePoint>::Locked deadline(mEosDeadline);
- deadline->set(TimePoint::max(), "none");
- }
- Mutexed<NamedTimePoint>::Locked deadline(mQueueDeadline);
- deadline->set(TimePoint::max(), "none");
-}
-
} // namespace android
extern "C" android::CodecBase *CreateCodec() {
return new android::CCodec;
}
+// Create Codec 2.0 input surface
extern "C" android::PersistentSurface *CreateInputSurface() {
// Attempt to create a Codec2's input surface.
std::shared_ptr<android::Codec2Client::InputSurface> inputSurface =
android::Codec2Client::CreateInputSurface();
- if (inputSurface) {
- return new android::PersistentSurface(
- inputSurface->getGraphicBufferProducer(),
- static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
- inputSurface->getHalInterface()));
+ if (!inputSurface) {
+ return nullptr;
}
-
- // Fall back to OMX.
- using namespace android::hardware::media::omx::V1_0;
- using namespace android::hardware::media::omx::V1_0::utils;
- using namespace android::hardware::graphics::bufferqueue::V1_0::utils;
- typedef android::hardware::media::omx::V1_0::Status OmxStatus;
- android::sp<IOmx> omx = IOmx::getService();
- typedef android::hardware::graphics::bufferqueue::V1_0::
- IGraphicBufferProducer HGraphicBufferProducer;
- typedef android::hardware::media::omx::V1_0::
- IGraphicBufferSource HGraphicBufferSource;
- OmxStatus s;
- android::sp<HGraphicBufferProducer> gbp;
- android::sp<HGraphicBufferSource> gbs;
- android::Return<void> transStatus = omx->createInputSurface(
- [&s, &gbp, &gbs](
- OmxStatus status,
- const android::sp<HGraphicBufferProducer>& producer,
- const android::sp<HGraphicBufferSource>& source) {
- s = status;
- gbp = producer;
- gbs = source;
- });
- if (transStatus.isOk() && s == OmxStatus::OK) {
- return new android::PersistentSurface(
- new H2BGraphicBufferProducer(gbp),
- sp<::android::IGraphicBufferSource>(
- new LWGraphicBufferSource(gbs)));
- }
-
- return nullptr;
+ return new android::PersistentSurface(
+ inputSurface->getGraphicBufferProducer(),
+ static_cast<android::sp<android::hidl::base::V1_0::IBase>>(
+ inputSurface->getHalInterface()));
}
diff --git a/media/codec2/sfplugin/CCodec.h b/media/codec2/sfplugin/CCodec.h
index 78b009e..b0b3c4f 100644
--- a/media/codec2/sfplugin/CCodec.h
+++ b/media/codec2/sfplugin/CCodec.h
@@ -66,9 +66,8 @@
virtual void signalRequestIDRFrame() override;
void initiateReleaseIfStuck();
- void onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems,
- size_t numDiscardedInputBuffers);
- void onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer);
+ void onWorkDone(std::list<std::unique_ptr<C2Work>> &workItems);
+ void onInputBufferDone(uint64_t frameIndex, size_t arrayIndex);
protected:
virtual ~CCodec();
@@ -76,7 +75,7 @@
virtual void onMessageReceived(const sp<AMessage> &msg) override;
private:
- typedef std::chrono::time_point<std::chrono::steady_clock> TimePoint;
+ typedef std::chrono::steady_clock::time_point TimePoint;
status_t tryAndReportOnError(std::function<status_t()> job);
@@ -90,19 +89,25 @@
void flush();
void release(bool sendCallback);
+ /**
+ * Creates an input surface for the current device configuration compatible with CCodec.
+ * This could be backed by the C2 HAL or the OMX HAL.
+ */
+ static sp<PersistentSurface> CreateCompatibleInputSurface();
+
+ /// Creates an input surface to the OMX HAL
+ static sp<PersistentSurface> CreateOmxInputSurface();
+
+ /// handle a create input surface call
void createInputSurface();
void setInputSurface(const sp<PersistentSurface> &surface);
status_t setupInputSurface(const std::shared_ptr<InputSurfaceWrapper> &surface);
- void setParameters(const sp<AMessage> ¶ms);
void setDeadline(
const TimePoint &now,
const std::chrono::milliseconds &timeout,
const char *name);
- void onWorkQueued(bool eos);
- void subQueuedWorkCount(uint32_t count);
-
enum {
kWhatAllocate,
kWhatConfigure,
@@ -167,13 +172,9 @@
struct ClientListener;
Mutexed<NamedTimePoint> mDeadline;
- std::atomic_int32_t mQueuedWorkCount;
- Mutexed<NamedTimePoint> mQueueDeadline;
- Mutexed<NamedTimePoint> mEosDeadline;
typedef CCodecConfig Config;
Mutexed<Config> mConfig;
Mutexed<std::list<std::unique_ptr<C2Work>>> mWorkDoneQueue;
- Mutexed<std::list<size_t>> mNumDiscardedInputBuffersQueue;
friend class CCodecCallbackImpl;
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 55a97d8..7a444a3 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -94,6 +94,11 @@
*/
virtual void getArray(Vector<sp<MediaCodecBuffer>> *) const {}
+ /**
+ * Return number of buffers the client owns.
+ */
+ virtual size_t numClientBuffers() const = 0;
+
protected:
std::string mComponentName; ///< name of component for debugging
std::string mChannelName; ///< name of channel for debugging
@@ -128,7 +133,9 @@
* and released successfully.
*/
virtual bool releaseBuffer(
- const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) = 0;
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer,
+ bool release) = 0;
/**
* Release the buffer that is no longer used by the codec process. Return
@@ -179,7 +186,7 @@
* MediaCodec behavior.
*/
virtual status_t registerCsd(
- const C2StreamCsdInfo::output * /* csd */,
+ const C2StreamInitDataInfo::output * /* csd */,
size_t * /* index */,
sp<MediaCodecBuffer> * /* clientBuffer */) = 0;
@@ -248,6 +255,34 @@
mSkipCutBuffer = scb;
}
+ void handleImageData(const sp<Codec2Buffer> &buffer) {
+ sp<ABuffer> imageDataCandidate = buffer->getImageData();
+ if (imageDataCandidate == nullptr) {
+ return;
+ }
+ sp<ABuffer> imageData;
+ if (!mFormat->findBuffer("image-data", &imageData)
+ || imageDataCandidate->size() != imageData->size()
+ || memcmp(imageDataCandidate->data(), imageData->data(), imageData->size()) != 0) {
+ ALOGD("[%s] updating image-data", mName);
+ sp<AMessage> newFormat = dupFormat();
+ newFormat->setBuffer("image-data", imageDataCandidate);
+ MediaImage2 *img = (MediaImage2*)imageDataCandidate->data();
+ if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
+ int32_t stride = img->mPlane[0].mRowInc;
+ newFormat->setInt32(KEY_STRIDE, stride);
+ ALOGD("[%s] updating stride = %d", mName, stride);
+ if (img->mNumPlanes > 1 && stride > 0) {
+ int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
+ newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
+ ALOGD("[%s] updating vstride = %d", mName, vstride);
+ }
+ }
+ setFormat(newFormat);
+ buffer->setFormat(newFormat);
+ }
+ }
+
protected:
sp<SkipCutBuffer> mSkipCutBuffer;
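handleImageData() above republishes stride and slice height whenever an output graphic buffer carries a new image-data blob: the stride is plane 0's row increment, and the slice height is inferred from the distance between the first two plane offsets divided by that stride. As an illustrative (not component-specific) YUV420 example: a 1280x720 output whose Y plane starts at offset 0 and whose U plane begins at byte offset 1310720, with a 1280-byte row increment, yields stride = 1280 and slice height = 1310720 / 1280 = 1024.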
@@ -271,12 +306,8 @@
namespace {
-// TODO: get this info from component
-const static size_t kMinInputBufferArraySize = 4;
-const static size_t kMaxPipelineCapacity = 18;
-const static size_t kChannelOutputDelay = 0;
-const static size_t kMinOutputBufferArraySize = kMaxPipelineCapacity +
- kChannelOutputDelay;
+const static size_t kSmoothnessFactor = 4;
+const static size_t kRenderingDepth = 3;
const static size_t kLinearBufferSize = 1048576;
// This can fit 4K RGBA frame, and most likely client won't need more than this.
const static size_t kMaxLinearBufferSize = 3840 * 2160 * 4;
@@ -459,13 +490,18 @@
* \return true if the buffer is successfully released from a slot
* false otherwise
*/
- bool releaseSlot(const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) {
+ bool releaseSlot(
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer,
+ bool release) {
sp<Codec2Buffer> clientBuffer;
size_t index = mBuffers.size();
for (size_t i = 0; i < mBuffers.size(); ++i) {
if (mBuffers[i].clientBuffer == buffer) {
clientBuffer = mBuffers[i].clientBuffer;
- mBuffers[i].clientBuffer.clear();
+ if (release) {
+ mBuffers[i].clientBuffer.clear();
+ }
index = i;
break;
}
@@ -474,8 +510,11 @@
ALOGV("[%s] %s: No matching buffer found", mName, __func__);
return false;
}
- std::shared_ptr<C2Buffer> result = clientBuffer->asC2Buffer();
- mBuffers[index].compBuffer = result;
+ std::shared_ptr<C2Buffer> result = mBuffers[index].compBuffer.lock();
+ if (!result) {
+ result = clientBuffer->asC2Buffer();
+ mBuffers[index].compBuffer = result;
+ }
if (c2buffer) {
*c2buffer = result;
}
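Two related changes show up in releaseSlot()/returnBuffer() above. First, the component-facing C2Buffer is cached through the weak compBuffer reference: if the previously created C2Buffer is still alive it is reused, otherwise a new one is made from the client buffer and re-cached. Second, the new release flag separates "give me the C2Buffer" from "take the buffer back from the client". A sketch of the two-phase use that queueInputBufferInternal() makes of it further down (names abbreviated):

    std::shared_ptr<C2Buffer> c2buffer;
    inputBuffers->releaseBuffer(mcBuffer, &c2buffer, false);  // borrow for queueing
    // ... component->queue(...) succeeds ...
    inputBuffers->releaseBuffer(mcBuffer, nullptr, true);     // now really free the slot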
@@ -489,8 +528,8 @@
if (!compBuffer || compBuffer != c2buffer) {
continue;
}
- mBuffers[i].clientBuffer = nullptr;
mBuffers[i].compBuffer.reset();
+ ALOGV("[%s] codec released buffer #%zu", mName, i);
return true;
}
ALOGV("[%s] codec released an unknown buffer", mName);
@@ -502,6 +541,14 @@
mBuffers.clear();
}
+ size_t numClientBuffers() const {
+ return std::count_if(
+ mBuffers.begin(), mBuffers.end(),
+ [](const Entry &entry) {
+ return (entry.clientBuffer != nullptr);
+ });
+ }
+
private:
friend class BuffersArrayImpl;
@@ -601,7 +648,10 @@
* \return true if the buffer is successfully returned
* false otherwise
*/
- bool returnBuffer(const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) {
+ bool returnBuffer(
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer,
+ bool release) {
sp<Codec2Buffer> clientBuffer;
size_t index = mBuffers.size();
for (size_t i = 0; i < mBuffers.size(); ++i) {
@@ -610,7 +660,9 @@
ALOGD("[%s] Client returned a buffer it does not own according to our record: %zu", mName, i);
}
clientBuffer = mBuffers[i].clientBuffer;
- mBuffers[i].ownedByClient = false;
+ if (release) {
+ mBuffers[i].ownedByClient = false;
+ }
index = i;
break;
}
@@ -620,8 +672,11 @@
return false;
}
ALOGV("[%s] %s: matching buffer found (index=%zu)", mName, __func__, index);
- std::shared_ptr<C2Buffer> result = clientBuffer->asC2Buffer();
- mBuffers[index].compBuffer = result;
+ std::shared_ptr<C2Buffer> result = mBuffers[index].compBuffer.lock();
+ if (!result) {
+ result = clientBuffer->asC2Buffer();
+ mBuffers[index].compBuffer = result;
+ }
if (c2buffer) {
*c2buffer = result;
}
@@ -640,9 +695,9 @@
// This should not happen.
ALOGD("[%s] codec released a buffer owned by client "
"(index %zu)", mName, i);
- mBuffers[i].ownedByClient = false;
}
mBuffers[i].compBuffer.reset();
+ ALOGV("[%s] codec released buffer #%zu(array mode)", mName, i);
return true;
}
}
@@ -679,6 +734,14 @@
}
}
+ size_t numClientBuffers() const {
+ return std::count_if(
+ mBuffers.begin(), mBuffers.end(),
+ [](const Entry &entry) {
+ return entry.ownedByClient;
+ });
+ }
+
private:
std::string mImplName; ///< name for debugging
const char *mName; ///< C-string version of name
@@ -727,8 +790,10 @@
}
bool releaseBuffer(
- const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
- return mImpl.returnBuffer(buffer, c2buffer);
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer,
+ bool release) override {
+ return mImpl.returnBuffer(buffer, c2buffer, release);
}
bool expireComponentBuffer(
@@ -740,6 +805,10 @@
mImpl.flush();
}
+ size_t numClientBuffers() const final {
+ return mImpl.numClientBuffers();
+ }
+
private:
BuffersArrayImpl mImpl;
};
@@ -769,8 +838,10 @@
}
bool releaseBuffer(
- const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
- return mImpl.releaseSlot(buffer, c2buffer);
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer,
+ bool release) override {
+ return mImpl.releaseSlot(buffer, c2buffer, release);
}
bool expireComponentBuffer(
@@ -805,7 +876,11 @@
return std::move(array);
}
- virtual sp<Codec2Buffer> alloc(size_t size) const {
+ size_t numClientBuffers() const final {
+ return mImpl.numClientBuffers();
+ }
+
+ virtual sp<Codec2Buffer> alloc(size_t size) {
C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
std::shared_ptr<C2LinearBlock> block;
@@ -829,6 +904,7 @@
const sp<ICrypto> &crypto,
int32_t heapSeqNum,
size_t capacity,
+ size_t numInputSlots,
const char *componentName, const char *name = "EncryptedInput")
: LinearInputBuffers(componentName, name),
mUsage({0, 0}),
@@ -840,7 +916,7 @@
} else {
mUsage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
}
- for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+ for (size_t i = 0; i < numInputSlots; ++i) {
sp<IMemory> memory = mDealer->allocate(capacity);
if (memory == nullptr) {
ALOGD("[%s] Failed to allocate memory from dealer: only %zu slots allocated", mName, i);
@@ -853,11 +929,12 @@
~EncryptedLinearInputBuffers() override {
}
- sp<Codec2Buffer> alloc(size_t size) const override {
+ sp<Codec2Buffer> alloc(size_t size) override {
sp<IMemory> memory;
- for (const Entry &entry : mMemoryVector) {
- if (entry.block.expired()) {
- memory = entry.memory;
+ size_t slot = 0;
+ for (; slot < mMemoryVector.size(); ++slot) {
+ if (mMemoryVector[slot].block.expired()) {
+ memory = mMemoryVector[slot].memory;
break;
}
}
@@ -867,10 +944,11 @@
std::shared_ptr<C2LinearBlock> block;
c2_status_t err = mPool->fetchLinearBlock(size, mUsage, &block);
- if (err != C2_OK) {
+ if (err != C2_OK || block == nullptr) {
return nullptr;
}
+ mMemoryVector[slot].block = block;
return new EncryptedLinearBlockBuffer(mFormat, block, memory, mHeapSeqNum);
}
@@ -910,8 +988,10 @@
}
bool releaseBuffer(
- const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
- return mImpl.releaseSlot(buffer, c2buffer);
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer,
+ bool release) override {
+ return mImpl.releaseSlot(buffer, c2buffer, release);
}
bool expireComponentBuffer(
@@ -944,6 +1024,10 @@
return std::move(array);
}
+ size_t numClientBuffers() const final {
+ return mImpl.numClientBuffers();
+ }
+
private:
FlexBuffersImpl mImpl;
std::shared_ptr<C2AllocatorStore> mStore;
@@ -951,11 +1035,12 @@
class GraphicInputBuffers : public CCodecBufferChannel::InputBuffers {
public:
- GraphicInputBuffers(const char *componentName, const char *name = "2D-BB-Input")
+ GraphicInputBuffers(
+ size_t numInputSlots, const char *componentName, const char *name = "2D-BB-Input")
: InputBuffers(componentName, name),
mImpl(mName),
mLocalBufferPool(LocalBufferPool::Create(
- kMaxLinearBufferSize * kMinInputBufferArraySize)) { }
+ kMaxLinearBufferSize * numInputSlots)) { }
~GraphicInputBuffers() override = default;
bool requestNewBuffer(size_t *index, sp<MediaCodecBuffer> *buffer) override {
@@ -973,14 +1058,17 @@
}
bool releaseBuffer(
- const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
- return mImpl.releaseSlot(buffer, c2buffer);
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer,
+ bool release) override {
+ return mImpl.releaseSlot(buffer, c2buffer, release);
}
bool expireComponentBuffer(
const std::shared_ptr<C2Buffer> &c2buffer) override {
return mImpl.expireComponentBuffer(c2buffer);
}
+
void flush() override {
// This is no-op by default unless we're in array mode where we need to keep
// track of the flushed work.
@@ -1003,6 +1091,10 @@
return std::move(array);
}
+ size_t numClientBuffers() const final {
+ return mImpl.numClientBuffers();
+ }
+
private:
FlexBuffersImpl mImpl;
std::shared_ptr<LocalBufferPool> mLocalBufferPool;
@@ -1018,7 +1110,7 @@
}
bool releaseBuffer(
- const sp<MediaCodecBuffer> &, std::shared_ptr<C2Buffer> *) override {
+ const sp<MediaCodecBuffer> &, std::shared_ptr<C2Buffer> *, bool) override {
return false;
}
@@ -1038,6 +1130,10 @@
void getArray(Vector<sp<MediaCodecBuffer>> *array) const final {
array->clear();
}
+
+ size_t numClientBuffers() const final {
+ return 0u;
+ }
};
class OutputBuffersArray : public CCodecBufferChannel::OutputBuffers {
@@ -1084,13 +1180,14 @@
return WOULD_BLOCK;
}
submit(c2Buffer);
+ handleImageData(c2Buffer);
*clientBuffer = c2Buffer;
ALOGV("[%s] grabbed buffer %zu", mName, *index);
return OK;
}
status_t registerCsd(
- const C2StreamCsdInfo::output *csd,
+ const C2StreamInitDataInfo::output *csd,
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) final {
sp<Codec2Buffer> c2Buffer;
@@ -1113,7 +1210,7 @@
bool releaseBuffer(
const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
- return mImpl.returnBuffer(buffer, c2buffer);
+ return mImpl.returnBuffer(buffer, c2buffer, true);
}
void flush(const std::list<std::unique_ptr<C2Work>> &flushedWork) override {
@@ -1158,6 +1255,10 @@
mImpl.realloc(alloc);
}
+ size_t numClientBuffers() const final {
+ return mImpl.numClientBuffers();
+ }
+
private:
BuffersArrayImpl mImpl;
};
@@ -1173,15 +1274,19 @@
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) override {
sp<Codec2Buffer> newBuffer = wrap(buffer);
+ if (newBuffer == nullptr) {
+ return NO_MEMORY;
+ }
newBuffer->setFormat(mFormat);
*index = mImpl.assignSlot(newBuffer);
+ handleImageData(newBuffer);
*clientBuffer = newBuffer;
ALOGV("[%s] registered buffer %zu", mName, *index);
return OK;
}
status_t registerCsd(
- const C2StreamCsdInfo::output *csd,
+ const C2StreamInitDataInfo::output *csd,
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) final {
sp<Codec2Buffer> newBuffer = new LocalLinearBuffer(
@@ -1192,8 +1297,9 @@
}
bool releaseBuffer(
- const sp<MediaCodecBuffer> &buffer, std::shared_ptr<C2Buffer> *c2buffer) override {
- return mImpl.releaseSlot(buffer, c2buffer);
+ const sp<MediaCodecBuffer> &buffer,
+ std::shared_ptr<C2Buffer> *c2buffer) override {
+ return mImpl.releaseSlot(buffer, c2buffer, true);
}
void flush(
@@ -1215,6 +1321,10 @@
return std::move(array);
}
+ size_t numClientBuffers() const final {
+ return mImpl.numClientBuffers();
+ }
+
/**
* Return an appropriate Codec2Buffer object for the type of buffers.
*
@@ -1265,6 +1375,10 @@
return nullptr;
}
sp<Codec2Buffer> clientBuffer = ConstLinearBlockBuffer::Allocate(mFormat, buffer);
+ if (clientBuffer == nullptr) {
+ ALOGD("[%s] ConstLinearBlockBuffer::Allocate failed", mName);
+ return nullptr;
+ }
submit(clientBuffer);
return clientBuffer;
}
@@ -1291,10 +1405,11 @@
class RawGraphicOutputBuffers : public FlexOutputBuffers {
public:
- RawGraphicOutputBuffers(const char *componentName, const char *name = "2D-BB-Output")
+ RawGraphicOutputBuffers(
+ size_t numOutputSlots, const char *componentName, const char *name = "2D-BB-Output")
: FlexOutputBuffers(componentName, name),
mLocalBufferPool(LocalBufferPool::Create(
- kMaxLinearBufferSize * kMinOutputBufferArraySize)) { }
+ kMaxLinearBufferSize * numOutputSlots)) { }
~RawGraphicOutputBuffers() override = default;
sp<Codec2Buffer> wrap(const std::shared_ptr<C2Buffer> &buffer) override {
@@ -1304,6 +1419,10 @@
[lbp = mLocalBufferPool](size_t capacity) {
return lbp->newBuffer(capacity);
});
+ if (c2buffer == nullptr) {
+ ALOGD("[%s] ConstGraphicBlockBuffer::AllocateEmpty failed", mName);
+ return nullptr;
+ }
c2buffer->setRange(0, 0);
return c2buffer;
} else {
@@ -1382,90 +1501,6 @@
count->value = -1;
}
-// CCodecBufferChannel::PipelineCapacity
-
-CCodecBufferChannel::PipelineCapacity::PipelineCapacity()
- : input(0), component(0),
- mName("<UNKNOWN COMPONENT>") {
-}
-
-void CCodecBufferChannel::PipelineCapacity::initialize(
- int newInput,
- int newComponent,
- const char* newName,
- const char* callerTag) {
- input.store(newInput, std::memory_order_relaxed);
- component.store(newComponent, std::memory_order_relaxed);
- mName = newName;
- ALOGV("[%s] %s -- PipelineCapacity::initialize(): "
- "pipeline availability initialized ==> "
- "input = %d, component = %d",
- mName, callerTag ? callerTag : "*",
- newInput, newComponent);
-}
-
-bool CCodecBufferChannel::PipelineCapacity::allocate(const char* callerTag) {
- int prevInput = input.fetch_sub(1, std::memory_order_relaxed);
- int prevComponent = component.fetch_sub(1, std::memory_order_relaxed);
- if (prevInput > 0 && prevComponent > 0) {
- ALOGV("[%s] %s -- PipelineCapacity::allocate() returns true: "
- "pipeline availability -1 all ==> "
- "input = %d, component = %d",
- mName, callerTag ? callerTag : "*",
- prevInput - 1,
- prevComponent - 1);
- return true;
- }
- input.fetch_add(1, std::memory_order_relaxed);
- component.fetch_add(1, std::memory_order_relaxed);
- ALOGV("[%s] %s -- PipelineCapacity::allocate() returns false: "
- "pipeline availability unchanged ==> "
- "input = %d, component = %d",
- mName, callerTag ? callerTag : "*",
- prevInput,
- prevComponent);
- return false;
-}
-
-void CCodecBufferChannel::PipelineCapacity::free(const char* callerTag) {
- int prevInput = input.fetch_add(1, std::memory_order_relaxed);
- int prevComponent = component.fetch_add(1, std::memory_order_relaxed);
- ALOGV("[%s] %s -- PipelineCapacity::free(): "
- "pipeline availability +1 all ==> "
- "input = %d, component = %d",
- mName, callerTag ? callerTag : "*",
- prevInput + 1,
- prevComponent + 1);
-}
-
-int CCodecBufferChannel::PipelineCapacity::freeInputSlots(
- size_t numDiscardedInputBuffers,
- const char* callerTag) {
- int prevInput = input.fetch_add(numDiscardedInputBuffers,
- std::memory_order_relaxed);
- ALOGV("[%s] %s -- PipelineCapacity::freeInputSlots(%zu): "
- "pipeline availability +%zu input ==> "
- "input = %d, component = %d",
- mName, callerTag ? callerTag : "*",
- numDiscardedInputBuffers,
- numDiscardedInputBuffers,
- prevInput + static_cast<int>(numDiscardedInputBuffers),
- component.load(std::memory_order_relaxed));
- return prevInput + static_cast<int>(numDiscardedInputBuffers);
-}
-
-int CCodecBufferChannel::PipelineCapacity::freeComponentSlot(
- const char* callerTag) {
- int prevComponent = component.fetch_add(1, std::memory_order_relaxed);
- ALOGV("[%s] %s -- PipelineCapacity::freeComponentSlot(): "
- "pipeline availability +1 component ==> "
- "input = %d, component = %d",
- mName, callerTag ? callerTag : "*",
- input.load(std::memory_order_relaxed),
- prevComponent + 1);
- return prevComponent + 1;
-}
-
// CCodecBufferChannel::ReorderStash
CCodecBufferChannel::ReorderStash::ReorderStash() {
@@ -1479,6 +1514,11 @@
mKey = C2Config::ORDINAL;
}
+void CCodecBufferChannel::ReorderStash::flush() {
+ mPending.clear();
+ mStash.clear();
+}
+
void CCodecBufferChannel::ReorderStash::setDepth(uint32_t depth) {
mPending.splice(mPending.end(), mStash);
mDepth = depth;
@@ -1505,13 +1545,13 @@
int64_t timestamp,
int32_t flags,
const C2WorkOrdinalStruct &ordinal) {
- for (auto it = mStash.begin(); it != mStash.end(); ++it) {
+ auto it = mStash.begin();
+ for (; it != mStash.end(); ++it) {
if (less(ordinal, it->ordinal)) {
- mStash.emplace(it, buffer, timestamp, flags, ordinal);
- return;
+ break;
}
}
- mStash.emplace_back(buffer, timestamp, flags, ordinal);
+ mStash.emplace(it, buffer, timestamp, flags, ordinal);
while (!mStash.empty() && mStash.size() > mDepth) {
mPending.push_back(mStash.front());
mStash.pop_front();
@@ -1545,11 +1585,14 @@
const std::shared_ptr<CCodecCallback> &callback)
: mHeapSeqNum(-1),
mCCodecCallback(callback),
+ mNumInputSlots(kSmoothnessFactor),
+ mNumOutputSlots(kSmoothnessFactor),
+ mDelay(0),
mFrameIndex(0u),
mFirstValidFrameIndex(0u),
mMetaMode(MODE_NONE),
- mAvailablePipelineCapacity(),
mInputMetEos(false) {
+ mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
buffers->reset(new DummyInputBuffers(""));
}
@@ -1611,18 +1654,19 @@
work->input.ordinal.customOrdinal = timeUs;
work->input.buffers.clear();
+ uint64_t queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
+ std::vector<std::shared_ptr<C2Buffer>> queuedBuffers;
+
if (buffer->size() > 0u) {
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
std::shared_ptr<C2Buffer> c2buffer;
- if (!(*buffers)->releaseBuffer(buffer, &c2buffer)) {
+ if (!(*buffers)->releaseBuffer(buffer, &c2buffer, false)) {
return -ENOENT;
}
work->input.buffers.push_back(c2buffer);
- } else {
- mAvailablePipelineCapacity.freeInputSlots(1, "queueInputBufferInternal");
- if (eos) {
- flags |= C2FrameData::FLAG_END_OF_STREAM;
- }
+ queuedBuffers.push_back(c2buffer);
+ } else if (eos) {
+ flags |= C2FrameData::FLAG_END_OF_STREAM;
}
work->input.flags = (C2FrameData::flags_t)flags;
// TODO: fill info's
@@ -1633,10 +1677,16 @@
std::list<std::unique_ptr<C2Work>> items;
items.push_back(std::move(work));
+ mPipelineWatcher.lock()->onWorkQueued(
+ queuedFrameIndex,
+ std::move(queuedBuffers),
+ PipelineWatcher::Clock::now());
c2_status_t err = mComponent->queue(&items);
+ if (err != C2_OK) {
+ mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
+ }
if (err == C2_OK && eos && buffer->size() > 0u) {
- mCCodecCallback->onWorkQueued(false);
work.reset(new C2Work);
work->input.ordinal.timestamp = timeUs;
work->input.ordinal.frameIndex = mFrameIndex++;
@@ -1644,13 +1694,27 @@
work->input.ordinal.customOrdinal = timeUs;
work->input.buffers.clear();
work->input.flags = C2FrameData::FLAG_END_OF_STREAM;
+ work->worklets.emplace_back(new C2Worklet);
+
+ queuedFrameIndex = work->input.ordinal.frameIndex.peeku();
+ queuedBuffers.clear();
items.clear();
items.push_back(std::move(work));
+
+ mPipelineWatcher.lock()->onWorkQueued(
+ queuedFrameIndex,
+ std::move(queuedBuffers),
+ PipelineWatcher::Clock::now());
err = mComponent->queue(&items);
+ if (err != C2_OK) {
+ mPipelineWatcher.lock()->onWorkDone(queuedFrameIndex);
+ }
}
if (err == C2_OK) {
- mCCodecCallback->onWorkQueued(eos);
+ Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ bool released = (*buffers)->releaseBuffer(buffer, nullptr, true);
+ ALOGV("[%s] queueInputBuffer: buffer %sreleased", mName, released ? "" : "not ");
}
feedInputBufferIfAvailableInternal();
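The hunk above pairs every mComponent->queue() call with watcher bookkeeping: the frame is recorded as in flight before the queue call, and immediately marked done again if the call fails. Below is a condensed sketch of that pattern using a toy watcher with the same onWorkQueued()/onWorkDone() shape; all names here are illustrative and not part of the patch.

#include <chrono>
#include <cstdint>
#include <map>

// Toy stand-in for PipelineWatcher: a frame counts against pipeline capacity
// from onWorkQueued() until onWorkDone().
struct ToyWatcher {
    std::map<uint64_t, std::chrono::steady_clock::time_point> frames;
    void onWorkQueued(uint64_t frameIndex) {
        frames[frameIndex] = std::chrono::steady_clock::now();
    }
    void onWorkDone(uint64_t frameIndex) { frames.erase(frameIndex); }
};

// queueFn returns false on failure, mirroring err != C2_OK above.
template <typename QueueFn>
bool queueWithBookkeeping(ToyWatcher &watcher, uint64_t frameIndex, QueueFn queueFn) {
    watcher.onWorkQueued(frameIndex);    // count the frame as in flight first
    if (!queueFn()) {
        watcher.onWorkDone(frameIndex);  // roll back so capacity is not leaked
        return false;
    }
    return true;
}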
@@ -1792,16 +1856,26 @@
}
void CCodecBufferChannel::feedInputBufferIfAvailableInternal() {
- while (!mInputMetEos &&
- !mReorderStash.lock()->hasPending() &&
- mAvailablePipelineCapacity.allocate("feedInputBufferIfAvailable")) {
+ if (mInputMetEos ||
+ mReorderStash.lock()->hasPending() ||
+ mPipelineWatcher.lock()->pipelineFull()) {
+ return;
+ } else {
+ Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
+ if ((*buffers)->numClientBuffers() >= mNumOutputSlots) {
+ return;
+ }
+ }
+ for (size_t i = 0; i < mNumInputSlots; ++i) {
sp<MediaCodecBuffer> inBuffer;
size_t index;
{
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
+ if ((*buffers)->numClientBuffers() >= mNumInputSlots) {
+ return;
+ }
if (!(*buffers)->requestNewBuffer(&index, &inBuffer)) {
ALOGV("[%s] no new buffer available", mName);
- mAvailablePipelineCapacity.free("feedInputBufferIfAvailable");
break;
}
}
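The reworked gating above stops handing out input buffers when any of these hold: EOS was queued, the reorder stash has pending output, the watcher reports the pipeline full, or the client already holds a full set of output or input buffers. A condensed sketch of that predicate with the counters passed in explicitly; the helper name and parameter list are illustrative only.

#include <cstddef>

// True when the channel should NOT hand out another input buffer yet.
// Mirrors the checks at the top of feedInputBufferIfAvailableInternal().
bool shouldHoldInput(bool inputMetEos, bool reorderHasPending, bool pipelineFull,
                     size_t clientOutputBuffers, size_t numOutputSlots,
                     size_t clientInputBuffers, size_t numInputSlots) {
    if (inputMetEos || reorderHasPending || pipelineFull) {
        return true;                                 // stream done, reordering, or pipeline full
    }
    if (clientOutputBuffers >= numOutputSlots) {
        return true;                                 // client holds too many outputs
    }
    return clientInputBuffers >= numInputSlots;      // client already holds enough inputs
}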
@@ -1979,16 +2053,13 @@
bool released = false;
{
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
- if (*buffers && (*buffers)->releaseBuffer(buffer, nullptr)) {
- buffers.unlock();
+ if (*buffers && (*buffers)->releaseBuffer(buffer, nullptr, true)) {
released = true;
- mAvailablePipelineCapacity.freeInputSlots(1, "discardBuffer");
}
}
{
Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
if (*buffers && (*buffers)->releaseBuffer(buffer, nullptr)) {
- buffers.unlock();
released = true;
}
}
@@ -2006,7 +2077,7 @@
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
if (!(*buffers)->isArrayMode()) {
- *buffers = (*buffers)->toArrayMode(kMinInputBufferArraySize);
+ *buffers = (*buffers)->toArrayMode(mNumInputSlots);
}
(*buffers)->getArray(array);
@@ -2017,7 +2088,7 @@
Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
if (!(*buffers)->isArrayMode()) {
- *buffers = (*buffers)->toArrayMode(kMinOutputBufferArraySize);
+ *buffers = (*buffers)->toArrayMode(mNumOutputSlots);
}
(*buffers)->getArray(array);
@@ -2029,12 +2100,19 @@
C2StreamBufferTypeSetting::output oStreamFormat(0u);
C2PortReorderBufferDepthTuning::output reorderDepth;
C2PortReorderKeySetting::output reorderKey;
+ C2PortActualDelayTuning::input inputDelay(0);
+ C2PortActualDelayTuning::output outputDelay(0);
+ C2ActualPipelineDelayTuning pipelineDelay(0);
+
c2_status_t err = mComponent->query(
{
&iStreamFormat,
&oStreamFormat,
&reorderDepth,
&reorderKey,
+ &inputDelay,
+ &pipelineDelay,
+ &outputDelay,
},
{},
C2_DONT_BLOCK,
@@ -2057,6 +2135,15 @@
reorder->setKey(reorderKey.value);
}
}
+
+ uint32_t inputDelayValue = inputDelay ? inputDelay.value : 0;
+ uint32_t pipelineDelayValue = pipelineDelay ? pipelineDelay.value : 0;
+ uint32_t outputDelayValue = outputDelay ? outputDelay.value : 0;
+
+ mNumInputSlots = inputDelayValue + pipelineDelayValue + kSmoothnessFactor;
+ mNumOutputSlots = outputDelayValue + kSmoothnessFactor;
+ mDelay = inputDelayValue + pipelineDelayValue + outputDelayValue;
+
// TODO: get this from input format
bool secure = mComponent->getName().find(".secure") != std::string::npos;
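A worked example of the slot arithmetic introduced above. The delay values below are invented for illustration; real values come from the C2PortActualDelayTuning/C2ActualPipelineDelayTuning query, and kSmoothnessFactor is a channel constant.

#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical query results.
    uint32_t inputDelayValue = 4;
    uint32_t pipelineDelayValue = 2;
    uint32_t outputDelayValue = 8;
    const uint32_t kSmoothnessFactor = 4;   // assumed value, for illustration

    uint32_t numInputSlots = inputDelayValue + pipelineDelayValue + kSmoothnessFactor;
    uint32_t numOutputSlots = outputDelayValue + kSmoothnessFactor;
    uint32_t delay = inputDelayValue + pipelineDelayValue + outputDelayValue;

    // => 10 input slots, 12 output slots, 14 frames of total pipeline delay.
    printf("input slots %u, output slots %u, delay %u\n",
           numInputSlots, numOutputSlots, delay);
    return 0;
}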
@@ -2067,7 +2154,7 @@
1 << C2PlatformAllocatorStore::BUFFERQUEUE);
if (inputFormat != nullptr) {
- bool graphic = (iStreamFormat.value == C2FormatVideo);
+ bool graphic = (iStreamFormat.value == C2BufferData::GRAPHIC);
std::shared_ptr<C2BlockPool> pool;
{
Mutexed<BlockPools>::Locked pools(mBlockPools);
@@ -2127,6 +2214,7 @@
pools->inputPool = pool;
}
+ bool forceArrayMode = false;
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
if (graphic) {
if (mInputSurface) {
@@ -2134,7 +2222,7 @@
} else if (mMetaMode == MODE_ANW) {
buffers->reset(new GraphicMetadataInputBuffers(mName));
} else {
- buffers->reset(new GraphicInputBuffers(mName));
+ buffers->reset(new GraphicInputBuffers(mNumInputSlots, mName));
}
} else {
if (hasCryptoOrDescrambler()) {
@@ -2147,7 +2235,7 @@
if (mDealer == nullptr) {
mDealer = new MemoryDealer(
align(capacity, MemoryDealer::getAllocationAlignment())
- * (kMinInputBufferArraySize + 1),
+ * (mNumInputSlots + 1),
"EncryptedLinearInputBuffers");
mDecryptDestination = mDealer->allocate((size_t)capacity);
}
@@ -2157,7 +2245,9 @@
mHeapSeqNum = -1;
}
buffers->reset(new EncryptedLinearInputBuffers(
- secure, mDealer, mCrypto, mHeapSeqNum, (size_t)capacity, mName));
+ secure, mDealer, mCrypto, mHeapSeqNum, (size_t)capacity,
+ mNumInputSlots, mName));
+ forceArrayMode = true;
} else {
buffers->reset(new LinearInputBuffers(mName));
}
@@ -2169,6 +2259,10 @@
} else {
// TODO: error
}
+
+ if (forceArrayMode) {
+ *buffers = (*buffers)->toArrayMode(mNumInputSlots);
+ }
}
if (outputFormat != nullptr) {
@@ -2176,12 +2270,16 @@
uint32_t outputGeneration;
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
outputSurface = output->surface ?
output->surface->getIGraphicBufferProducer() : nullptr;
+ if (outputSurface) {
+ output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+ }
outputGeneration = output->generation;
}
- bool graphic = (oStreamFormat.value == C2FormatVideo);
+ bool graphic = (oStreamFormat.value == C2BufferData::GRAPHIC);
C2BlockPool::local_id_t outputPoolId_;
{
@@ -2286,7 +2384,7 @@
if (outputSurface) {
buffers->reset(new GraphicOutputBuffers(mName));
} else {
- buffers->reset(new RawGraphicOutputBuffers(mName));
+ buffers->reset(new RawGraphicOutputBuffers(mNumOutputSlots, mName));
}
} else {
buffers->reset(new LinearOutputBuffers(mName));
@@ -2307,7 +2405,7 @@
// WORKAROUND: if we're using early CSD workaround we convert to
// array mode, to appease apps assuming the output
// buffers to be of the same size.
- (*buffers) = (*buffers)->toArrayMode(kMinOutputBufferArraySize);
+ (*buffers) = (*buffers)->toArrayMode(mNumOutputSlots);
int32_t channelCount;
int32_t sampleRate;
@@ -2335,27 +2433,14 @@
// about buffers from the previous generation do not interfere with the
// newly initialized pipeline capacity.
- // Query delays
- C2PortRequestedDelayTuning::input inputDelay;
- C2PortRequestedDelayTuning::output outputDelay;
- C2RequestedPipelineDelayTuning pipelineDelay;
-#if 0
- err = mComponent->query(
- { &inputDelay, &pipelineDelay, &outputDelay },
- {},
- C2_DONT_BLOCK,
- nullptr);
- mAvailablePipelineCapacity.initialize(
- inputDelay,
- inputDelay + pipelineDelay,
- inputDelay + pipelineDelay + outputDelay,
- mName);
-#else
- mAvailablePipelineCapacity.initialize(
- kMinInputBufferArraySize,
- kMaxPipelineCapacity,
- mName);
-#endif
+ {
+ Mutexed<PipelineWatcher>::Locked watcher(mPipelineWatcher);
+ watcher->inputDelay(inputDelayValue)
+ .pipelineDelay(pipelineDelayValue)
+ .outputDelay(outputDelayValue)
+ .smoothnessFactor(kSmoothnessFactor);
+ watcher->flush();
+ }
mInputMetEos = false;
mSync.start();
@@ -2367,14 +2452,14 @@
return OK;
}
- C2StreamFormatConfig::output oStreamFormat(0u);
+ C2StreamBufferTypeSetting::output oStreamFormat(0u);
c2_status_t err = mComponent->query({ &oStreamFormat }, {}, C2_DONT_BLOCK, nullptr);
if (err != C2_OK) {
return UNKNOWN_ERROR;
}
std::vector<sp<MediaCodecBuffer>> toBeQueued;
// TODO: use proper buffer depth instead of this random value
- for (size_t i = 0; i < kMinInputBufferArraySize; ++i) {
+ for (size_t i = 0; i < mNumInputSlots; ++i) {
size_t index;
sp<MediaCodecBuffer> buffer;
{
@@ -2416,21 +2501,16 @@
buffer->meta()->setInt64("timeUs", 0);
post = false;
}
- if (mAvailablePipelineCapacity.allocate("requestInitialInputBuffers")) {
- if (post) {
- mCallback->onInputBufferAvailable(index, buffer);
- } else {
- toBeQueued.emplace_back(buffer);
- }
+ if (post) {
+ mCallback->onInputBufferAvailable(index, buffer);
} else {
- ALOGD("[%s] pipeline is full while requesting %zu-th input buffer",
- mName, i);
+ toBeQueued.emplace_back(buffer);
}
}
}
for (const sp<MediaCodecBuffer> &buffer : toBeQueued) {
if (queueInputBufferInternal(buffer) != OK) {
- mAvailablePipelineCapacity.freeComponentSlot("requestInitialInputBuffers");
+ ALOGV("[%s] Error while queueing initial buffers", mName);
}
}
return OK;
@@ -2440,7 +2520,6 @@
mSync.stop();
mFirstValidFrameIndex = mFrameIndex.load(std::memory_order_relaxed);
if (mInputSurface != nullptr) {
- mInputSurface->disconnect();
mInputSurface.reset();
}
}
@@ -2476,28 +2555,26 @@
Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
(*buffers)->flush(flushedWork);
}
+ mReorderStash.lock()->flush();
+ mPipelineWatcher.lock()->flush();
}
void CCodecBufferChannel::onWorkDone(
std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
- const C2StreamInitDataInfo::output *initData,
- size_t numDiscardedInputBuffers) {
+ const C2StreamInitDataInfo::output *initData) {
if (handleWork(std::move(work), outputFormat, initData)) {
- mAvailablePipelineCapacity.freeInputSlots(numDiscardedInputBuffers,
- "onWorkDone");
feedInputBufferIfAvailable();
}
}
void CCodecBufferChannel::onInputBufferDone(
- const std::shared_ptr<C2Buffer>& buffer) {
+ uint64_t frameIndex, size_t arrayIndex) {
+ std::shared_ptr<C2Buffer> buffer =
+ mPipelineWatcher.lock()->onInputBufferReleased(frameIndex, arrayIndex);
bool newInputSlotAvailable;
{
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
newInputSlotAvailable = (*buffers)->expireComponentBuffer(buffer);
- if (newInputSlotAvailable) {
- mAvailablePipelineCapacity.freeInputSlots(1, "onInputBufferDone");
- }
}
if (newInputSlotAvailable) {
feedInputBufferIfAvailable();
@@ -2517,7 +2594,7 @@
if (work->worklets.size() != 1u
|| !work->worklets.front()
|| !(work->worklets.front()->output.flags & C2FrameData::FLAG_INCOMPLETE)) {
- mAvailablePipelineCapacity.freeComponentSlot("handleWork");
+ mPipelineWatcher.lock()->onWorkDone(work->input.ordinal.frameIndex.peeku());
}
if (work->result == C2_NOT_FOUND) {
@@ -2566,6 +2643,11 @@
mReorderStash.lock()->setDepth(reorderDepth.value);
ALOGV("[%s] onWorkDone: updated reorder depth to %u",
mName, reorderDepth.value);
+ Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
+ if (output->surface) {
+ output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+ }
} else {
ALOGD("[%s] onWorkDone: failed to read reorder depth", mName);
}
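Both start() and the reorder-depth update above size the surface dequeue budget the same way: output slots plus the current reorder depth plus a fixed rendering depth. A small helper capturing that formula; the helper name is invented and the kRenderingDepth value is assumed for illustration.

#include <cstdint>

constexpr uint32_t kRenderingDepth = 3;   // assumed value, for illustration

// maxDequeueBuffers = output slots + current reorder depth + rendering depth.
uint32_t maxDequeueBuffers(uint32_t numOutputSlots, uint32_t reorderDepth) {
    return numOutputSlots + reorderDepth + kRenderingDepth;
}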
@@ -2662,7 +2744,7 @@
// TODO: properly translate these to metadata
switch (info->coreIndex().coreIndex()) {
case C2StreamPictureTypeMaskInfo::CORE_INDEX:
- if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2PictureTypeKeyFrame) {
+ if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2Config::SYNC_FRAME) {
flags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
}
break;
@@ -2690,29 +2772,39 @@
size_t index;
while (true) {
- {
- Mutexed<ReorderStash>::Locked reorder(mReorderStash);
- if (!reorder->hasPending()) {
- break;
- }
- if (!reorder->pop(&entry)) {
- break;
- }
+ Mutexed<ReorderStash>::Locked reorder(mReorderStash);
+ if (!reorder->hasPending()) {
+ break;
}
+ if (!reorder->pop(&entry)) {
+ break;
+ }
+
Mutexed<std::unique_ptr<OutputBuffers>>::Locked buffers(mOutputBuffers);
status_t err = (*buffers)->registerBuffer(entry.buffer, &index, &outBuffer);
if (err != OK) {
+ bool outputBuffersChanged = false;
if (err != WOULD_BLOCK) {
+ if (!(*buffers)->isArrayMode()) {
+ *buffers = (*buffers)->toArrayMode(mNumOutputSlots);
+ }
OutputBuffersArray *array = (OutputBuffersArray *)buffers->get();
array->realloc(entry.buffer);
+ outputBuffersChanged = true;
+ }
+ ALOGV("[%s] sendOutputBuffers: unable to register output buffer", mName);
+ reorder->defer(entry);
+
+ buffers.unlock();
+ reorder.unlock();
+
+ if (outputBuffersChanged) {
mCCodecCallback->onOutputBuffersChanged();
}
- buffers.unlock();
- ALOGV("[%s] sendOutputBuffers: unable to register output buffer", mName);
- mReorderStash.lock()->defer(entry);
return;
}
buffers.unlock();
+ reorder.unlock();
outBuffer->meta()->setInt64("timeUs", entry.timestamp);
outBuffer->meta()->setInt32("flags", entry.flags);
@@ -2731,7 +2823,6 @@
sp<IGraphicBufferProducer> producer;
if (newSurface) {
newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- newSurface->setMaxDequeuedBufferCount(kMinOutputBufferArraySize);
producer = newSurface->getIGraphicBufferProducer();
producer->setGenerationNumber(generation);
} else {
@@ -2759,6 +2850,7 @@
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ newSurface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
output->surface = newSurface;
output->generation = generation;
}
@@ -2766,6 +2858,14 @@
return OK;
}
+PipelineWatcher::Clock::duration CCodecBufferChannel::elapsed() {
+ // When the client has pushed EOS, we want all pending work to finish quickly.
+ // Otherwise, the component may have stalled work due to input starvation, for
+ // up to the sum of the delays in the pipeline.
+ size_t n = mInputMetEos ? 0 : mDelay;
+ return mPipelineWatcher.lock()->elapsed(PipelineWatcher::Clock::now(), n);
+}
+
void CCodecBufferChannel::setMetaMode(MetaMode mode) {
mMetaMode = mode;
}
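A minimal usage sketch of the PipelineWatcher lifecycle as the channel drives it above: record the frame at queue time, hand back its input buffers when the component releases them, drop it on work-done, and forget everything on flush. It assumes the PipelineWatcher.h added later in this change and passes an empty buffer list for brevity.

#include <memory>
#include <vector>

#include <C2Buffer.h>

#include "PipelineWatcher.h"

void pipelineWatcherLifecycleSketch() {
    android::PipelineWatcher watcher;
    watcher.inputDelay(4).pipelineDelay(2).outputDelay(8).smoothnessFactor(4);

    // 1) A work item is queued; the frame becomes "in flight".
    std::vector<std::shared_ptr<C2Buffer>> buffers;   // empty for brevity
    watcher.onWorkQueued(0 /* frameIndex */, std::move(buffers),
                         android::PipelineWatcher::Clock::now());

    // 2) Before requesting more input, check capacity.
    if (!watcher.pipelineFull()) {
        // request another input buffer from the client
    }

    // 3) The component releases the input buffer, then finishes the work.
    (void)watcher.onInputBufferReleased(0, 0);   // null here: no buffer was attached
    watcher.onWorkDone(0);

    // 4) On flush or stop, forget everything still in flight.
    watcher.flush();
}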
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 431baaa..1ea29b4 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -34,15 +34,17 @@
#include <media/ICrypto.h>
#include "InputSurfaceWrapper.h"
+#include "PipelineWatcher.h"
namespace android {
+class MemoryDealer;
+
class CCodecCallback {
public:
virtual ~CCodecCallback() = default;
virtual void onError(status_t err, enum ActionCode actionCode) = 0;
virtual void onOutputFramesRendered(int64_t mediaTimeUs, nsecs_t renderTimeNs) = 0;
- virtual void onWorkQueued(bool eos) = 0;
virtual void onOutputBuffersChanged() = 0;
};
@@ -126,22 +128,21 @@
* @param workItems finished work item.
* @param outputFormat new output format if it has changed, otherwise nullptr
* @param initData new init data (CSD) if it has changed, otherwise nullptr
- * @param numDiscardedInputBuffers the number of input buffers that are
- * returned for the first time (not previously returned by
- * onInputBufferDone()).
*/
void onWorkDone(
std::unique_ptr<C2Work> work, const sp<AMessage> &outputFormat,
- const C2StreamInitDataInfo::output *initData,
- size_t numDiscardedInputBuffers);
+ const C2StreamInitDataInfo::output *initData);
/**
* Make an input buffer available for the client as it is no longer needed
* by the codec.
*
- * @param buffer The buffer that becomes unused.
+ * @param frameIndex The frame index of the input work item.
+ * @param arrayIndex The index of the buffer within the input work's buffers.
*/
- void onInputBufferDone(const std::shared_ptr<C2Buffer>& buffer);
+ void onInputBufferDone(uint64_t frameIndex, size_t arrayIndex);
+
+ PipelineWatcher::Clock::duration elapsed();
enum MetaMode {
MODE_NONE,
@@ -233,6 +234,10 @@
QueueSync mQueueSync;
std::vector<std::unique_ptr<C2Param>> mParamsToBeSet;
+ size_t mNumInputSlots;
+ size_t mNumOutputSlots;
+ size_t mDelay;
+
Mutexed<std::unique_ptr<InputBuffers>> mInputBuffers;
Mutexed<std::list<sp<ABuffer>>> mFlushedConfigs;
Mutexed<std::unique_ptr<OutputBuffers>> mOutputBuffers;
@@ -245,6 +250,7 @@
struct OutputSurface {
sp<Surface> surface;
uint32_t generation;
+ int maxDequeueBuffers;
};
Mutexed<OutputSurface> mOutputSurface;
@@ -261,79 +267,7 @@
MetaMode mMetaMode;
- // PipelineCapacity is used in the input buffer gating logic.
- //
- // There are two criteria that need to be met before
- // onInputBufferAvailable() is called:
- // 1. The number of input buffers that have been received by
- // CCodecBufferChannel but not returned via onWorkDone() or
- // onInputBufferDone() does not exceed a certain limit. (Let us call this
- // number the "input" capacity.)
- // 2. The number of work items that have been received by
- // CCodecBufferChannel whose outputs have not been returned from the
- // component (by calling onWorkDone()) does not exceed a certain limit.
- // (Let us call this the "component" capacity.)
- //
- // These two criteria guarantee that a new input buffer that arrives from
- // the invocation of onInputBufferAvailable() will not
- // 1. overload CCodecBufferChannel's input buffers; or
- // 2. overload the component.
- //
- struct PipelineCapacity {
- // The number of available input capacity.
- std::atomic_int input;
- // The number of available component capacity.
- std::atomic_int component;
-
- PipelineCapacity();
- // Set the values of #input and #component.
- void initialize(int newInput, int newComponent,
- const char* newName = "<UNKNOWN COMPONENT>",
- const char* callerTag = nullptr);
-
- // Return true and decrease #input and #component by one if
- // they are all greater than zero; return false otherwise.
- //
- // callerTag is used for logging only.
- //
- // allocate() is called by CCodecBufferChannel to check whether it can
- // receive another input buffer. If the return value is true,
- // onInputBufferAvailable() and onOutputBufferAvailable() can be called
- // afterwards.
- bool allocate(const char* callerTag = nullptr);
-
- // Increase #input and #component by one.
- //
- // callerTag is used for logging only.
- //
- // free() is called by CCodecBufferChannel after allocate() returns true
- // but onInputBufferAvailable() cannot be called for any reasons. It
- // essentially undoes an allocate() call.
- void free(const char* callerTag = nullptr);
-
- // Increase #input by @p numDiscardedInputBuffers.
- //
- // callerTag is used for logging only.
- //
- // freeInputSlots() is called by CCodecBufferChannel when onWorkDone()
- // or onInputBufferDone() is called. @p numDiscardedInputBuffers is
- // provided in onWorkDone(), and is 1 in onInputBufferDone().
- int freeInputSlots(size_t numDiscardedInputBuffers,
- const char* callerTag = nullptr);
-
- // Increase #component by one and return the updated value.
- //
- // callerTag is used for logging only.
- //
- // freeComponentSlot() is called by CCodecBufferChannel when
- // onWorkDone() is called.
- int freeComponentSlot(const char* callerTag = nullptr);
-
- private:
- // Component name. Used for logging.
- const char* mName;
- };
- PipelineCapacity mAvailablePipelineCapacity;
+ Mutexed<PipelineWatcher> mPipelineWatcher;
class ReorderStash {
public:
@@ -354,6 +288,7 @@
ReorderStash();
void clear();
+ void flush();
void setDepth(uint32_t depth);
void setKey(C2Config::ordinal_key_t key);
bool pop(Entry *entry);
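With PipelineCapacity gone, the pipeline state now sits behind Mutexed<PipelineWatcher>, so every access is a scoped lock. A brief sketch of the two access patterns used in the .cpp above, assuming the Mutexed helper from libstagefright_foundation; the function name is illustrative only.

#include <media/stagefright/foundation/Mutexed.h>

#include "PipelineWatcher.h"

void watcherAccessPatterns(android::Mutexed<android::PipelineWatcher> &pipelineWatcher) {
    // One-liner: the temporary Locked object holds the lock only for this call.
    bool full = pipelineWatcher.lock()->pipelineFull();
    (void)full;

    // Scoped form: hold the lock across several calls, as start() does above.
    {
        android::Mutexed<android::PipelineWatcher>::Locked watcher(pipelineWatcher);
        watcher->inputDelay(4).pipelineDelay(2).outputDelay(8).smoothnessFactor(4);
        watcher->flush();
    }
}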
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 1113ae8..0fd5731 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -84,17 +84,7 @@
}
void Codec2Buffer::setImageData(const sp<ABuffer> &imageData) {
- meta()->setBuffer("image-data", imageData);
- format()->setBuffer("image-data", imageData);
- MediaImage2 *img = (MediaImage2*)imageData->data();
- if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
- int32_t stride = img->mPlane[0].mRowInc;
- format()->setInt32(KEY_STRIDE, stride);
- if (img->mNumPlanes > 1 && stride > 0) {
- int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
- format()->setInt32(KEY_SLICE_HEIGHT, vstride);
- }
- }
+ mImageData = imageData;
}
// LocalLinearBuffer
@@ -234,6 +224,7 @@
mInitCheck = BAD_VALUE;
return;
}
+ memset(mediaImage, 0, sizeof(*mediaImage));
mAllocatedDepth = layout.planes[0].allocatedDepth;
uint32_t bitDepth = layout.planes[0].bitDepth;
@@ -451,6 +442,9 @@
}
bool setBackBuffer(const sp<ABuffer> &backBuffer) {
+ if (backBuffer == nullptr) {
+ return false;
+ }
if (backBuffer->capacity() < mBackBufferSize) {
return false;
}
@@ -543,7 +537,6 @@
: Codec2Buffer(format, buffer),
mView(view),
mBlock(block),
- mImageData(imageData),
mWrapped(wrapped) {
setImageData(imageData);
}
@@ -578,7 +571,7 @@
ALOGV("VideoNativeMetadata: %dx%d", buffer->width, buffer->height);
C2Handle *handle = WrapNativeCodec2GrallocHandle(
- native_handle_clone(buffer->handle),
+ buffer->handle,
buffer->width,
buffer->height,
buffer->format,
@@ -680,9 +673,7 @@
mView(std::move(view)),
mBufferRef(buffer),
mWrapped(wrapped) {
- if (imageData != nullptr) {
- setImageData(imageData);
- }
+ setImageData(imageData);
}
std::shared_ptr<C2Buffer> ConstGraphicBlockBuffer::asC2Buffer() {
diff --git a/media/codec2/sfplugin/Codec2Buffer.h b/media/codec2/sfplugin/Codec2Buffer.h
index 481975f..dd618aa 100644
--- a/media/codec2/sfplugin/Codec2Buffer.h
+++ b/media/codec2/sfplugin/Codec2Buffer.h
@@ -23,6 +23,7 @@
#include <android/hardware/cas/native/1.0/types.h>
#include <binder/IMemory.h>
#include <media/hardware/VideoAPI.h>
+#include <media/stagefright/foundation/ABuffer.h>
#include <media/MediaCodecBuffer.h>
#include <media/ICrypto.h>
@@ -85,6 +86,8 @@
return false;
}
+ sp<ABuffer> getImageData() const { return mImageData; }
+
protected:
/**
* canCopy() implementation for linear buffers.
@@ -100,6 +103,8 @@
* sets MediaImage data for flexible graphic buffers
*/
void setImageData(const sp<ABuffer> &imageData);
+
+ sp<ABuffer> mImageData;
};
/**
@@ -239,7 +244,6 @@
C2GraphicView mView;
std::shared_ptr<C2GraphicBlock> mBlock;
- sp<ABuffer> mImageData;
const bool mWrapped;
};
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index a8cc62d..ead0a9b 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -44,6 +44,7 @@
#include <cutils/native_handle.h>
#include <media/omx/1.0/WOmxNode.h>
#include <media/stagefright/MediaCodecConstants.h>
+#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/omx/OMXUtils.h>
#include <media/stagefright/xmlparser/MediaCodecsXmlParser.h>
@@ -67,263 +68,146 @@
s.compare(s.size() - suffixLen, suffixLen, suffix) == 0;
}
-// Constants from ACodec
-constexpr OMX_U32 kPortIndexInput = 0;
-constexpr OMX_U32 kPortIndexOutput = 1;
-constexpr OMX_U32 kMaxIndicesToCheck = 32;
+void addSupportedProfileLevels(
+ std::shared_ptr<Codec2Client::Interface> intf,
+ MediaCodecInfo::CapabilitiesWriter *caps,
+ const Traits& trait, const std::string &mediaType) {
+ std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
+ C2Mapper::GetProfileLevelMapper(trait.mediaType);
+ // if we don't know the media type, pass through all values unmapped
-status_t queryOmxCapabilities(
- const char* name, const char* mediaType, bool isEncoder,
- MediaCodecInfo::CapabilitiesWriter* caps) {
-
- const char *role = GetComponentRole(isEncoder, mediaType);
- if (role == nullptr) {
- return BAD_VALUE;
- }
-
- using namespace ::android::hardware::media::omx::V1_0;
- using ::android::hardware::Return;
- using ::android::hardware::Void;
- using ::android::hardware::hidl_vec;
- using ::android::hardware::media::omx::V1_0::utils::LWOmxNode;
-
- sp<IOmx> omx = IOmx::getService();
- if (!omx) {
- ALOGW("Could not obtain IOmx service.");
- return NO_INIT;
- }
-
- struct Observer : IOmxObserver {
- virtual Return<void> onMessages(const hidl_vec<Message>&) override {
- return Void();
- }
+ // TODO: we cannot find levels that are local 'maxima' without knowing the coding
+ // e.g. H.263 level 45 and level 30 could be two values for highest level as
+ // they don't include one another. For now we use the last supported value.
+ bool encoder = trait.kind == C2Component::KIND_ENCODER;
+ C2StreamProfileLevelInfo pl(encoder /* output */, 0u);
+ std::vector<C2FieldSupportedValuesQuery> profileQuery = {
+ C2FieldSupportedValuesQuery::Possible(C2ParamField(&pl, &pl.profile))
};
- sp<Observer> observer = new Observer();
- Status status;
- sp<IOmxNode> tOmxNode;
- Return<void> transStatus = omx->allocateNode(
- name, observer,
- [&status, &tOmxNode](Status s, const sp<IOmxNode>& n) {
- status = s;
- tOmxNode = n;
- });
- if (!transStatus.isOk()) {
- ALOGW("IOmx::allocateNode -- transaction failed.");
- return NO_INIT;
- }
- if (status != Status::OK) {
- ALOGW("IOmx::allocateNode -- error returned: %d.",
- static_cast<int>(status));
- return NO_INIT;
+ c2_status_t err = intf->querySupportedValues(profileQuery, C2_DONT_BLOCK);
+ ALOGV("query supported profiles -> %s | %s", asString(err), asString(profileQuery[0].status));
+ if (err != C2_OK || profileQuery[0].status != C2_OK) {
+ return;
}
- sp<LWOmxNode> omxNode = new LWOmxNode(tOmxNode);
-
- status_t err = SetComponentRole(omxNode, role);
- if (err != OK) {
- omxNode->freeNode();
- ALOGW("Failed to SetComponentRole: component = %s, role = %s.",
- name, role);
- return err;
+ // we only handle enumerated values
+ if (profileQuery[0].values.type != C2FieldSupportedValues::VALUES) {
+ return;
}
- bool isVideo = hasPrefix(mediaType, "video/") == 0;
- bool isImage = hasPrefix(mediaType, "image/") == 0;
+ // determine if codec supports HDR
+ bool supportsHdr = false;
+ bool supportsHdr10Plus = false;
- if (isVideo || isImage) {
- OMX_VIDEO_PARAM_PROFILELEVELTYPE param;
- InitOMXParams(¶m);
- param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
-
- for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
- param.nProfileIndex = index;
- status_t err = omxNode->getParameter(
- OMX_IndexParamVideoProfileLevelQuerySupported,
- ¶m, sizeof(param));
- if (err != OK) {
+ std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
+ c2_status_t err1 = intf->querySupportedParams(¶mDescs);
+ if (err1 == C2_OK) {
+ for (const std::shared_ptr<C2ParamDescriptor> &desc : paramDescs) {
+ switch ((uint32_t)desc->index()) {
+ case C2StreamHdr10PlusInfo::output::PARAM_TYPE:
+ supportsHdr10Plus = true;
+ break;
+ case C2StreamHdrStaticInfo::output::PARAM_TYPE:
+ supportsHdr = true;
+ break;
+ default:
break;
}
- caps->addProfileLevel(param.eProfile, param.eLevel);
-
- // AVC components may not list the constrained profiles explicitly, but
- // decoders that support a profile also support its constrained version.
- // Encoders must explicitly support constrained profiles.
- if (!isEncoder && strcasecmp(mediaType, MEDIA_MIMETYPE_VIDEO_AVC) == 0) {
- if (param.eProfile == OMX_VIDEO_AVCProfileHigh) {
- caps->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedHigh, param.eLevel);
- } else if (param.eProfile == OMX_VIDEO_AVCProfileBaseline) {
- caps->addProfileLevel(OMX_VIDEO_AVCProfileConstrainedBaseline, param.eLevel);
- }
- }
-
- if (index == kMaxIndicesToCheck) {
- ALOGW("[%s] stopping checking profiles after %u: %x/%x",
- name, index,
- param.eProfile, param.eLevel);
- }
- }
-
- // Color format query
- // return colors in the order reported by the OMX component
- // prefix "flexible" standard ones with the flexible equivalent
- OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
- InitOMXParams(&portFormat);
- portFormat.nPortIndex = isEncoder ? kPortIndexInput : kPortIndexOutput;
- for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
- portFormat.nIndex = index;
- status_t err = omxNode->getParameter(
- OMX_IndexParamVideoPortFormat,
- &portFormat, sizeof(portFormat));
- if (err != OK) {
- break;
- }
-
- OMX_U32 flexibleEquivalent;
- if (IsFlexibleColorFormat(
- omxNode, portFormat.eColorFormat, false /* usingNativeWindow */,
- &flexibleEquivalent)) {
- caps->addColorFormat(flexibleEquivalent);
- }
- caps->addColorFormat(portFormat.eColorFormat);
-
- if (index == kMaxIndicesToCheck) {
- ALOGW("[%s] stopping checking formats after %u: %s(%x)",
- name, index,
- asString(portFormat.eColorFormat), portFormat.eColorFormat);
- }
- }
- } else if (strcasecmp(mediaType, MEDIA_MIMETYPE_AUDIO_AAC) == 0) {
- // More audio codecs if they have profiles.
- OMX_AUDIO_PARAM_ANDROID_PROFILETYPE param;
- InitOMXParams(¶m);
- param.nPortIndex = isEncoder ? kPortIndexOutput : kPortIndexInput;
- for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
- param.nProfileIndex = index;
- status_t err = omxNode->getParameter(
- (OMX_INDEXTYPE)OMX_IndexParamAudioProfileQuerySupported,
- ¶m, sizeof(param));
- if (err != OK) {
- break;
- }
- // For audio, level is ignored.
- caps->addProfileLevel(param.eProfile, 0 /* level */);
-
- if (index == kMaxIndicesToCheck) {
- ALOGW("[%s] stopping checking profiles after %u: %x",
- name, index,
- param.eProfile);
- }
- }
-
- // NOTE: Without Android extensions, OMX does not provide a way to query
- // AAC profile support
- if (param.nProfileIndex == 0) {
- ALOGW("component %s doesn't support profile query.", name);
}
}
- if (isVideo && !isEncoder) {
- native_handle_t *sidebandHandle = nullptr;
- if (omxNode->configureVideoTunnelMode(
- kPortIndexOutput, OMX_TRUE, 0, &sidebandHandle) == OK) {
- // tunneled playback includes adaptive playback
- } else {
- // tunneled playback is not supported
- caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_TUNNELED_PLAYBACK);
- if (omxNode->setPortMode(
- kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer) == OK ||
- omxNode->prepareForAdaptivePlayback(
- kPortIndexOutput, OMX_TRUE,
- 1280 /* width */, 720 /* height */) != OK) {
- // adaptive playback is not supported
- caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_ADAPTIVE_PLAYBACK);
- }
- }
- }
+ // For VP9, the static info is always propagated by framework.
+ supportsHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
- if (isVideo && isEncoder) {
- OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE params;
- InitOMXParams(¶ms);
- params.nPortIndex = kPortIndexOutput;
-
- OMX_VIDEO_PARAM_INTRAREFRESHTYPE fallbackParams;
- InitOMXParams(&fallbackParams);
- fallbackParams.nPortIndex = kPortIndexOutput;
- fallbackParams.eRefreshMode = OMX_VIDEO_IntraRefreshCyclic;
-
- if (omxNode->getConfig(
- (OMX_INDEXTYPE)OMX_IndexConfigAndroidIntraRefresh,
- ¶ms, sizeof(params)) != OK &&
- omxNode->getParameter(
- OMX_IndexParamVideoIntraRefresh, &fallbackParams,
- sizeof(fallbackParams)) != OK) {
- // intra refresh is not supported
- caps->removeDetail(MediaCodecInfo::Capabilities::FEATURE_INTRA_REFRESH);
- }
- }
-
- omxNode->freeNode();
- return OK;
-}
-
-void buildOmxInfo(const MediaCodecsXmlParser& parser,
- MediaCodecListWriter* writer) {
- uint32_t omxRank = ::android::base::GetUintProperty(
- "debug.stagefright.omx_default_rank", uint32_t(0x100));
- for (const MediaCodecsXmlParser::Codec& codec : parser.getCodecMap()) {
- const std::string &name = codec.first;
- if (!hasPrefix(codec.first, "OMX.")) {
+ for (C2Value::Primitive profile : profileQuery[0].values.values) {
+ pl.profile = (C2Config::profile_t)profile.ref<uint32_t>();
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ err = intf->config({&pl}, C2_DONT_BLOCK, &failures);
+ ALOGV("set profile to %u -> %s", pl.profile, asString(err));
+ std::vector<C2FieldSupportedValuesQuery> levelQuery = {
+ C2FieldSupportedValuesQuery::Current(C2ParamField(&pl, &pl.level))
+ };
+ err = intf->querySupportedValues(levelQuery, C2_DONT_BLOCK);
+ ALOGV("query supported levels -> %s | %s", asString(err), asString(levelQuery[0].status));
+ if (err != C2_OK || levelQuery[0].status != C2_OK
+ || levelQuery[0].values.type != C2FieldSupportedValues::VALUES
+ || levelQuery[0].values.values.size() == 0) {
continue;
}
- const MediaCodecsXmlParser::CodecProperties &properties = codec.second;
- bool encoder = properties.isEncoder;
- std::unique_ptr<MediaCodecInfoWriter> info =
- writer->addMediaCodecInfo();
- info->setName(name.c_str());
- info->setOwner("default");
- typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
- if (encoder) {
- attrs |= MediaCodecInfo::kFlagIsEncoder;
- }
- // NOTE: we don't support software-only codecs in OMX
- if (!hasPrefix(name, "OMX.google.")) {
- attrs |= MediaCodecInfo::kFlagIsVendor;
- if (properties.quirkSet.find("attribute::software-codec")
- == properties.quirkSet.end()) {
- attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
- }
- }
- info->setAttributes(attrs);
- info->setRank(omxRank);
- // OMX components don't have aliases
- for (const MediaCodecsXmlParser::Type &type : properties.typeMap) {
- const std::string &mediaType = type.first;
- std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
- info->addMediaType(mediaType.c_str());
- const MediaCodecsXmlParser::AttributeMap &attrMap = type.second;
- for (const MediaCodecsXmlParser::Attribute& attr : attrMap) {
- const std::string &key = attr.first;
- const std::string &value = attr.second;
- if (hasPrefix(key, "feature-") &&
- !hasPrefix(key, "feature-bitrate-modes")) {
- caps->addDetail(key.c_str(), hasPrefix(value, "1") ? 1 : 0);
- } else {
- caps->addDetail(key.c_str(), value.c_str());
+ C2Value::Primitive level = levelQuery[0].values.values.back();
+ pl.level = (C2Config::level_t)level.ref<uint32_t>();
+ ALOGV("supporting level: %u", pl.level);
+ int32_t sdkProfile, sdkLevel;
+ if (mapper && mapper->mapProfile(pl.profile, &sdkProfile)
+ && mapper->mapLevel(pl.level, &sdkLevel)) {
+ caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+ // also list HDR profiles if component supports HDR
+ if (supportsHdr) {
+ auto hdrMapper = C2Mapper::GetHdrProfileLevelMapper(trait.mediaType);
+ if (hdrMapper && hdrMapper->mapProfile(pl.profile, &sdkProfile)) {
+ caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+ }
+ if (supportsHdr10Plus) {
+ hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
+ trait.mediaType, true /*isHdr10Plus*/);
+ if (hdrMapper && hdrMapper->mapProfile(pl.profile, &sdkProfile)) {
+ caps->addProfileLevel((uint32_t)sdkProfile, (uint32_t)sdkLevel);
+ }
}
}
- status_t err = queryOmxCapabilities(
- name.c_str(),
- mediaType.c_str(),
- encoder,
- caps.get());
- if (err != OK) {
- ALOGI("Failed to query capabilities for %s (media type: %s). Error: %d",
- name.c_str(),
- mediaType.c_str(),
- static_cast<int>(err));
+ } else if (!mapper) {
+ caps->addProfileLevel(pl.profile, pl.level);
+ }
+
+ // for H.263 also advertise the second highest level if the
+ // codec supports level 45, as level 45 only covers level 10
+ // TODO: move this to some form of a setting so it does not
+ // have to be here
+ if (mediaType == MIMETYPE_VIDEO_H263) {
+ C2Config::level_t nextLevel = C2Config::LEVEL_UNUSED;
+ for (C2Value::Primitive v : levelQuery[0].values.values) {
+ C2Config::level_t level = (C2Config::level_t)v.ref<uint32_t>();
+ if (level < C2Config::LEVEL_H263_45 && level > nextLevel) {
+ nextLevel = level;
+ }
}
+ if (nextLevel != C2Config::LEVEL_UNUSED
+ && nextLevel != pl.level
+ && mapper
+ && mapper->mapProfile(pl.profile, &sdkProfile)
+ && mapper->mapLevel(nextLevel, &sdkLevel)) {
+ caps->addProfileLevel(
+ (uint32_t)sdkProfile, (uint32_t)sdkLevel);
+ }
+ }
+ }
+}
+
+void addSupportedColorFormats(
+ std::shared_ptr<Codec2Client::Interface> intf,
+ MediaCodecInfo::CapabilitiesWriter *caps,
+ const Traits& trait, const std::string &mediaType) {
+ (void)intf;
+
+ // TODO: get this from intf() as well, but how do we map them to
+ // MediaCodec color formats?
+ bool encoder = trait.kind == C2Component::KIND_ENCODER;
+ if (mediaType.find("video") != std::string::npos) {
+ // vendor video codecs prefer opaque format
+ if (trait.name.find("android") == std::string::npos) {
+ caps->addColorFormat(COLOR_FormatSurface);
+ }
+ caps->addColorFormat(COLOR_FormatYUV420Flexible);
+ caps->addColorFormat(COLOR_FormatYUV420Planar);
+ caps->addColorFormat(COLOR_FormatYUV420SemiPlanar);
+ caps->addColorFormat(COLOR_FormatYUV420PackedPlanar);
+ caps->addColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
+ // framework video encoders must support surface format, though it is unclear
+ // that they will be able to map it if it is opaque
+ if (encoder && trait.name.find("android") != std::string::npos) {
+ caps->addColorFormat(COLOR_FormatSurface);
}
}
}
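The H.263 special case above scans the supported levels for the largest one still below LEVEL_H263_45, so a second, lower level can be advertised alongside level 45. A standalone sketch of that selection, with plain integers standing in for C2Config::level_t values and 0 standing in for LEVEL_UNUSED.

#include <cstdint>
#include <vector>

// Highest supported level strictly below `cap` (LEVEL_H263_45 in the patch);
// returns 0 (standing in for C2Config::LEVEL_UNUSED) when none qualifies.
uint32_t highestLevelBelow(const std::vector<uint32_t> &supported, uint32_t cap) {
    uint32_t next = 0;
    for (uint32_t level : supported) {
        if (level < cap && level > next) {
            next = level;
        }
    }
    return next;
}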
@@ -335,7 +219,7 @@
// properly. (Assume "full" behavior eventually.)
//
// debug.stagefright.ccodec supports 5 values.
- // 0 - Only OMX components are available.
+ // 0 - No Codec 2.0 components are available.
// 1 - Audio decoders and encoders with prefix "c2.android." are available
// and ranked first.
// All other components with prefix "c2.android." are available with
@@ -366,306 +250,156 @@
MediaCodecsXmlParser parser(
MediaCodecsXmlParser::defaultSearchDirs,
- option == 0 ? "media_codecs.xml" :
- "media_codecs_c2.xml",
- option == 0 ? "media_codecs_performance.xml" :
- "media_codecs_performance_c2.xml");
+ "media_codecs_c2.xml",
+ "media_codecs_performance_c2.xml");
if (parser.getParsingStatus() != OK) {
ALOGD("XML parser no good");
return OK;
}
- bool surfaceTest(Codec2Client::CreateInputSurface());
- if (option == 0 || !surfaceTest) {
- buildOmxInfo(parser, writer);
- }
-
for (const Traits& trait : traits) {
C2Component::rank_t rank = trait.rank;
- std::shared_ptr<Codec2Client::Interface> intf =
- Codec2Client::CreateInterfaceByName(trait.name.c_str());
- if (!intf || parser.getCodecMap().count(intf->getName()) == 0) {
- ALOGD("%s not found in xml", trait.name.c_str());
- continue;
- }
- std::string canonName = intf->getName();
-
- // TODO: Remove this block once all codecs are enabled by default.
- switch (option) {
- case 0:
- continue;
- case 1:
- if (hasPrefix(canonName, "c2.vda.")) {
- break;
+ // Interface must be accessible for us to list the component, and there also
+ // must be an XML entry for the codec. Codec aliases listed in the traits
+ // allow additional XML entries to be specified for each alias. These will
+ // be listed as separate codecs. If no XML entry is specified for an alias,
+ // it will be treated as an additional alias specified in the XML entry
+ // for the interface name.
+ std::vector<std::string> nameAndAliases = trait.aliases;
+ nameAndAliases.insert(nameAndAliases.begin(), trait.name);
+ for (const std::string &nameOrAlias : nameAndAliases) {
+ bool isAlias = trait.name != nameOrAlias;
+ std::shared_ptr<Codec2Client::Interface> intf =
+ Codec2Client::CreateInterfaceByName(nameOrAlias.c_str());
+ if (!intf) {
+ ALOGD("could not create interface for %s'%s'",
+ isAlias ? "alias " : "",
+ nameOrAlias.c_str());
+ continue;
}
- if (hasPrefix(canonName, "c2.android.")) {
- if (trait.domain == C2Component::DOMAIN_AUDIO) {
+ if (parser.getCodecMap().count(nameOrAlias) == 0) {
+ if (isAlias) {
+ std::unique_ptr<MediaCodecInfoWriter> baseCodecInfo =
+ writer->findMediaCodecInfo(trait.name.c_str());
+ if (!baseCodecInfo) {
+ ALOGD("alias '%s' not found in xml but canonical codec info '%s' missing",
+ nameOrAlias.c_str(),
+ trait.name.c_str());
+ } else {
+ ALOGD("alias '%s' not found in xml; use an XML <Alias> tag for this",
+ nameOrAlias.c_str());
+ // merge alias into existing codec
+ baseCodecInfo->addAlias(nameOrAlias.c_str());
+ }
+ } else {
+ ALOGD("component '%s' not found in xml", trait.name.c_str());
+ }
+ continue;
+ }
+ std::string canonName = trait.name;
+
+ // TODO: Remove this block once all codecs are enabled by default.
+ switch (option) {
+ case 0:
+ continue;
+ case 1:
+ if (hasPrefix(canonName, "c2.vda.")) {
+ break;
+ }
+ if (hasPrefix(canonName, "c2.android.")) {
+ if (trait.domain == C2Component::DOMAIN_AUDIO) {
+ rank = 1;
+ break;
+ }
+ break;
+ }
+ if (hasSuffix(canonName, ".avc.decoder") ||
+ hasSuffix(canonName, ".avc.encoder")) {
+ rank = std::numeric_limits<decltype(rank)>::max();
+ break;
+ }
+ continue;
+ case 2:
+ if (hasPrefix(canonName, "c2.vda.")) {
+ break;
+ }
+ if (hasPrefix(canonName, "c2.android.")) {
rank = 1;
break;
}
+ if (hasSuffix(canonName, ".avc.decoder") ||
+ hasSuffix(canonName, ".avc.encoder")) {
+ rank = std::numeric_limits<decltype(rank)>::max();
+ break;
+ }
+ continue;
+ case 3:
+ if (hasPrefix(canonName, "c2.android.")) {
+ rank = 1;
+ }
break;
}
- if (hasSuffix(canonName, ".avc.decoder") ||
- hasSuffix(canonName, ".avc.encoder")) {
- rank = std::numeric_limits<decltype(rank)>::max();
- break;
- }
- continue;
- case 2:
- if (hasPrefix(canonName, "c2.vda.")) {
- break;
- }
- if (hasPrefix(canonName, "c2.android.")) {
- rank = 1;
- break;
- }
- if (hasSuffix(canonName, ".avc.decoder") ||
- hasSuffix(canonName, ".avc.encoder")) {
- rank = std::numeric_limits<decltype(rank)>::max();
- break;
- }
- continue;
- case 3:
- if (hasPrefix(canonName, "c2.android.")) {
- rank = 1;
- }
- break;
- }
- ALOGV("canonName = %s", canonName.c_str());
- std::unique_ptr<MediaCodecInfoWriter> codecInfo = writer->addMediaCodecInfo();
- codecInfo->setName(trait.name.c_str());
- codecInfo->setOwner(("codec2::" + trait.owner).c_str());
- const MediaCodecsXmlParser::CodecProperties &codec = parser.getCodecMap().at(canonName);
+ ALOGV("adding codec entry for '%s'", nameOrAlias.c_str());
+ std::unique_ptr<MediaCodecInfoWriter> codecInfo = writer->addMediaCodecInfo();
+ codecInfo->setName(nameOrAlias.c_str());
+ codecInfo->setOwner(("codec2::" + trait.owner).c_str());
+ const MediaCodecsXmlParser::CodecProperties &codec =
+ parser.getCodecMap().at(nameOrAlias);
- bool encoder = trait.kind == C2Component::KIND_ENCODER;
- typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
+ bool encoder = trait.kind == C2Component::KIND_ENCODER;
+ typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
- if (encoder) {
- attrs |= MediaCodecInfo::kFlagIsEncoder;
- }
- if (trait.owner == "software") {
- attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
- } else {
- attrs |= MediaCodecInfo::kFlagIsVendor;
- if (trait.owner == "vendor-software") {
+ if (encoder) {
+ attrs |= MediaCodecInfo::kFlagIsEncoder;
+ }
+ if (trait.owner == "software") {
attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
- } else if (codec.quirkSet.find("attribute::software-codec") == codec.quirkSet.end()) {
- attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
- }
- }
- codecInfo->setAttributes(attrs);
- codecInfo->setRank(rank);
-
- for (const std::string &alias : codec.aliases) {
- codecInfo->addAlias(alias.c_str());
- }
-
- for (auto typeIt = codec.typeMap.begin(); typeIt != codec.typeMap.end(); ++typeIt) {
- const std::string &mediaType = typeIt->first;
- const MediaCodecsXmlParser::AttributeMap &attrMap = typeIt->second;
- std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
- codecInfo->addMediaType(mediaType.c_str());
- for (auto attrIt = attrMap.begin(); attrIt != attrMap.end(); ++attrIt) {
- std::string key, value;
- std::tie(key, value) = *attrIt;
- if (key.find("feature-") == 0 && key.find("feature-bitrate-modes") != 0) {
- caps->addDetail(key.c_str(), std::stoi(value));
- } else {
- caps->addDetail(key.c_str(), value.c_str());
+ } else {
+ attrs |= MediaCodecInfo::kFlagIsVendor;
+ if (trait.owner == "vendor-software") {
+ attrs |= MediaCodecInfo::kFlagIsSoftwareOnly;
+ } else if (codec.quirkSet.find("attribute::software-codec")
+ == codec.quirkSet.end()) {
+ attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
}
}
-
- bool gotProfileLevels = false;
- if (intf) {
- std::shared_ptr<C2Mapper::ProfileLevelMapper> mapper =
- C2Mapper::GetProfileLevelMapper(trait.mediaType);
- // if we don't know the media type, pass through all values unmapped
-
- // TODO: we cannot find levels that are local 'maxima' without knowing the coding
- // e.g. H.263 level 45 and level 30 could be two values for highest level as
- // they don't include one another. For now we use the last supported value.
- C2StreamProfileLevelInfo pl(encoder /* output */, 0u);
- std::vector<C2FieldSupportedValuesQuery> profileQuery = {
- C2FieldSupportedValuesQuery::Possible(C2ParamField(&pl, &pl.profile))
- };
-
- c2_status_t err = intf->querySupportedValues(profileQuery, C2_DONT_BLOCK);
- ALOGV("query supported profiles -> %s | %s",
- asString(err), asString(profileQuery[0].status));
- if (err == C2_OK && profileQuery[0].status == C2_OK) {
- if (profileQuery[0].values.type == C2FieldSupportedValues::VALUES) {
- std::vector<std::shared_ptr<C2ParamDescriptor>> paramDescs;
- c2_status_t err1 = intf->querySupportedParams(¶mDescs);
- bool isHdr = false, isHdr10Plus = false;
- if (err1 == C2_OK) {
- for (const std::shared_ptr<C2ParamDescriptor> &desc : paramDescs) {
- if ((uint32_t)desc->index() ==
- C2StreamHdr10PlusInfo::output::PARAM_TYPE) {
- isHdr10Plus = true;
- } else if ((uint32_t)desc->index() ==
- C2StreamHdrStaticInfo::output::PARAM_TYPE) {
- isHdr = true;
- }
- }
- }
- // For VP9, the static info is always propagated by framework.
- isHdr |= (mediaType == MIMETYPE_VIDEO_VP9);
-
- for (C2Value::Primitive profile : profileQuery[0].values.values) {
- pl.profile = (C2Config::profile_t)profile.ref<uint32_t>();
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- err = intf->config({&pl}, C2_DONT_BLOCK, &failures);
- ALOGV("set profile to %u -> %s", pl.profile, asString(err));
- std::vector<C2FieldSupportedValuesQuery> levelQuery = {
- C2FieldSupportedValuesQuery::Current(C2ParamField(&pl, &pl.level))
- };
- err = intf->querySupportedValues(levelQuery, C2_DONT_BLOCK);
- ALOGV("query supported levels -> %s | %s",
- asString(err), asString(levelQuery[0].status));
- if (err == C2_OK && levelQuery[0].status == C2_OK) {
- if (levelQuery[0].values.type == C2FieldSupportedValues::VALUES
- && levelQuery[0].values.values.size() > 0) {
- C2Value::Primitive level = levelQuery[0].values.values.back();
- pl.level = (C2Config::level_t)level.ref<uint32_t>();
- ALOGV("supporting level: %u", pl.level);
- int32_t sdkProfile, sdkLevel;
- if (mapper && mapper->mapProfile(pl.profile, &sdkProfile)
- && mapper->mapLevel(pl.level, &sdkLevel)) {
- caps->addProfileLevel(
- (uint32_t)sdkProfile, (uint32_t)sdkLevel);
- gotProfileLevels = true;
- if (isHdr) {
- auto hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
- trait.mediaType);
- if (hdrMapper && hdrMapper->mapProfile(
- pl.profile, &sdkProfile)) {
- caps->addProfileLevel(
- (uint32_t)sdkProfile,
- (uint32_t)sdkLevel);
- }
- if (isHdr10Plus) {
- hdrMapper = C2Mapper::GetHdrProfileLevelMapper(
- trait.mediaType, true /*isHdr10Plus*/);
- if (hdrMapper && hdrMapper->mapProfile(
- pl.profile, &sdkProfile)) {
- caps->addProfileLevel(
- (uint32_t)sdkProfile,
- (uint32_t)sdkLevel);
- }
- }
- }
- } else if (!mapper) {
- caps->addProfileLevel(pl.profile, pl.level);
- gotProfileLevels = true;
- }
-
- // for H.263 also advertise the second highest level if the
- // codec supports level 45, as level 45 only covers level 10
- // TODO: move this to some form of a setting so it does not
- // have to be here
- if (mediaType == MIMETYPE_VIDEO_H263) {
- C2Config::level_t nextLevel = C2Config::LEVEL_UNUSED;
- for (C2Value::Primitive v : levelQuery[0].values.values) {
- C2Config::level_t level =
- (C2Config::level_t)v.ref<uint32_t>();
- if (level < C2Config::LEVEL_H263_45
- && level > nextLevel) {
- nextLevel = level;
- }
- }
- if (nextLevel != C2Config::LEVEL_UNUSED
- && nextLevel != pl.level
- && mapper
- && mapper->mapProfile(pl.profile, &sdkProfile)
- && mapper->mapLevel(nextLevel, &sdkLevel)) {
- caps->addProfileLevel(
- (uint32_t)sdkProfile, (uint32_t)sdkLevel);
- }
- }
- }
- }
- }
- }
+ codecInfo->setAttributes(attrs);
+ if (!codec.rank.empty()) {
+ uint32_t xmlRank;
+ char dummy;
+ if (sscanf(codec.rank.c_str(), "%u%c", &xmlRank, &dummy) == 1) {
+ rank = xmlRank;
}
}
+ codecInfo->setRank(rank);
- if (!gotProfileLevels) {
- if (mediaType == MIMETYPE_VIDEO_VP9) {
- if (encoder) {
- caps->addProfileLevel(VP9Profile0, VP9Level41);
- } else {
- caps->addProfileLevel(VP9Profile0, VP9Level5);
- caps->addProfileLevel(VP9Profile2, VP9Level5);
- caps->addProfileLevel(VP9Profile2HDR, VP9Level5);
- }
- } else if (mediaType == MIMETYPE_VIDEO_AV1 && !encoder) {
- caps->addProfileLevel(AV1Profile0, AV1Level2);
- caps->addProfileLevel(AV1Profile0, AV1Level21);
- caps->addProfileLevel(AV1Profile1, AV1Level22);
- caps->addProfileLevel(AV1Profile1, AV1Level3);
- caps->addProfileLevel(AV1Profile2, AV1Level31);
- caps->addProfileLevel(AV1Profile2, AV1Level32);
- } else if (mediaType == MIMETYPE_VIDEO_HEVC && !encoder) {
- caps->addProfileLevel(HEVCProfileMain, HEVCMainTierLevel51);
- caps->addProfileLevel(HEVCProfileMainStill, HEVCMainTierLevel51);
- } else if (mediaType == MIMETYPE_VIDEO_VP8) {
- if (encoder) {
- caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
- } else {
- caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
- }
- } else if (mediaType == MIMETYPE_VIDEO_AVC) {
- if (encoder) {
- caps->addProfileLevel(AVCProfileBaseline, AVCLevel41);
-// caps->addProfileLevel(AVCProfileConstrainedBaseline, AVCLevel41);
- caps->addProfileLevel(AVCProfileMain, AVCLevel41);
- } else {
- caps->addProfileLevel(AVCProfileBaseline, AVCLevel52);
- caps->addProfileLevel(AVCProfileConstrainedBaseline, AVCLevel52);
- caps->addProfileLevel(AVCProfileMain, AVCLevel52);
- caps->addProfileLevel(AVCProfileConstrainedHigh, AVCLevel52);
- caps->addProfileLevel(AVCProfileHigh, AVCLevel52);
- }
- } else if (mediaType == MIMETYPE_VIDEO_MPEG4) {
- if (encoder) {
- caps->addProfileLevel(MPEG4ProfileSimple, MPEG4Level2);
- } else {
- caps->addProfileLevel(MPEG4ProfileSimple, MPEG4Level3);
- }
- } else if (mediaType == MIMETYPE_VIDEO_H263) {
- if (encoder) {
- caps->addProfileLevel(H263ProfileBaseline, H263Level45);
- } else {
- caps->addProfileLevel(H263ProfileBaseline, H263Level30);
- caps->addProfileLevel(H263ProfileBaseline, H263Level45);
- caps->addProfileLevel(H263ProfileISWV2, H263Level30);
- caps->addProfileLevel(H263ProfileISWV2, H263Level45);
- }
- } else if (mediaType == MIMETYPE_VIDEO_MPEG2 && !encoder) {
- caps->addProfileLevel(MPEG2ProfileSimple, MPEG2LevelHL);
- caps->addProfileLevel(MPEG2ProfileMain, MPEG2LevelHL);
- }
+ for (const std::string &alias : codec.aliases) {
+ ALOGV("adding alias '%s'", alias.c_str());
+ codecInfo->addAlias(alias.c_str());
}
- // TODO: get this from intf() as well, but how do we map them to
- // MediaCodec color formats?
- if (mediaType.find("video") != std::string::npos) {
- // vendor video codecs prefer opaque format
- if (trait.name.find("android") == std::string::npos) {
- caps->addColorFormat(COLOR_FormatSurface);
+ for (auto typeIt = codec.typeMap.begin(); typeIt != codec.typeMap.end(); ++typeIt) {
+ const std::string &mediaType = typeIt->first;
+ const MediaCodecsXmlParser::AttributeMap &attrMap = typeIt->second;
+ std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
+ codecInfo->addMediaType(mediaType.c_str());
+ for (auto attrIt = attrMap.begin(); attrIt != attrMap.end(); ++attrIt) {
+ std::string key, value;
+ std::tie(key, value) = *attrIt;
+ if (key.find("feature-") == 0 && key.find("feature-bitrate-modes") != 0) {
+ int32_t intValue = 0;
+ // Ignore trailing bad characters and default to 0.
+ (void)sscanf(value.c_str(), "%d", &intValue);
+ caps->addDetail(key.c_str(), intValue);
+ } else {
+ caps->addDetail(key.c_str(), value.c_str());
+ }
}
- caps->addColorFormat(COLOR_FormatYUV420Flexible);
- caps->addColorFormat(COLOR_FormatYUV420Planar);
- caps->addColorFormat(COLOR_FormatYUV420SemiPlanar);
- caps->addColorFormat(COLOR_FormatYUV420PackedPlanar);
- caps->addColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
- // framework video encoders must support surface format, though it is unclear
- // that they will be able to map it if it is opaque
- if (encoder && trait.name.find("android") != std::string::npos) {
- caps->addColorFormat(COLOR_FormatSurface);
- }
+
+ addSupportedProfileLevels(intf, caps.get(), trait, mediaType);
+ addSupportedColorFormats(intf, caps.get(), trait, mediaType);
}
}
}
@@ -677,4 +411,3 @@
extern "C" android::MediaCodecListBuilderBase *CreateBuilder() {
return new android::Codec2InfoBuilder;
}
-
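The XML rank override above only takes effect when the value parses as a clean unsigned integer; the trailing %c in the sscanf format makes the call return 2 (not 1) whenever characters follow the number. A tiny standalone check of that idiom; the sample strings are made up.

#include <cstdio>
#include <string>

// Returns true and sets `rank` only if `s` is a whole unsigned integer,
// mirroring the sscanf("%u%c", ...) == 1 pattern in the patch.
bool parseRank(const std::string &s, unsigned int &rank) {
    unsigned int value;
    char dummy;
    if (sscanf(s.c_str(), "%u%c", &value, &dummy) != 1) {
        return false;   // empty, non-numeric, or trailing characters
    }
    rank = value;
    return true;
}

int main() {
    unsigned int rank = 0;
    printf("\"256\"  -> %d\n", parseRank("256", rank));    // 1, rank == 256
    printf("\"256x\" -> %d\n", parseRank("256x", rank));   // 0, rejected
    return 0;
}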
diff --git a/media/codec2/sfplugin/PipelineWatcher.cpp b/media/codec2/sfplugin/PipelineWatcher.cpp
new file mode 100644
index 0000000..df81d49
--- /dev/null
+++ b/media/codec2/sfplugin/PipelineWatcher.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PipelineWatcher"
+
+#include <algorithm>
+#include <functional>
+#include <numeric>
+
+#include <log/log.h>
+
+#include "PipelineWatcher.h"
+
+namespace android {
+
+PipelineWatcher &PipelineWatcher::inputDelay(uint32_t value) {
+ mInputDelay = value;
+ return *this;
+}
+
+PipelineWatcher &PipelineWatcher::pipelineDelay(uint32_t value) {
+ mPipelineDelay = value;
+ return *this;
+}
+
+PipelineWatcher &PipelineWatcher::outputDelay(uint32_t value) {
+ mOutputDelay = value;
+ return *this;
+}
+
+PipelineWatcher &PipelineWatcher::smoothnessFactor(uint32_t value) {
+ mSmoothnessFactor = value;
+ return *this;
+}
+
+void PipelineWatcher::onWorkQueued(
+ uint64_t frameIndex,
+ std::vector<std::shared_ptr<C2Buffer>> &&buffers,
+ const Clock::time_point &queuedAt) {
+ ALOGV("onWorkQueued(frameIndex=%llu, buffers(size=%zu), queuedAt=%lld)",
+ (unsigned long long)frameIndex,
+ buffers.size(),
+ (long long)queuedAt.time_since_epoch().count());
+ auto it = mFramesInPipeline.find(frameIndex);
+ if (it != mFramesInPipeline.end()) {
+ ALOGD("onWorkQueued: Duplicate frame index (%llu); previous entry removed",
+ (unsigned long long)frameIndex);
+ (void)mFramesInPipeline.erase(it);
+ }
+ (void)mFramesInPipeline.try_emplace(frameIndex, std::move(buffers), queuedAt);
+}
+
+std::shared_ptr<C2Buffer> PipelineWatcher::onInputBufferReleased(
+ uint64_t frameIndex, size_t arrayIndex) {
+ ALOGV("onInputBufferReleased(frameIndex=%llu, arrayIndex=%zu)",
+ (unsigned long long)frameIndex, arrayIndex);
+ auto it = mFramesInPipeline.find(frameIndex);
+ if (it == mFramesInPipeline.end()) {
+ ALOGD("onInputBufferReleased: frameIndex not found (%llu); ignored",
+ (unsigned long long)frameIndex);
+ return nullptr;
+ }
+ if (it->second.buffers.size() <= arrayIndex) {
+ ALOGD("onInputBufferReleased: buffers at %llu: size %zu, requested index: %zu",
+ (unsigned long long)frameIndex, it->second.buffers.size(), arrayIndex);
+ return nullptr;
+ }
+ std::shared_ptr<C2Buffer> buffer(std::move(it->second.buffers[arrayIndex]));
+ ALOGD_IF(!buffer, "onInputBufferReleased: buffer already released (%llu:%zu)",
+ (unsigned long long)frameIndex, arrayIndex);
+ return buffer;
+}
+
+void PipelineWatcher::onWorkDone(uint64_t frameIndex) {
+ ALOGV("onWorkDone(frameIndex=%llu)", (unsigned long long)frameIndex);
+ auto it = mFramesInPipeline.find(frameIndex);
+ if (it == mFramesInPipeline.end()) {
+ ALOGD("onWorkDone: frameIndex not found (%llu); ignored",
+ (unsigned long long)frameIndex);
+ return;
+ }
+ (void)mFramesInPipeline.erase(it);
+}
+
+void PipelineWatcher::flush() {
+ mFramesInPipeline.clear();
+}
+
+bool PipelineWatcher::pipelineFull() const {
+ if (mFramesInPipeline.size() >=
+ mInputDelay + mPipelineDelay + mOutputDelay + mSmoothnessFactor) {
+ ALOGV("pipelineFull: too many frames in pipeline (%zu)", mFramesInPipeline.size());
+ return true;
+ }
+ size_t sizeWithInputReleased = std::count_if(
+ mFramesInPipeline.begin(),
+ mFramesInPipeline.end(),
+ [](const decltype(mFramesInPipeline)::value_type &value) {
+ for (const std::shared_ptr<C2Buffer> &buffer : value.second.buffers) {
+ if (buffer) {
+ return false;
+ }
+ }
+ return true;
+ });
+ if (sizeWithInputReleased >=
+ mPipelineDelay + mOutputDelay + mSmoothnessFactor) {
+ ALOGV("pipelineFull: too many frames in pipeline, with input released (%zu)",
+ sizeWithInputReleased);
+ return true;
+ }
+ ALOGV("pipeline has room (total: %zu, input released: %zu)",
+ mFramesInPipeline.size(), sizeWithInputReleased);
+ return false;
+}
+
+PipelineWatcher::Clock::duration PipelineWatcher::elapsed(
+ const PipelineWatcher::Clock::time_point &now, size_t n) const {
+ if (mFramesInPipeline.size() <= n) {
+ return Clock::duration::zero();
+ }
+ std::vector<Clock::duration> durations;
+ for (const decltype(mFramesInPipeline)::value_type &value : mFramesInPipeline) {
+ Clock::duration elapsed = now - value.second.queuedAt;
+ ALOGV("elapsed: frameIndex = %llu elapsed = %lldms",
+ (unsigned long long)value.first,
+ std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count());
+ durations.push_back(elapsed);
+ }
+ std::nth_element(durations.begin(), durations.begin() + n, durations.end(),
+ std::greater<Clock::duration>());
+ return durations[n];
+}
+
+} // namespace android
diff --git a/media/codec2/sfplugin/PipelineWatcher.h b/media/codec2/sfplugin/PipelineWatcher.h
new file mode 100644
index 0000000..1e23147
--- /dev/null
+++ b/media/codec2/sfplugin/PipelineWatcher.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PIPELINE_WATCHER_H_
+#define PIPELINE_WATCHER_H_
+
+#include <chrono>
+#include <map>
+#include <memory>
+
+#include <C2Work.h>
+
+namespace android {
+
+/**
+ * PipelineWatcher watches the pipeline and infers the status of work items from
+ * events.
+ */
+class PipelineWatcher {
+public:
+ typedef std::chrono::steady_clock Clock;
+
+ PipelineWatcher()
+ : mInputDelay(0),
+ mPipelineDelay(0),
+ mOutputDelay(0),
+ mSmoothnessFactor(0) {}
+ ~PipelineWatcher() = default;
+
+ /**
+ * \param value the new input delay value
+ * \return this object
+ */
+ PipelineWatcher &inputDelay(uint32_t value);
+
+ /**
+ * \param value the new pipeline delay value
+ * \return this object
+ */
+ PipelineWatcher &pipelineDelay(uint32_t value);
+
+ /**
+ * \param value the new output delay value
+ * \return this object
+ */
+ PipelineWatcher &outputDelay(uint32_t value);
+
+ /**
+ * \param value the new smoothness factor value
+ * \return this object
+ */
+ PipelineWatcher &smoothnessFactor(uint32_t value);
+
+ /**
+ * Client queued a work item to the component.
+ *
+ * \param frameIndex input frame index of this work
+ * \param buffers input buffers of the queued work item
+ * \param queuedAt time when the client queued the buffer
+ */
+ void onWorkQueued(
+ uint64_t frameIndex,
+ std::vector<std::shared_ptr<C2Buffer>> &&buffers,
+ const Clock::time_point &queuedAt);
+
+ /**
+ * The component released input buffers from a work item.
+ *
+ * \param frameIndex input frame index
+ * \param arrayIndex index of the buffer in the original |buffers| passed to
+ * onWorkQueued().
+ * \return buffers[arrayIndex]
+ */
+ std::shared_ptr<C2Buffer> onInputBufferReleased(
+ uint64_t frameIndex, size_t arrayIndex);
+
+ /**
+ * The component finished processing a work item.
+ *
+ * \param frameIndex input frame index
+ */
+ void onWorkDone(uint64_t frameIndex);
+
+ /**
+ * Flush the pipeline.
+ */
+ void flush();
+
+ /**
+ * \return true if pipeline does not need more work items to proceed
+ * smoothly, considering delays and smoothness factor;
+ * false otherwise.
+ */
+ bool pipelineFull() const;
+
+ /**
+ * Returns the elapsed processing time of a work item, the n-th counted from
+ * the longest processing time to the shortest.
+ *
+ * \param now current timestamp
+ * \param n nth work item, from the longest processing time to the
+ * shortest. It's a 0-based index.
+ * \return elapsed processing time of nth work item.
+ */
+ Clock::duration elapsed(const Clock::time_point &now, size_t n) const;
+
+private:
+ uint32_t mInputDelay;
+ uint32_t mPipelineDelay;
+ uint32_t mOutputDelay;
+ uint32_t mSmoothnessFactor;
+
+ struct Frame {
+ Frame(std::vector<std::shared_ptr<C2Buffer>> &&b,
+ const Clock::time_point &q)
+ : buffers(std::move(b)),
+ queuedAt(q) {}
+ std::vector<std::shared_ptr<C2Buffer>> buffers;
+ const Clock::time_point queuedAt;
+ };
+ std::map<uint64_t, Frame> mFramesInPipeline;
+};
+
+} // namespace android
+
+#endif // PIPELINE_WATCHER_H_
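The header above is the full client-facing API of PipelineWatcher. A minimal usage sketch follows, assuming a caller similar to CCodec's buffer channel; the function name exampleUsage, the delay values, and the frame index are invented for illustration and are not part of this change.

    #include <vector>
    #include "PipelineWatcher.h"

    using android::PipelineWatcher;

    void exampleUsage(std::vector<std::shared_ptr<C2Buffer>> &&inputs) {
        // Configure the watcher from the component's reported delays
        // (the values here are made up).
        PipelineWatcher watcher;
        watcher.inputDelay(2).pipelineDelay(1).outputDelay(4).smoothnessFactor(4);

        const uint64_t frameIndex = 0;
        if (!watcher.pipelineFull()) {
            watcher.onWorkQueued(frameIndex, std::move(inputs),
                                 PipelineWatcher::Clock::now());
        }

        // Later, as the component reports progress on this work item:
        (void)watcher.onInputBufferReleased(frameIndex, 0 /* arrayIndex */);
        watcher.onWorkDone(frameIndex);

        // Stall detection: elapsed time of the work item with the 4th-longest
        // processing time (n is 0-based).
        PipelineWatcher::Clock::duration d =
                watcher.elapsed(PipelineWatcher::Clock::now(), 3);
        (void)d;
    }

With these made-up values, pipelineFull() reports true once 2 + 1 + 4 + 4 = 11 work items are in flight, or once 1 + 4 + 4 = 9 items whose input buffers have all been released are still outstanding.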
diff --git a/media/codec2/sfplugin/SkipCutBuffer.cpp b/media/codec2/sfplugin/SkipCutBuffer.cpp
index 5762440..8d1de65 100644
--- a/media/codec2/sfplugin/SkipCutBuffer.cpp
+++ b/media/codec2/sfplugin/SkipCutBuffer.cpp
@@ -20,7 +20,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/SkipCutBuffer.h>
+#include "SkipCutBuffer.h"
namespace android {
diff --git a/media/codec2/sfplugin/utils/Android.bp b/media/codec2/sfplugin/utils/Android.bp
index eb6c3e9..8c8f025 100644
--- a/media/codec2/sfplugin/utils/Android.bp
+++ b/media/codec2/sfplugin/utils/Android.bp
@@ -26,6 +26,10 @@
"libutils",
],
+ static_libs: [
+ "libyuv_static",
+ ],
+
sanitize: {
cfi: true,
misc_undefined: [
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index 84d22a3..6b8663f 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -18,6 +18,8 @@
#define LOG_TAG "Codec2BufferUtils"
#include <utils/Log.h>
+#include <libyuv.h>
+
#include <list>
#include <mutex>
@@ -62,14 +64,10 @@
*/
template<bool ToMediaImage, typename View, typename ImagePixel>
static status_t _ImageCopy(View &view, const MediaImage2 *img, ImagePixel *imgBase) {
- // TODO: more efficient copying --- e.g. one row at a time, copying
- // interleaved planes together, etc.
+ // TODO: more efficient copying --- e.g. copy interleaved planes together, etc.
const C2PlanarLayout &layout = view.layout();
const size_t bpp = divUp(img->mBitDepthAllocated, 8u);
- if (view.width() != img->mWidth
- || view.height() != img->mHeight) {
- return BAD_VALUE;
- }
+
for (uint32_t i = 0; i < layout.numPlanes; ++i) {
typename std::conditional<ToMediaImage, uint8_t, const uint8_t>::type *imgRow =
imgBase + img->mPlane[i].mOffset;
@@ -120,10 +118,72 @@
} // namespace
status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view) {
+ if (view.width() != img->mWidth || view.height() != img->mHeight) {
+ return BAD_VALUE;
+ }
+ if ((IsNV12(view) && IsI420(img)) || (IsI420(view) && IsNV12(img))) {
+ // Take a shortcut: use libyuv for the NV12 <-> I420 conversion.
+ const uint8_t* src_y = view.data()[0];
+ const uint8_t* src_u = view.data()[1];
+ const uint8_t* src_v = view.data()[2];
+ int32_t src_stride_y = view.layout().planes[0].rowInc;
+ int32_t src_stride_u = view.layout().planes[1].rowInc;
+ int32_t src_stride_v = view.layout().planes[2].rowInc;
+ uint8_t* dst_y = imgBase + img->mPlane[0].mOffset;
+ uint8_t* dst_u = imgBase + img->mPlane[1].mOffset;
+ uint8_t* dst_v = imgBase + img->mPlane[2].mOffset;
+ int32_t dst_stride_y = img->mPlane[0].mRowInc;
+ int32_t dst_stride_u = img->mPlane[1].mRowInc;
+ int32_t dst_stride_v = img->mPlane[2].mRowInc;
+ if (IsNV12(view) && IsI420(img)) {
+ if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
+ dst_u, dst_stride_u, dst_v, dst_stride_v, view.width(),
+ view.height())) {
+ return OK;
+ }
+ } else {
+ if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_u, dst_stride_u, view.width(),
+ view.height())) {
+ return OK;
+ }
+ }
+ }
return _ImageCopy<true>(view, img, imgBase);
}
status_t ImageCopy(C2GraphicView &view, const uint8_t *imgBase, const MediaImage2 *img) {
+ if (view.width() != img->mWidth || view.height() != img->mHeight) {
+ return BAD_VALUE;
+ }
+ if ((IsNV12(img) && IsI420(view)) || (IsI420(img) && IsNV12(view))) {
+ // Take a shortcut: use libyuv for the NV12 <-> I420 conversion.
+ const uint8_t* src_y = imgBase + img->mPlane[0].mOffset;
+ const uint8_t* src_u = imgBase + img->mPlane[1].mOffset;
+ const uint8_t* src_v = imgBase + img->mPlane[2].mOffset;
+ int32_t src_stride_y = img->mPlane[0].mRowInc;
+ int32_t src_stride_u = img->mPlane[1].mRowInc;
+ int32_t src_stride_v = img->mPlane[2].mRowInc;
+ uint8_t* dst_y = view.data()[0];
+ uint8_t* dst_u = view.data()[1];
+ uint8_t* dst_v = view.data()[2];
+ int32_t dst_stride_y = view.layout().planes[0].rowInc;
+ int32_t dst_stride_u = view.layout().planes[1].rowInc;
+ int32_t dst_stride_v = view.layout().planes[2].rowInc;
+ if (IsNV12(img) && IsI420(view)) {
+ if (!libyuv::NV12ToI420(src_y, src_stride_y, src_u, src_stride_u, dst_y, dst_stride_y,
+ dst_u, dst_stride_u, dst_v, dst_stride_v, view.width(),
+ view.height())) {
+ return OK;
+ }
+ } else {
+ if (!libyuv::I420ToNV12(src_y, src_stride_y, src_u, src_stride_u, src_v, src_stride_v,
+ dst_y, dst_stride_y, dst_u, dst_stride_u, view.width(),
+ view.height())) {
+ return OK;
+ }
+ }
+ }
return _ImageCopy<false>(view, img, imgBase);
}
@@ -151,6 +211,65 @@
&& layout.planes[layout.PLANE_V].rowSampling == 2);
}
+bool IsNV12(const C2GraphicView &view) {
+ if (!IsYUV420(view)) {
+ return false;
+ }
+ const C2PlanarLayout &layout = view.layout();
+ return (layout.rootPlanes == 2
+ && layout.planes[layout.PLANE_U].colInc == 2
+ && layout.planes[layout.PLANE_U].rootIx == layout.PLANE_U
+ && layout.planes[layout.PLANE_U].offset == 0
+ && layout.planes[layout.PLANE_V].colInc == 2
+ && layout.planes[layout.PLANE_V].rootIx == layout.PLANE_U
+ && layout.planes[layout.PLANE_V].offset == 1);
+}
+
+bool IsI420(const C2GraphicView &view) {
+ if (!IsYUV420(view)) {
+ return false;
+ }
+ const C2PlanarLayout &layout = view.layout();
+ return (layout.rootPlanes == 3
+ && layout.planes[layout.PLANE_U].colInc == 1
+ && layout.planes[layout.PLANE_U].rootIx == layout.PLANE_U
+ && layout.planes[layout.PLANE_U].offset == 0
+ && layout.planes[layout.PLANE_V].colInc == 1
+ && layout.planes[layout.PLANE_V].rootIx == layout.PLANE_V
+ && layout.planes[layout.PLANE_V].offset == 0);
+}
+
+bool IsYUV420(const MediaImage2 *img) {
+ return (img->mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV
+ && img->mNumPlanes == 3
+ && img->mBitDepth == 8
+ && img->mBitDepthAllocated == 8
+ && img->mPlane[0].mHorizSubsampling == 1
+ && img->mPlane[0].mVertSubsampling == 1
+ && img->mPlane[1].mHorizSubsampling == 2
+ && img->mPlane[1].mVertSubsampling == 2
+ && img->mPlane[2].mHorizSubsampling == 2
+ && img->mPlane[2].mVertSubsampling == 2);
+}
+
+bool IsNV12(const MediaImage2 *img) {
+ if (!IsYUV420(img)) {
+ return false;
+ }
+ return (img->mPlane[1].mColInc == 2
+ && img->mPlane[2].mColInc == 2
+ && (img->mPlane[2].mOffset - img->mPlane[1].mOffset == 1));
+}
+
+bool IsI420(const MediaImage2 *img) {
+ if (!IsYUV420(img)) {
+ return false;
+ }
+ return (img->mPlane[1].mColInc == 1
+ && img->mPlane[2].mColInc == 1
+ && img->mPlane[2].mOffset > img->mPlane[1].mOffset);
+}
+
MediaImage2 CreateYUV420PlanarMediaImage2(
uint32_t width, uint32_t height, uint32_t stride, uint32_t vstride) {
return MediaImage2 {
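For reference, a MediaImage2 configuration that the IsYUV420()/IsNV12() predicates above accept can be sketched as follows; the helper name MakeNV12MediaImage2 and the assumption that the interleaved UV plane directly follows the Y plane are illustrative only, with MediaImage2 as defined in media/hardware/VideoAPI.h.

    MediaImage2 MakeNV12MediaImage2(uint32_t width, uint32_t height, uint32_t stride) {
        MediaImage2 img{};
        img.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
        img.mNumPlanes = 3;
        img.mWidth = width;
        img.mHeight = height;
        img.mBitDepth = 8;
        img.mBitDepthAllocated = 8;
        // Y plane: one byte per pixel, no subsampling.
        img.mPlane[0].mOffset = 0;
        img.mPlane[0].mColInc = 1;
        img.mPlane[0].mRowInc = (int32_t)stride;
        img.mPlane[0].mHorizSubsampling = 1;
        img.mPlane[0].mVertSubsampling = 1;
        // U and V share one interleaved plane: column increment of 2, with the
        // V sample one byte after the U sample, which is what IsNV12() checks.
        img.mPlane[1].mOffset = stride * height;
        img.mPlane[1].mColInc = 2;
        img.mPlane[1].mRowInc = (int32_t)stride;
        img.mPlane[1].mHorizSubsampling = 2;
        img.mPlane[1].mVertSubsampling = 2;
        img.mPlane[2].mOffset = stride * height + 1;
        img.mPlane[2].mColInc = 2;
        img.mPlane[2].mRowInc = (int32_t)stride;
        img.mPlane[2].mHorizSubsampling = 2;
        img.mPlane[2].mVertSubsampling = 2;
        return img;  // IsYUV420(&img) and IsNV12(&img) both hold for this layout.
    }

An I420 image differs only in the chroma planes: separate U and V planes with a column increment of 1 and the V offset greater than the U offset, matching IsI420() above.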
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index eaf6776..afadf00 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -91,6 +91,31 @@
bool IsYUV420(const C2GraphicView &view);
/**
+ * Returns true iff a view has an NV12 layout.
+ */
+bool IsNV12(const C2GraphicView &view);
+
+/**
+ * Returns true iff a view has an I420 layout.
+ */
+bool IsI420(const C2GraphicView &view);
+
+/**
+ * Returns true iff a MediaImage2 has a YUV 420 888 layout.
+ */
+bool IsYUV420(const MediaImage2 *img);
+
+/**
+ * Returns true iff a MediaImage2 has an NV12 layout.
+ */
+bool IsNV12(const MediaImage2 *img);
+
+/**
+ * Returns true iff a MediaImage2 has an I420 layout.
+ */
+bool IsI420(const MediaImage2 *img);
+
+/**
* A raw memory block to use for internal buffers.
*
* TODO: replace this with C2LinearBlocks from a private C2BlockPool
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index c369e16..6da131f 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -65,7 +65,9 @@
{ C2Config::LEVEL_AVC_5, AVCLevel5 },
{ C2Config::LEVEL_AVC_5_1, AVCLevel51 },
{ C2Config::LEVEL_AVC_5_2, AVCLevel52 },
-
+ { C2Config::LEVEL_AVC_6, AVCLevel6 },
+ { C2Config::LEVEL_AVC_6_1, AVCLevel61 },
+ { C2Config::LEVEL_AVC_6_2, AVCLevel62 },
};
ALookup<C2Config::profile_t, int32_t> sAvcProfiles = {
@@ -99,7 +101,7 @@
{ C2Color::MATRIX_BT709, ColorAspects::MatrixBT709_5 },
{ C2Color::MATRIX_FCC47_73_682, ColorAspects::MatrixBT470_6M },
{ C2Color::MATRIX_BT601, ColorAspects::MatrixBT601_6 },
- { C2Color::MATRIX_SMPTE240M, ColorAspects::MatrixSMPTE240M },
+ { C2Color::MATRIX_240M, ColorAspects::MatrixSMPTE240M },
{ C2Color::MATRIX_BT2020, ColorAspects::MatrixBT2020 },
{ C2Color::MATRIX_BT2020_CONSTANT, ColorAspects::MatrixBT2020Constant },
{ C2Color::MATRIX_OTHER, ColorAspects::MatrixOther },
@@ -853,19 +855,19 @@
switch (primaries) {
case C2Color::PRIMARIES_BT601_525:
- *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+ *dataSpace |= (matrix == C2Color::MATRIX_240M
|| matrix == C2Color::MATRIX_BT709)
? HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED
: HAL_DATASPACE_STANDARD_BT601_525;
break;
case C2Color::PRIMARIES_BT601_625:
- *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+ *dataSpace |= (matrix == C2Color::MATRIX_240M
|| matrix == C2Color::MATRIX_BT709)
? HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED
: HAL_DATASPACE_STANDARD_BT601_625;
break;
case C2Color::PRIMARIES_BT2020:
- *dataSpace |= (matrix == C2Color::MATRIX_BT2020CONSTANT
+ *dataSpace |= (matrix == C2Color::MATRIX_BT2020_CONSTANT
? HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE
: HAL_DATASPACE_STANDARD_BT2020);
break;
diff --git a/media/codec2/tests/C2ComponentInterface_test.cpp b/media/codec2/tests/C2ComponentInterface_test.cpp
index e907964..67f733d 100644
--- a/media/codec2/tests/C2ComponentInterface_test.cpp
+++ b/media/codec2/tests/C2ComponentInterface_test.cpp
@@ -182,9 +182,9 @@
return std::make_unique<T>();
}
-template <> std::unique_ptr<C2PortMimeConfig::input> makeParam() {
+template <> std::unique_ptr<C2PortMediaTypeSetting::input> makeParam() {
// TODO(hiroh): Set more precise length.
- return C2PortMimeConfig::input::AllocUnique(100);
+ return C2PortMediaTypeSetting::input::AllocUnique(100);
}
#define TRACED_FAILURE(func) \
@@ -323,17 +323,17 @@
EXPECT_EQ(C2SettingResult::BAD_VALUE, failures[0]->failure);
}
-// There is only used enum type for the field type, that is C2DomainKind.
+// The only enum type used as a field type here is C2Component::domain_t.
// If another enum field type is added, a corresponding function must be added for it.
template <>
void C2CompIntfTest::getTestValues(
const C2FieldSupportedValues &validValueInfos,
- std::vector<C2DomainKind> *const validValues,
- std::vector<C2DomainKind> *const invalidValues) {
+ std::vector<C2Component::domain_t> *const validValues,
+ std::vector<C2Component::domain_t> *const invalidValues) {
UNUSED(validValueInfos);
- validValues->emplace_back(C2DomainVideo);
- validValues->emplace_back(C2DomainAudio);
- validValues->emplace_back(C2DomainOther);
+ validValues->emplace_back(C2Component::DOMAIN_VIDEO);
+ validValues->emplace_back(C2Component::DOMAIN_AUDIO);
+ validValues->emplace_back(C2Component::DOMAIN_OTHER);
// There is no invalid value.
UNUSED(invalidValues);
@@ -634,20 +634,20 @@
std::vector<std::shared_ptr<C2ParamDescriptor>> supportedParams;
ASSERT_EQ(C2_OK, mIntf->querySupportedParams_nb(&supportedParams));
- EACH_TEST_SELF(C2ComponentLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_SELF(C2ComponentTemporalInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ActualPipelineDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ComponentAttributesSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_SELF(C2ComponentDomainInfo, TEST_ENUM_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ComponentDomainSetting, TEST_ENUM_WRITABLE_FIELD);
// TODO(hiroh): Support parameters based on uint32_t[] and char[].
- // EACH_TEST_INPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
- // EACH_TEST_OUTPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
+ // EACH_TEST_INPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
+ // EACH_TEST_OUTPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
// EACH_TEST_INPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
// EACH_TEST_OUTPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
@@ -656,10 +656,10 @@
// EACH_TEST_SELF(C2ReadOnlyParamsInfo, TEST_U32ARRAY_WRITABLE_FIELD);
// EACH_TEST_SELF(C2RequestedInfosInfo, TEST_U32ARRAY_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
EACH_TEST_INPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
EACH_TEST_OUTPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
diff --git a/media/codec2/tests/C2SampleComponent_test.cpp b/media/codec2/tests/C2SampleComponent_test.cpp
index cd354ad..9956834 100644
--- a/media/codec2/tests/C2SampleComponent_test.cpp
+++ b/media/codec2/tests/C2SampleComponent_test.cpp
@@ -152,7 +152,7 @@
std::unordered_map<uint32_t, C2Param &> mMyParams;
- C2ComponentDomainInfo mDomainInfo;
+ C2ComponentDomainSetting mDomainInfo;
MyComponentInstance() {
mMyParams.insert({mDomainInfo.index(), mDomainInfo});
@@ -187,12 +187,12 @@
c2_blocking_t mayBlock) const override {
(void)mayBlock;
for (C2FieldSupportedValuesQuery &query : fields) {
- if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainInfo::value)) {
+ if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainSetting::value)) {
query.values = C2FieldSupportedValues(
false /* flag */,
&mDomainInfo.value
//,
- //{(int32_t)C2DomainVideo}
+ //{(int32_t)C2Component::DOMAIN_VIDEO}
);
query.status = C2_OK;
} else {
@@ -391,20 +391,20 @@
}
TEST_F(C2SampleComponentTest, ReflectorTest) {
- C2ComponentDomainInfo domainInfo;
+ C2ComponentDomainSetting domainInfo;
std::shared_ptr<MyComponentInstance> myComp(new MyComponentInstance);
std::shared_ptr<C2ComponentInterface> comp = myComp;
std::unique_ptr<C2StructDescriptor> desc{
- myComp->getParamReflector()->describe(C2ComponentDomainInfo::CORE_INDEX)};
+ myComp->getParamReflector()->describe(C2ComponentDomainSetting::CORE_INDEX)};
dumpStruct(*desc);
std::vector<C2FieldSupportedValuesQuery> query = {
- { C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+ { C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
C2FieldSupportedValuesQuery::CURRENT },
- C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+ C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
C2FieldSupportedValuesQuery::CURRENT),
- C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value)),
+ C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value)),
};
EXPECT_EQ(C2_OK, comp->querySupportedValues_vb(query, C2_DONT_BLOCK));
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 4878974..e698bf4 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -159,7 +159,7 @@
return xd != nullptr && xd->magic == MAGIC;
}
- static C2HandleGralloc* WrapNativeHandle(
+ static C2HandleGralloc* WrapAndMoveNativeHandle(
const native_handle_t *const handle,
uint32_t width, uint32_t height, uint32_t format, uint64_t usage,
uint32_t stride, uint32_t generation, uint64_t igbp_id = 0, uint32_t igbp_slot = 0) {
@@ -181,6 +181,26 @@
return reinterpret_cast<C2HandleGralloc *>(res);
}
+ static C2HandleGralloc* WrapNativeHandle(
+ const native_handle_t *const handle,
+ uint32_t width, uint32_t height, uint32_t format, uint64_t usage,
+ uint32_t stride, uint32_t generation, uint64_t igbp_id = 0, uint32_t igbp_slot = 0) {
+ if (handle == nullptr) {
+ return nullptr;
+ }
+ native_handle_t *clone = native_handle_clone(handle);
+ if (clone == nullptr) {
+ return nullptr;
+ }
+ C2HandleGralloc *res = WrapAndMoveNativeHandle(
+ clone, width, height, format, usage, stride, generation, igbp_id, igbp_slot);
+ if (res == nullptr) {
+ native_handle_close(clone);
+ }
+ native_handle_delete(clone);
+ return res;
+ }
+
static native_handle_t* UnwrapNativeHandle(
const C2Handle *const handle) {
const ExtraData *xd = getExtraData(handle);
@@ -304,17 +324,23 @@
}
C2AllocationGralloc::~C2AllocationGralloc() {
- if (!mBuffer) {
- return;
- }
- if (mLocked) {
+ if (mBuffer && mLocked) {
// implementation ignores address and rect
uint8_t* addr[C2PlanarLayout::MAX_NUM_PLANES] = {};
unmap(addr, C2Rect(), nullptr);
}
- mMapper->freeBuffer(const_cast<native_handle_t *>(mBuffer));
- native_handle_delete(const_cast<native_handle_t*>(
- reinterpret_cast<const native_handle_t*>(mHandle)));
+ if (mBuffer) {
+ mMapper->freeBuffer(const_cast<native_handle_t *>(mBuffer));
+ }
+ if (mHandle) {
+ native_handle_delete(
+ const_cast<native_handle_t *>(reinterpret_cast<const native_handle_t *>(mHandle)));
+ }
+ if (mLockedHandle) {
+ native_handle_delete(
+ const_cast<native_handle_t *>(
+ reinterpret_cast<const native_handle_t *>(mLockedHandle)));
+ }
}
c2_status_t C2AllocationGralloc::map(
@@ -360,7 +386,7 @@
if (mHandle) {
mHandle->getIgbpData(&generation, &igbp_id, &igbp_slot);
}
- mLockedHandle = C2HandleGralloc::WrapNativeHandle(
+ mLockedHandle = C2HandleGralloc::WrapAndMoveNativeHandle(
mBuffer, mInfo.mapperInfo.width, mInfo.mapperInfo.height,
(uint32_t)mInfo.mapperInfo.format, mInfo.mapperInfo.usage, mInfo.stride,
generation, igbp_id, igbp_slot);
@@ -737,7 +763,7 @@
return;
}
info.stride = stride;
- buffer = std::move(buffers[0]);
+ buffer = buffers[0];
});
if (err != C2_OK) {
return err;
@@ -746,7 +772,7 @@
allocation->reset(new C2AllocationGralloc(
info, mMapper, buffer,
- C2HandleGralloc::WrapNativeHandle(
+ C2HandleGralloc::WrapAndMoveNativeHandle(
buffer.getNativeHandle(),
info.mapperInfo.width, info.mapperInfo.height,
(uint32_t)info.mapperInfo.format, info.mapperInfo.usage, info.stride,
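To summarize the ownership split introduced above in a hedged sketch (the surrounding variables are assumed to be in scope): WrapNativeHandle() clones the caller's handle first, so the caller keeps full ownership, whereas WrapAndMoveNativeHandle() reuses the caller's fds and leaves the caller with only the handle struct to delete.

    // Caller keeps ownership of |handle|; the returned C2HandleGralloc owns
    // fds duplicated via the internal native_handle_clone().
    C2HandleGralloc *wrapped = C2HandleGralloc::WrapNativeHandle(
            handle, width, height, format, usage, stride, generation);

    // The caller's fds now belong to the returned C2HandleGralloc; the caller
    // should native_handle_delete(handle) but must not close its fds.
    C2HandleGralloc *moved = C2HandleGralloc::WrapAndMoveNativeHandle(
            handle, width, height, format, usage, stride, generation);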
diff --git a/media/codec2/vndk/C2AllocatorIon.cpp b/media/codec2/vndk/C2AllocatorIon.cpp
index 736aac5..d22153d 100644
--- a/media/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/codec2/vndk/C2AllocatorIon.cpp
@@ -140,6 +140,7 @@
protected:
class Impl;
+ class ImplV2;
Impl *mImpl;
// TODO: we could make this encapsulate shared_ptr and copiable
@@ -147,7 +148,7 @@
};
class C2AllocationIon::Impl {
-private:
+protected:
/**
* Constructs an ion allocation.
*
@@ -191,11 +192,7 @@
* \return created ion allocation (implementation) which may be invalid if the
* import failed.
*/
- static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id) {
- ion_user_handle_t buffer = -1;
- int ret = ion_import(ionFd, bufferFd, &buffer);
- return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
- }
+ static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);
/**
* Constructs an ion allocation by allocating an ion buffer.
@@ -209,24 +206,7 @@
* \return created ion allocation (implementation) which may be invalid if the
* allocation failed.
*/
- static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
- int bufferFd = -1;
- ion_user_handle_t buffer = -1;
- size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
- int ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
- ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
- "returned (%d) ; buffer = %d",
- ionFd, alignedSize, align, heapMask, flags, ret, buffer);
- if (ret == 0) {
- // get buffer fd for native handle constructor
- ret = ion_share(ionFd, buffer, &bufferFd);
- if (ret != 0) {
- ion_free(ionFd, buffer);
- buffer = -1;
- }
- }
- return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);
- }
+ static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
(void)fence; // TODO: wait for fence
@@ -256,32 +236,7 @@
size_t mapSize = size + alignmentBytes;
Mapping map = { nullptr, alignmentBytes, mapSize };
- c2_status_t err = C2_OK;
- if (mMapFd == -1) {
- int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
- flags, mapOffset, (unsigned char**)&map.addr, &mMapFd);
- ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
- "offset = %zu) returned (%d)",
- mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
- if (ret) {
- mMapFd = -1;
- map.addr = *addr = nullptr;
- err = c2_map_errno<EINVAL>(-ret);
- } else {
- *addr = (uint8_t *)map.addr + alignmentBytes;
- }
- } else {
- map.addr = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
- ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
- "returned (%d)",
- mapSize, prot, flags, mMapFd, mapOffset, errno);
- if (map.addr == MAP_FAILED) {
- map.addr = *addr = nullptr;
- err = c2_map_errno<EINVAL>(errno);
- } else {
- *addr = (uint8_t *)map.addr + alignmentBytes;
- }
- }
+ c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
if (map.addr) {
mMappings.push_back(map);
}
@@ -289,7 +244,7 @@
}
c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
- if (mMapFd < 0 || mMappings.empty()) {
+ if (mMappings.empty()) {
ALOGD("tried to unmap unmapped buffer");
return C2_NOT_FOUND;
}
@@ -307,14 +262,14 @@
*fence = C2Fence(); // not using fences
}
(void)mMappings.erase(it);
- ALOGV("successfully unmapped: %d", mBuffer);
+ ALOGV("successfully unmapped: %d", mHandle.bufferFd());
return C2_OK;
}
ALOGD("unmap failed to find specified map");
return C2_BAD_VALUE;
}
- ~Impl() {
+ virtual ~Impl() {
if (!mMappings.empty()) {
ALOGD("Dangling mappings!");
for (const Mapping &map : mMappings) {
@@ -326,7 +281,9 @@
mMapFd = -1;
}
if (mInit == C2_OK) {
- (void)ion_free(mIonFd, mBuffer);
+ if (mBuffer >= 0) {
+ (void)ion_free(mIonFd, mBuffer);
+ }
native_handle_close(&mHandle);
}
if (mIonFd >= 0) {
@@ -346,11 +303,42 @@
return mId;
}
- ion_user_handle_t ionHandle() const {
+ virtual ion_user_handle_t ionHandle() const {
return mBuffer;
}
-private:
+protected:
+ virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
+ int prot, int flags, void** base, void** addr) {
+ c2_status_t err = C2_OK;
+ if (mMapFd == -1) {
+ int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
+ flags, mapOffset, (unsigned char**)base, &mMapFd);
+ ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
+ "offset = %zu) returned (%d)",
+ mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
+ if (ret) {
+ mMapFd = -1;
+ *base = *addr = nullptr;
+ err = c2_map_errno<EINVAL>(-ret);
+ } else {
+ *addr = (uint8_t *)*base + alignmentBytes;
+ }
+ } else {
+ *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
+ ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
+ "returned (%d)",
+ mapSize, prot, flags, mMapFd, mapOffset, errno);
+ if (*base == MAP_FAILED) {
+ *base = *addr = nullptr;
+ err = c2_map_errno<EINVAL>(errno);
+ } else {
+ *addr = (uint8_t *)*base + alignmentBytes;
+ }
+ }
+ return err;
+ }
+
int mIonFd;
C2HandleIon mHandle;
ion_user_handle_t mBuffer;
@@ -365,6 +353,93 @@
std::list<Mapping> mMappings;
};
+class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
+public:
+ /**
+ * Constructs an ion allocation for platforms with the new (ion_4.12.h) API
+ *
+ * \note We always create an ion allocation, even if the allocation or import fails
+ * so that we can capture the error.
+ *
+ * \param ionFd ion client (ownership transferred to created object)
+ * \param capacity size of allocation
+ * \param bufferFd buffer handle (ownership transferred to created object). Must be
+ * invalid if err is not 0.
+ * \param err errno during buffer allocation or import
+ */
+ ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
+ : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
+ }
+
+ virtual ~ImplV2() = default;
+
+ virtual ion_user_handle_t ionHandle() const {
+ return mHandle.bufferFd();
+ }
+
+protected:
+ virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
+ int prot, int flags, void** base, void** addr) {
+ c2_status_t err = C2_OK;
+ *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
+ ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
+ "returned (%d)",
+ mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
+ if (*base == MAP_FAILED) {
+ *base = *addr = nullptr;
+ err = c2_map_errno<EINVAL>(errno);
+ } else {
+ *addr = (uint8_t *)*base + alignmentBytes;
+ }
+ return err;
+ }
+
+};
+
+C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
+ C2Allocator::id_t id) {
+ int ret = 0;
+ if (ion_is_legacy(ionFd)) {
+ ion_user_handle_t buffer = -1;
+ ret = ion_import(ionFd, bufferFd, &buffer);
+ return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
+ } else {
+ return new ImplV2(ionFd, capacity, bufferFd, id, ret);
+ }
+}
+
+C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
+ unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
+ int bufferFd = -1;
+ ion_user_handle_t buffer = -1;
+ size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
+ int ret;
+
+ if (ion_is_legacy(ionFd)) {
+ ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
+ ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
+ "returned (%d) ; buffer = %d",
+ ionFd, alignedSize, align, heapMask, flags, ret, buffer);
+ if (ret == 0) {
+ // get buffer fd for native handle constructor
+ ret = ion_share(ionFd, buffer, &bufferFd);
+ if (ret != 0) {
+ ion_free(ionFd, buffer);
+ buffer = -1;
+ }
+ }
+ return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);
+
+ } else {
+ ret = ion_alloc_fd(ionFd, alignedSize, align, heapMask, flags, &bufferFd);
+ ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
+ "returned (%d) ; bufferFd = %d",
+ ionFd, alignedSize, align, heapMask, flags, ret, bufferFd);
+
+ return new ImplV2(ionFd, alignedSize, bufferFd, id, ret);
+ }
+}
+
c2_status_t C2AllocationIon::map(
size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
return mImpl->map(offset, size, usage, fence, addr);
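The size rounding used by Alloc() above can be illustrated in isolation; the helper name alignUp is invented, and like the original expression it assumes align is zero or a power of two.

    #include <cstddef>

    // Same expression as in Alloc(): round size up to the next multiple of align.
    constexpr size_t alignUp(size_t size, size_t align) {
        return align == 0 ? size : (size + align - 1) & ~(align - 1);
    }

    static_assert(alignUp(1000, 4096) == 4096, "rounds up to one page");
    static_assert(alignUp(4097, 4096) == 8192, "crosses into the next page");
    static_assert(alignUp(8192, 4096) == 8192, "already aligned, unchanged");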
diff --git a/media/codec2/vndk/C2Config.cpp b/media/codec2/vndk/C2Config.cpp
index 782bec5..34680a7 100644
--- a/media/codec2/vndk/C2Config.cpp
+++ b/media/codec2/vndk/C2Config.cpp
@@ -186,6 +186,9 @@
{ "avc-5", C2Config::LEVEL_AVC_5 },
{ "avc-5.1", C2Config::LEVEL_AVC_5_1 },
{ "avc-5.2", C2Config::LEVEL_AVC_5_2 },
+ { "avc-6", C2Config::LEVEL_AVC_6 },
+ { "avc-6.1", C2Config::LEVEL_AVC_6_1 },
+ { "avc-6.2", C2Config::LEVEL_AVC_6_2 },
{ "hevc-main-1", C2Config::LEVEL_HEVC_MAIN_1 },
{ "hevc-main-2", C2Config::LEVEL_HEVC_MAIN_2 },
{ "hevc-main-2.1", C2Config::LEVEL_HEVC_MAIN_2_1 },
@@ -221,6 +224,30 @@
{ "vp9-6", C2Config::LEVEL_VP9_6 },
{ "vp9-6.1", C2Config::LEVEL_VP9_6_1 },
{ "vp9-6.2", C2Config::LEVEL_VP9_6_2 },
+ { "av1-2", C2Config::LEVEL_AV1_2 },
+ { "av1-2.1", C2Config::LEVEL_AV1_2_1 },
+ { "av1-2.2", C2Config::LEVEL_AV1_2_2 },
+ { "av1-2.3", C2Config::LEVEL_AV1_2_3 },
+ { "av1-3", C2Config::LEVEL_AV1_3 },
+ { "av1-3.1", C2Config::LEVEL_AV1_3_1 },
+ { "av1-3.2", C2Config::LEVEL_AV1_3_2 },
+ { "av1-3.3", C2Config::LEVEL_AV1_3_3 },
+ { "av1-4", C2Config::LEVEL_AV1_4 },
+ { "av1-4.1", C2Config::LEVEL_AV1_4_1 },
+ { "av1-4.2", C2Config::LEVEL_AV1_4_2 },
+ { "av1-4.3", C2Config::LEVEL_AV1_4_3 },
+ { "av1-5", C2Config::LEVEL_AV1_5 },
+ { "av1-5.1", C2Config::LEVEL_AV1_5_1 },
+ { "av1-5.2", C2Config::LEVEL_AV1_5_2 },
+ { "av1-5.3", C2Config::LEVEL_AV1_5_3 },
+ { "av1-6", C2Config::LEVEL_AV1_6 },
+ { "av1-6.1", C2Config::LEVEL_AV1_6_1 },
+ { "av1-6.2", C2Config::LEVEL_AV1_6_2 },
+ { "av1-6.3", C2Config::LEVEL_AV1_6_3 },
+ { "av1-7", C2Config::LEVEL_AV1_7 },
+ { "av1-7.1", C2Config::LEVEL_AV1_7_1 },
+ { "av1-7.2", C2Config::LEVEL_AV1_7_2 },
+ { "av1-7.3", C2Config::LEVEL_AV1_7_3 },
}))
DEFINE_C2_ENUM_VALUE_CUSTOM_HELPER(C2BufferData::type_t, ({
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index a5dd203..e075849 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -517,7 +517,6 @@
*
* \note Only used by ComponentLoader.
*
- * \param alias[in] module alias
* \param libPath[in] library path
*
* \retval C2_OK the component module has been successfully loaded
@@ -527,7 +526,7 @@
* \retval C2_REFUSED permission denied to load the component module (unexpected)
* \retval C2_TIMED_OUT could not load the module within the time limit (unexpected)
*/
- c2_status_t init(std::string alias, std::string libPath);
+ c2_status_t init(std::string libPath);
virtual ~ComponentModule() override;
@@ -570,7 +569,7 @@
std::shared_ptr<ComponentModule> localModule = mModule.lock();
if (localModule == nullptr) {
localModule = std::make_shared<ComponentModule>();
- res = localModule->init(mAlias, mLibPath);
+ res = localModule->init(mLibPath);
if (res == C2_OK) {
mModule = localModule;
}
@@ -582,13 +581,12 @@
/**
* Creates a component loader for a specific library path (or name).
*/
- ComponentLoader(std::string alias, std::string libPath)
- : mAlias(alias), mLibPath(libPath) {}
+ ComponentLoader(std::string libPath)
+ : mLibPath(libPath) {}
private:
std::mutex mMutex; ///< mutex guarding the module
std::weak_ptr<ComponentModule> mModule; ///< weak reference to the loaded module
- std::string mAlias; ///< component alias
std::string mLibPath; ///< library path
};
@@ -624,9 +622,10 @@
};
/**
- * Retrieves the component loader for a component.
+ * Retrieves the component module for a component.
*
- * \return a non-ref-holding pointer to the component loader.
+ * \param module pointer to a shared_ptr where the component module will be stored on
+ * success.
*
* \retval C2_OK the component loader has been successfully retrieved
* \retval C2_NO_MEMORY not enough memory to locate the component loader
@@ -640,16 +639,25 @@
* component but some components could not be loaded due to lack of
* permissions)
*/
- c2_status_t findComponent(C2String name, ComponentLoader **loader);
+ c2_status_t findComponent(C2String name, std::shared_ptr<ComponentModule> *module);
- std::map<C2String, ComponentLoader> mComponents; ///< map of name -> components
- std::vector<C2String> mComponentsList; ///< list of components
+ /**
+ * Loads each component module and discovers its contents.
+ */
+ void visitComponents();
+
+ std::mutex mMutex; ///< mutex guarding the component lists during construction
+ bool mVisited; ///< component modules visited
+ std::map<C2String, ComponentLoader> mComponents; ///< path -> component module
+ std::map<C2String, C2String> mComponentNameToPath; ///< name -> path
+ std::vector<std::shared_ptr<const C2Component::Traits>> mComponentList;
+
std::shared_ptr<C2ReflectorHelper> mReflector;
Interface mInterface;
};
c2_status_t C2PlatformComponentStore::ComponentModule::init(
- std::string alias, std::string libPath) {
+ std::string libPath) {
ALOGV("in %s", __func__);
ALOGV("loading dll");
mLibHandle = dlopen(libPath.c_str(), RTLD_NOW|RTLD_NODELETE);
@@ -684,14 +692,28 @@
std::shared_ptr<C2Component::Traits> traits(new (std::nothrow) C2Component::Traits);
if (traits) {
- if (alias != intf->getName()) {
- ALOGV("%s is alias to %s", alias.c_str(), intf->getName().c_str());
+ traits->name = intf->getName();
+
+ C2ComponentKindSetting kind;
+ C2ComponentDomainSetting domain;
+ res = intf->query_vb({ &kind, &domain }, {}, C2_MAY_BLOCK, nullptr);
+ bool fixDomain = res != C2_OK;
+ if (res == C2_OK) {
+ traits->kind = kind.value;
+ traits->domain = domain.value;
+ } else {
+ // TODO: remove this fall-back
+ ALOGD("failed to query interface for kind and domain: %d", res);
+
+ traits->kind =
+ (traits->name.find("encoder") != std::string::npos) ? C2Component::KIND_ENCODER :
+ (traits->name.find("decoder") != std::string::npos) ? C2Component::KIND_DECODER :
+ C2Component::KIND_OTHER;
}
- traits->name = alias;
- // TODO: get this from interface properly.
- bool encoder = (traits->name.find("encoder") != std::string::npos);
- uint32_t mediaTypeIndex = encoder ? C2PortMimeConfig::output::PARAM_TYPE
- : C2PortMimeConfig::input::PARAM_TYPE;
+
+ uint32_t mediaTypeIndex =
+ traits->kind == C2Component::KIND_ENCODER ? C2PortMediaTypeSetting::output::PARAM_TYPE
+ : C2PortMediaTypeSetting::input::PARAM_TYPE;
std::vector<std::unique_ptr<C2Param>> params;
res = intf->query_vb({}, { mediaTypeIndex }, C2_MAY_BLOCK, ¶ms);
if (res != C2_OK) {
@@ -702,29 +724,54 @@
ALOGD("failed to query interface: unexpected vector size: %zu", params.size());
return mInit;
}
- C2PortMimeConfig *mediaTypeConfig = (C2PortMimeConfig *)(params[0].get());
+ C2PortMediaTypeSetting *mediaTypeConfig = C2PortMediaTypeSetting::From(params[0].get());
if (mediaTypeConfig == nullptr) {
ALOGD("failed to query media type");
return mInit;
}
- traits->mediaType = mediaTypeConfig->m.value;
- // TODO: get this properly.
- traits->rank = 0x200;
+ traits->mediaType =
+ std::string(mediaTypeConfig->m.value,
+ strnlen(mediaTypeConfig->m.value, mediaTypeConfig->flexCount()));
- // TODO: define these values properly
- bool decoder = (traits->name.find("decoder") != std::string::npos);
- traits->kind =
- decoder ? C2Component::KIND_DECODER :
- encoder ? C2Component::KIND_ENCODER :
- C2Component::KIND_OTHER;
- if (strncmp(traits->mediaType.c_str(), "audio/", 6) == 0) {
- traits->domain = C2Component::DOMAIN_AUDIO;
- } else if (strncmp(traits->mediaType.c_str(), "video/", 6) == 0) {
- traits->domain = C2Component::DOMAIN_VIDEO;
- } else if (strncmp(traits->mediaType.c_str(), "image/", 6) == 0) {
- traits->domain = C2Component::DOMAIN_IMAGE;
- } else {
- traits->domain = C2Component::DOMAIN_OTHER;
+ if (fixDomain) {
+ if (strncmp(traits->mediaType.c_str(), "audio/", 6) == 0) {
+ traits->domain = C2Component::DOMAIN_AUDIO;
+ } else if (strncmp(traits->mediaType.c_str(), "video/", 6) == 0) {
+ traits->domain = C2Component::DOMAIN_VIDEO;
+ } else if (strncmp(traits->mediaType.c_str(), "image/", 6) == 0) {
+ traits->domain = C2Component::DOMAIN_IMAGE;
+ } else {
+ traits->domain = C2Component::DOMAIN_OTHER;
+ }
+ }
+
+ // TODO: get this properly from the store during emplace
+ switch (traits->domain) {
+ case C2Component::DOMAIN_AUDIO:
+ traits->rank = 8;
+ break;
+ default:
+ traits->rank = 512;
+ }
+
+ params.clear();
+ res = intf->query_vb({}, { C2ComponentAliasesSetting::PARAM_TYPE }, C2_MAY_BLOCK, ¶ms);
+ if (res == C2_OK && params.size() == 1u) {
+ C2ComponentAliasesSetting *aliasesSetting =
+ C2ComponentAliasesSetting::From(params[0].get());
+ if (aliasesSetting) {
+ // Split aliases on ','
+ // Doing this in plain C is simpler here, and even std::string would still make a copy.
+ char *aliases = ::strndup(aliasesSetting->m.value, aliasesSetting->flexCount());
+ ALOGD("'%s' has aliases: '%s'", intf->getName().c_str(), aliases);
+
+ for (char *tok, *ptr, *str = aliases; (tok = ::strtok_r(str, ",", &ptr));
+ str = nullptr) {
+ traits->aliases.push_back(tok);
+ ALOGD("adding alias: '%s'", tok);
+ }
+ free(aliases);
+ }
}
}
mTraits = traits;
@@ -783,81 +830,46 @@
}
C2PlatformComponentStore::C2PlatformComponentStore()
- : mReflector(std::make_shared<C2ReflectorHelper>()),
+ : mVisited(false),
+ mReflector(std::make_shared<C2ReflectorHelper>()),
mInterface(mReflector) {
- auto emplace = [this](const char *alias, const char *libPath) {
- // ComponentLoader is neither copiable nor movable, so it must be
- // constructed in-place. Now ComponentLoader takes two arguments in
- // constructor, so we need to use piecewise_construct to achieve this
- // behavior.
- mComponents.emplace(
- std::piecewise_construct,
- std::forward_as_tuple(alias),
- std::forward_as_tuple(alias, libPath));
- mComponentsList.emplace_back(alias);
+ auto emplace = [this](const char *libPath) {
+ mComponents.emplace(libPath, libPath);
};
- // TODO: move this also into a .so so it can be updated
- emplace("c2.android.avc.decoder", "libcodec2_soft_avcdec.so");
- emplace("c2.android.avc.encoder", "libcodec2_soft_avcenc.so");
- emplace("c2.android.aac.decoder", "libcodec2_soft_aacdec.so");
- emplace("c2.android.aac.encoder", "libcodec2_soft_aacenc.so");
- emplace("c2.android.amrnb.decoder", "libcodec2_soft_amrnbdec.so");
- emplace("c2.android.amrnb.encoder", "libcodec2_soft_amrnbenc.so");
- emplace("c2.android.amrwb.decoder", "libcodec2_soft_amrwbdec.so");
- emplace("c2.android.amrwb.encoder", "libcodec2_soft_amrwbenc.so");
- emplace("c2.android.hevc.decoder", "libcodec2_soft_hevcdec.so");
- emplace("c2.android.g711.alaw.decoder", "libcodec2_soft_g711alawdec.so");
- emplace("c2.android.g711.mlaw.decoder", "libcodec2_soft_g711mlawdec.so");
- emplace("c2.android.mpeg2.decoder", "libcodec2_soft_mpeg2dec.so");
- emplace("c2.android.h263.decoder", "libcodec2_soft_h263dec.so");
- emplace("c2.android.h263.encoder", "libcodec2_soft_h263enc.so");
- emplace("c2.android.mpeg4.decoder", "libcodec2_soft_mpeg4dec.so");
- emplace("c2.android.mpeg4.encoder", "libcodec2_soft_mpeg4enc.so");
- emplace("c2.android.mp3.decoder", "libcodec2_soft_mp3dec.so");
- emplace("c2.android.vorbis.decoder", "libcodec2_soft_vorbisdec.so");
- emplace("c2.android.opus.decoder", "libcodec2_soft_opusdec.so");
- emplace("c2.android.vp8.decoder", "libcodec2_soft_vp8dec.so");
- emplace("c2.android.vp9.decoder", "libcodec2_soft_vp9dec.so");
- emplace("c2.android.vp8.encoder", "libcodec2_soft_vp8enc.so");
- emplace("c2.android.vp9.encoder", "libcodec2_soft_vp9enc.so");
- emplace("c2.android.av1.decoder", "libcodec2_soft_av1dec.so");
- emplace("c2.android.raw.decoder", "libcodec2_soft_rawdec.so");
- emplace("c2.android.flac.decoder", "libcodec2_soft_flacdec.so");
- emplace("c2.android.flac.encoder", "libcodec2_soft_flacenc.so");
- emplace("c2.android.gsm.decoder", "libcodec2_soft_gsmdec.so");
- emplace("c2.android.xaac.decoder", "libcodec2_soft_xaacdec.so");
- // "Aliases"
- // TODO: use aliases proper from C2Component::Traits
- emplace("OMX.google.h264.decoder", "libcodec2_soft_avcdec.so");
- emplace("OMX.google.h264.encoder", "libcodec2_soft_avcenc.so");
- emplace("OMX.google.aac.decoder", "libcodec2_soft_aacdec.so");
- emplace("OMX.google.aac.encoder", "libcodec2_soft_aacenc.so");
- emplace("OMX.google.amrnb.decoder", "libcodec2_soft_amrnbdec.so");
- emplace("OMX.google.amrnb.encoder", "libcodec2_soft_amrnbenc.so");
- emplace("OMX.google.amrwb.decoder", "libcodec2_soft_amrwbdec.so");
- emplace("OMX.google.amrwb.encoder", "libcodec2_soft_amrwbenc.so");
- emplace("OMX.google.hevc.decoder", "libcodec2_soft_hevcdec.so");
- emplace("OMX.google.g711.alaw.decoder", "libcodec2_soft_g711alawdec.so");
- emplace("OMX.google.g711.mlaw.decoder", "libcodec2_soft_g711mlawdec.so");
- emplace("OMX.google.mpeg2.decoder", "libcodec2_soft_mpeg2dec.so");
- emplace("OMX.google.h263.decoder", "libcodec2_soft_h263dec.so");
- emplace("OMX.google.h263.encoder", "libcodec2_soft_h263enc.so");
- emplace("OMX.google.mpeg4.decoder", "libcodec2_soft_mpeg4dec.so");
- emplace("OMX.google.mpeg4.encoder", "libcodec2_soft_mpeg4enc.so");
- emplace("OMX.google.mp3.decoder", "libcodec2_soft_mp3dec.so");
- emplace("OMX.google.vorbis.decoder", "libcodec2_soft_vorbisdec.so");
- emplace("OMX.google.opus.decoder", "libcodec2_soft_opusdec.so");
- emplace("OMX.google.vp8.decoder", "libcodec2_soft_vp8dec.so");
- emplace("OMX.google.vp9.decoder", "libcodec2_soft_vp9dec.so");
- emplace("OMX.google.vp8.encoder", "libcodec2_soft_vp8enc.so");
- emplace("OMX.google.vp9.encoder", "libcodec2_soft_vp9enc.so");
- emplace("OMX.google.raw.decoder", "libcodec2_soft_rawdec.so");
- emplace("OMX.google.flac.decoder", "libcodec2_soft_flacdec.so");
- emplace("OMX.google.flac.encoder", "libcodec2_soft_flacenc.so");
- emplace("OMX.google.gsm.decoder", "libcodec2_soft_gsmdec.so");
- emplace("OMX.google.xaac.decoder", "libcodec2_soft_xaacdec.so");
+ // TODO: move this also into a .so so it can be updated
+ emplace("libcodec2_soft_aacdec.so");
+ emplace("libcodec2_soft_aacenc.so");
+ emplace("libcodec2_soft_amrnbdec.so");
+ emplace("libcodec2_soft_amrnbenc.so");
+ emplace("libcodec2_soft_amrwbdec.so");
+ emplace("libcodec2_soft_amrwbenc.so");
+ emplace("libcodec2_soft_av1dec.so");
+ emplace("libcodec2_soft_avcdec.so");
+ emplace("libcodec2_soft_avcenc.so");
+ emplace("libcodec2_soft_flacdec.so");
+ emplace("libcodec2_soft_flacenc.so");
+ emplace("libcodec2_soft_g711alawdec.so");
+ emplace("libcodec2_soft_g711mlawdec.so");
+ emplace("libcodec2_soft_gsmdec.so");
+ emplace("libcodec2_soft_h263dec.so");
+ emplace("libcodec2_soft_h263enc.so");
+ emplace("libcodec2_soft_hevcdec.so");
+ emplace("libcodec2_soft_hevcenc.so");
+ emplace("libcodec2_soft_mp3dec.so");
+ emplace("libcodec2_soft_mpeg2dec.so");
+ emplace("libcodec2_soft_mpeg4dec.so");
+ emplace("libcodec2_soft_mpeg4enc.so");
+ emplace("libcodec2_soft_opusdec.so");
+ emplace("libcodec2_soft_opusenc.so");
+ emplace("libcodec2_soft_rawdec.so");
+ emplace("libcodec2_soft_vorbisdec.so");
+ emplace("libcodec2_soft_vp8dec.so");
+ emplace("libcodec2_soft_vp8enc.so");
+ emplace("libcodec2_soft_vp9dec.so");
+ emplace("libcodec2_soft_vp9enc.so");
+ emplace("libcodec2_soft_xaacdec.so");
}
c2_status_t C2PlatformComponentStore::copyBuffer(
@@ -880,47 +892,56 @@
return mInterface.config(params, C2_MAY_BLOCK, failures);
}
-std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
- // This method SHALL return within 500ms.
- std::vector<std::shared_ptr<const C2Component::Traits>> list;
- for (const C2String &alias : mComponentsList) {
- ComponentLoader &loader = mComponents.at(alias);
+void C2PlatformComponentStore::visitComponents() {
+ std::lock_guard<std::mutex> lock(mMutex);
+ if (mVisited) {
+ return;
+ }
+ for (auto &pathAndLoader : mComponents) {
+ const C2String &path = pathAndLoader.first;
+ ComponentLoader &loader = pathAndLoader.second;
std::shared_ptr<ComponentModule> module;
- c2_status_t res = loader.fetchModule(&module);
- if (res == C2_OK) {
+ if (loader.fetchModule(&module) == C2_OK) {
std::shared_ptr<const C2Component::Traits> traits = module->getTraits();
if (traits) {
- list.push_back(traits);
+ mComponentList.push_back(traits);
+ mComponentNameToPath.emplace(traits->name, path);
+ for (const C2String &alias : traits->aliases) {
+ mComponentNameToPath.emplace(alias, path);
+ }
}
}
}
- return list;
+ mVisited = true;
}
-c2_status_t C2PlatformComponentStore::findComponent(C2String name, ComponentLoader **loader) {
- *loader = nullptr;
- auto pos = mComponents.find(name);
- // TODO: check aliases
- if (pos == mComponents.end()) {
- return C2_NOT_FOUND;
+std::vector<std::shared_ptr<const C2Component::Traits>> C2PlatformComponentStore::listComponents() {
+ // This method SHALL return within 500ms.
+ visitComponents();
+ return mComponentList;
+}
+
+c2_status_t C2PlatformComponentStore::findComponent(
+ C2String name, std::shared_ptr<ComponentModule> *module) {
+ (*module).reset();
+ visitComponents();
+
+ auto pos = mComponentNameToPath.find(name);
+ if (pos != mComponentNameToPath.end()) {
+ return mComponents.at(pos->second).fetchModule(module);
}
- *loader = &pos->second;
- return C2_OK;
+ return C2_NOT_FOUND;
}
c2_status_t C2PlatformComponentStore::createComponent(
C2String name, std::shared_ptr<C2Component> *const component) {
// This method SHALL return within 100ms.
component->reset();
- ComponentLoader *loader;
- c2_status_t res = findComponent(name, &loader);
+ std::shared_ptr<ComponentModule> module;
+ c2_status_t res = findComponent(name, &module);
if (res == C2_OK) {
- std::shared_ptr<ComponentModule> module;
- res = loader->fetchModule(&module);
- if (res == C2_OK) {
- // TODO: get a unique node ID
- res = module->createComponent(0, component);
- }
+ // TODO: get a unique node ID
+ res = module->createComponent(0, component);
}
return res;
}
@@ -929,15 +950,11 @@
C2String name, std::shared_ptr<C2ComponentInterface> *const interface) {
// This method SHALL return within 100ms.
interface->reset();
- ComponentLoader *loader;
- c2_status_t res = findComponent(name, &loader);
+ std::shared_ptr<ComponentModule> module;
+ c2_status_t res = findComponent(name, &module);
if (res == C2_OK) {
- std::shared_ptr<ComponentModule> module;
- res = loader->fetchModule(&module);
- if (res == C2_OK) {
- // TODO: get a unique node ID
- res = module->createInterface(0, interface);
- }
+ // TODO: get a unique node ID
+ res = module->createInterface(0, interface);
}
return res;
}
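A sketch of the resulting lookup flow, for illustration only; store stands for a C2PlatformComponentStore instance, and the component names are the ones that used to be hard-coded in the constructor above.

    // Discovery is now lazy: the first listComponents()/findComponent() call
    // loads each registered .so, queries its traits, and indexes both the
    // component name and any aliases it reports.
    std::vector<std::shared_ptr<const C2Component::Traits>> traits =
            store->listComponents();

    std::shared_ptr<C2Component> component;
    c2_status_t res = store->createComponent("c2.android.avc.decoder", &component);
    // A lookup by an alias such as "OMX.google.h264.decoder" resolves through
    // mComponentNameToPath as well, provided the module reports that alias in
    // C2ComponentAliasesSetting.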
diff --git a/media/codec2/vndk/platform/C2BqBuffer.cpp b/media/codec2/vndk/platform/C2BqBuffer.cpp
index 7a26035..41a5b3f 100644
--- a/media/codec2/vndk/platform/C2BqBuffer.cpp
+++ b/media/codec2/vndk/platform/C2BqBuffer.cpp
@@ -192,7 +192,7 @@
int slot;
ALOGV("tries to dequeue buffer");
Return<void> transStatus = mProducer->dequeueBuffer(
- width, height, pixelFormat, androidUsage.asGrallocUsage(), true,
+ width, height, pixelFormat, androidUsage.asGrallocUsage(), false,
[&status, &slot, &fence](
int32_t tStatus, int32_t tSlot, hidl_handle const& tFence,
HGraphicBufferProducer::FrameEventHistoryDelta const& tTs) {
@@ -207,12 +207,16 @@
// dequeueBuffer returns flag.
if (!transStatus.isOk() || status < android::OK) {
ALOGD("cannot dequeue buffer %d", status);
- if (transStatus.isOk() && status == android::INVALID_OPERATION) {
- // Too many buffer dequeued. retrying after some time is required.
- return C2_TIMED_OUT;
- } else {
- return C2_BAD_VALUE;
+ if (transStatus.isOk()) {
+ if (status == android::INVALID_OPERATION ||
+ status == android::TIMED_OUT ||
+ status == android::WOULD_BLOCK) {
+ // Dequeueing a buffer is temporarily blocked; the caller
+ // needs to retry.
+ return C2_BLOCKING;
+ }
}
+ return C2_BAD_VALUE;
}
ALOGV("dequeued a buffer successfully");
native_handle_t* nh = nullptr;
@@ -227,7 +231,7 @@
if (status == -ETIME) {
// fence is not signalled yet.
(void)mProducer->cancelBuffer(slot, fenceHandle).isOk();
- return C2_TIMED_OUT;
+ return C2_BLOCKING;
}
if (status != android::NO_ERROR) {
ALOGD("buffer fence wait error %d", status);
@@ -269,36 +273,28 @@
}
}
if (slotBuffer) {
- native_handle_t *grallocHandle = native_handle_clone(slotBuffer->handle);
-
- if (grallocHandle) {
- ALOGV("buffer wraps %llu %d", (unsigned long long)mProducerId, slot);
- C2Handle *c2Handle = android::WrapNativeCodec2GrallocHandle(
- grallocHandle,
- slotBuffer->width,
- slotBuffer->height,
- slotBuffer->format,
- slotBuffer->usage,
- slotBuffer->stride,
- slotBuffer->getGenerationNumber(),
- mProducerId, slot);
- if (c2Handle) {
- // Moved everything to c2Handle.
- native_handle_delete(grallocHandle);
- std::shared_ptr<C2GraphicAllocation> alloc;
- c2_status_t err = mAllocator->priorGraphicAllocation(c2Handle, &alloc);
- if (err != C2_OK) {
- return err;
- }
- std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
- std::make_shared<C2BufferQueueBlockPoolData>(
- slotBuffer->getGenerationNumber(),
- mProducerId, slot, shared_from_this());
- *block = _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
- return C2_OK;
+ ALOGV("buffer wraps %llu %d", (unsigned long long)mProducerId, slot);
+ C2Handle *c2Handle = android::WrapNativeCodec2GrallocHandle(
+ slotBuffer->handle,
+ slotBuffer->width,
+ slotBuffer->height,
+ slotBuffer->format,
+ slotBuffer->usage,
+ slotBuffer->stride,
+ slotBuffer->getGenerationNumber(),
+ mProducerId, slot);
+ if (c2Handle) {
+ std::shared_ptr<C2GraphicAllocation> alloc;
+ c2_status_t err = mAllocator->priorGraphicAllocation(c2Handle, &alloc);
+ if (err != C2_OK) {
+ return err;
}
- native_handle_close(grallocHandle);
- native_handle_delete(grallocHandle);
+ std::shared_ptr<C2BufferQueueBlockPoolData> poolData =
+ std::make_shared<C2BufferQueueBlockPoolData>(
+ slotBuffer->getGenerationNumber(),
+ mProducerId, slot, shared_from_this());
+ *block = _C2BlockFactory::CreateGraphicBlock(alloc, poolData);
+ return C2_OK;
}
// Block was not created. call requestBuffer# again next time.
slotBuffer.clear();
@@ -361,14 +357,14 @@
return C2_OK;
}
c2_status_t status = fetchFromIgbp_l(width, height, format, usage, block);
- if (status == C2_TIMED_OUT) {
+ if (status == C2_BLOCKING) {
lock.unlock();
::usleep(kMaxIgbpRetryDelayUs);
continue;
}
return status;
}
- return C2_TIMED_OUT;
+ return C2_BLOCKING;
}
void setRenderCallback(const OnRenderCallback &renderCallback) {
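For callers, the practical effect of this file's changes is that a temporarily exhausted buffer queue now surfaces as C2_BLOCKING instead of C2_TIMED_OUT. A caller-side sketch follows, with the helper fetchWithRetry and its retry policy invented for illustration and the fetchGraphicBlock signature assumed from the public C2BlockPool interface.

    // Keep retrying while the pool reports a transient condition; give up on
    // any other error. The pool itself already sleeps between internal retries.
    c2_status_t fetchWithRetry(
            const std::shared_ptr<C2BlockPool> &pool,
            uint32_t width, uint32_t height, uint32_t format, C2MemoryUsage usage,
            std::shared_ptr<C2GraphicBlock> *block) {
        c2_status_t err;
        do {
            err = pool->fetchGraphicBlock(width, height, format, usage, block);
        } while (err == C2_BLOCKING);
        return err;
    }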
diff --git a/media/extractors/aac/AACExtractor.cpp b/media/extractors/aac/AACExtractor.cpp
index beddad0..9d183d4 100644
--- a/media/extractors/aac/AACExtractor.cpp
+++ b/media/extractors/aac/AACExtractor.cpp
@@ -150,6 +150,7 @@
mMeta = AMediaFormat_new();
MakeAACCodecSpecificData(mMeta, profile, sf_index, channel);
+ AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_AAC_PROFILE, profile + 1);
off64_t streamSize, numFrames = 0;
size_t frameSize = 0;
diff --git a/media/extractors/flac/FLACExtractor.cpp b/media/extractors/flac/FLACExtractor.cpp
index 84fbcee..8854631 100644
--- a/media/extractors/flac/FLACExtractor.cpp
+++ b/media/extractors/flac/FLACExtractor.cpp
@@ -581,6 +581,7 @@
CHECK(mGroup == NULL);
mGroup = group;
mMaxBufferSize = getMaxBlockSize() * getChannels() * getOutputSampleSize();
+ AMediaFormat_setInt32(mTrackMetadata, AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, mMaxBufferSize);
mGroup->add_buffer(mMaxBufferSize);
}
@@ -667,7 +668,7 @@
: mDataSource(dataSource),
mTrackMetadata(trackMetadata),
mOutputFloat(outputFloat),
- mParser(new FLACParser(mDataSource, outputFloat)),
+ mParser(new FLACParser(mDataSource, outputFloat, 0, mTrackMetadata)),
mInitCheck(mParser->initCheck()),
mStarted(false)
{
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 61838f6..a838ae6 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -708,6 +708,7 @@
}
static const char *extensions[] = {
+ "mp2",
"mp3",
"mpeg",
"mpg",
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index 55a0c47..a72e589 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -17,6 +17,8 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ItemTable"
+#include <unordered_set>
+
#include <ItemTable.h>
#include <media/MediaExtractorPluginApi.h>
#include <media/MediaExtractorPluginHelper.h>
@@ -48,7 +50,7 @@
offset(0), size(0), nextTileIndex(0) {}
bool isGrid() const {
- return type == FOURCC('g', 'r', 'i', 'd');
+ return type == FOURCC("grid");
}
status_t getNextTileItemId(uint32_t *nextTileItemId, bool reset) {
@@ -223,7 +225,7 @@
struct PitmBox : public FullBox {
PitmBox(DataSourceHelper *source) :
- FullBox(source, FOURCC('p', 'i', 't', 'm')) {}
+ FullBox(source, FOURCC("pitm")) {}
status_t parse(off64_t offset, size_t size, uint32_t *primaryItemId);
};
@@ -303,7 +305,7 @@
struct IlocBox : public FullBox {
IlocBox(DataSourceHelper *source, KeyedVector<uint32_t, ItemLoc> *itemLocs) :
- FullBox(source, FOURCC('i', 'l', 'o', 'c')),
+ FullBox(source, FOURCC("iloc")),
mItemLocs(itemLocs), mHasConstructMethod1(false) {}
status_t parse(off64_t offset, size_t size);
@@ -497,7 +499,7 @@
ALOGV("attach reference type 0x%x to item id %d)", type(), mItemId);
switch(type()) {
- case FOURCC('d', 'i', 'm', 'g'): {
+ case FOURCC("dimg"): {
ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
// ignore non-image items
@@ -525,7 +527,7 @@
}
break;
}
- case FOURCC('t', 'h', 'm', 'b'): {
+ case FOURCC("thmb"): {
ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
// ignore non-image items
@@ -554,7 +556,7 @@
}
break;
}
- case FOURCC('c', 'd', 's', 'c'): {
+ case FOURCC("cdsc"): {
ssize_t itemIndex = itemIdToExifMap.indexOfKey(mItemId);
// ignore non-exif block items
@@ -575,7 +577,7 @@
}
break;
}
- case FOURCC('a', 'u', 'x', 'l'): {
+ case FOURCC("auxl"): {
ssize_t itemIndex = itemIdToItemMap.indexOfKey(mItemId);
// ignore non-image items
@@ -628,7 +630,7 @@
struct IrefBox : public FullBox {
IrefBox(DataSourceHelper *source, Vector<sp<ItemReference> > *itemRefs) :
- FullBox(source, FOURCC('i', 'r', 'e', 'f')), mRefIdSize(0), mItemRefs(itemRefs) {}
+ FullBox(source, FOURCC("iref")), mRefIdSize(0), mItemRefs(itemRefs) {}
status_t parse(off64_t offset, size_t size);
@@ -690,7 +692,7 @@
struct IspeBox : public FullBox, public ItemProperty {
IspeBox(DataSourceHelper *source) :
- FullBox(source, FOURCC('i', 's', 'p', 'e')), mWidth(0), mHeight(0) {}
+ FullBox(source, FOURCC("ispe")), mWidth(0), mHeight(0) {}
status_t parse(off64_t offset, size_t size) override;
@@ -726,7 +728,7 @@
struct HvccBox : public Box, public ItemProperty {
HvccBox(DataSourceHelper *source) :
- Box(source, FOURCC('h', 'v', 'c', 'C')) {}
+ Box(source, FOURCC("hvcC")) {}
status_t parse(off64_t offset, size_t size) override;
@@ -759,7 +761,7 @@
struct IrotBox : public Box, public ItemProperty {
IrotBox(DataSourceHelper *source) :
- Box(source, FOURCC('i', 'r', 'o', 't')), mAngle(0) {}
+ Box(source, FOURCC("irot")), mAngle(0) {}
status_t parse(off64_t offset, size_t size) override;
@@ -788,7 +790,7 @@
struct ColrBox : public Box, public ItemProperty {
ColrBox(DataSourceHelper *source) :
- Box(source, FOURCC('c', 'o', 'l', 'r')) {}
+ Box(source, FOURCC("colr")) {}
status_t parse(off64_t offset, size_t size) override;
@@ -812,11 +814,11 @@
}
offset += 4;
size -= 4;
- if (colour_type == FOURCC('n', 'c', 'l', 'x')) {
+ if (colour_type == FOURCC("nclx")) {
return OK;
}
- if ((colour_type != FOURCC('r', 'I', 'C', 'C')) &&
- (colour_type != FOURCC('p', 'r', 'o', 'f'))) {
+ if ((colour_type != FOURCC("rICC")) &&
+ (colour_type != FOURCC("prof"))) {
return ERROR_MALFORMED;
}
@@ -836,7 +838,7 @@
struct IpmaBox : public FullBox {
IpmaBox(DataSourceHelper *source, Vector<AssociationEntry> *associations) :
- FullBox(source, FOURCC('i', 'p', 'm', 'a')), mAssociations(associations) {}
+ FullBox(source, FOURCC("ipma")), mAssociations(associations) {}
status_t parse(off64_t offset, size_t size);
private:
@@ -910,7 +912,7 @@
struct IpcoBox : public Box {
IpcoBox(DataSourceHelper *source, Vector<sp<ItemProperty> > *properties) :
- Box(source, FOURCC('i', 'p', 'c', 'o')), mItemProperties(properties) {}
+ Box(source, FOURCC("ipco")), mItemProperties(properties) {}
status_t parse(off64_t offset, size_t size);
protected:
@@ -930,22 +932,22 @@
status_t IpcoBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
sp<ItemProperty> itemProperty;
switch(type) {
- case FOURCC('h', 'v', 'c', 'C'):
+ case FOURCC("hvcC"):
{
itemProperty = new HvccBox(source());
break;
}
- case FOURCC('i', 's', 'p', 'e'):
+ case FOURCC("ispe"):
{
itemProperty = new IspeBox(source());
break;
}
- case FOURCC('i', 'r', 'o', 't'):
+ case FOURCC("irot"):
{
itemProperty = new IrotBox(source());
break;
}
- case FOURCC('c', 'o', 'l', 'r'):
+ case FOURCC("colr"):
{
itemProperty = new ColrBox(source());
break;
@@ -969,7 +971,7 @@
IprpBox(DataSourceHelper *source,
Vector<sp<ItemProperty> > *properties,
Vector<AssociationEntry> *associations) :
- Box(source, FOURCC('i', 'p', 'r', 'p')),
+ Box(source, FOURCC("iprp")),
mProperties(properties), mAssociations(associations) {}
status_t parse(off64_t offset, size_t size);
@@ -993,12 +995,12 @@
status_t IprpBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
switch(type) {
- case FOURCC('i', 'p', 'c', 'o'):
+ case FOURCC("ipco"):
{
IpcoBox ipcoBox(source(), mProperties);
return ipcoBox.parse(offset, size);
}
- case FOURCC('i', 'p', 'm', 'a'):
+ case FOURCC("ipma"):
{
IpmaBox ipmaBox(source(), mAssociations);
return ipmaBox.parse(offset, size);
@@ -1024,7 +1026,7 @@
struct InfeBox : public FullBox {
InfeBox(DataSourceHelper *source) :
- FullBox(source, FOURCC('i', 'n', 'f', 'e')) {}
+ FullBox(source, FOURCC("infe")) {}
status_t parse(off64_t offset, size_t size, ItemInfo *itemInfo);
@@ -1104,7 +1106,7 @@
}
ALOGV("item_name %s", item_name.c_str());
- if (item_type == FOURCC('m', 'i', 'm', 'e')) {
+ if (item_type == FOURCC("mime")) {
String8 content_type;
if (!parseNullTerminatedString(&offset, &size, &content_type)) {
return ERROR_MALFORMED;
@@ -1117,7 +1119,7 @@
return ERROR_MALFORMED;
}
}
- } else if (item_type == FOURCC('u', 'r', 'i', ' ')) {
+ } else if (item_type == FOURCC("uri ")) {
String8 item_uri_type;
if (!parseNullTerminatedString(&offset, &size, &item_uri_type)) {
return ERROR_MALFORMED;
@@ -1129,19 +1131,18 @@
struct IinfBox : public FullBox {
IinfBox(DataSourceHelper *source, Vector<ItemInfo> *itemInfos) :
- FullBox(source, FOURCC('i', 'i', 'n', 'f')),
- mItemInfos(itemInfos), mHasGrids(false) {}
+ FullBox(source, FOURCC("iinf")), mItemInfos(itemInfos) {}
status_t parse(off64_t offset, size_t size);
- bool hasGrids() { return mHasGrids; }
+ bool hasFourCC(uint32_t type) { return mFourCCSeen.count(type) > 0; }
protected:
status_t onChunkData(uint32_t type, off64_t offset, size_t size) override;
private:
Vector<ItemInfo> *mItemInfos;
- bool mHasGrids;
+ std::unordered_set<uint32_t> mFourCCSeen;
};
status_t IinfBox::parse(off64_t offset, size_t size) {
@@ -1179,7 +1180,7 @@
}
status_t IinfBox::onChunkData(uint32_t type, off64_t offset, size_t size) {
- if (type != FOURCC('i', 'n', 'f', 'e')) {
+ if (type != FOURCC("infe")) {
return OK;
}
@@ -1188,7 +1189,7 @@
status_t err = infeBox.parse(offset, size, &itemInfo);
if (err == OK) {
mItemInfos->push_back(itemInfo);
- mHasGrids |= (itemInfo.itemType == FOURCC('g', 'r', 'i', 'd'));
+ mFourCCSeen.insert(itemInfo.itemType);
}
// InfeBox parse returns ERROR_UNSUPPORTED if the box if an unsupported
// version. Ignore this error as it's not fatal.
@@ -1214,31 +1215,31 @@
status_t ItemTable::parse(uint32_t type, off64_t data_offset, size_t chunk_data_size) {
switch(type) {
- case FOURCC('i', 'l', 'o', 'c'):
+ case FOURCC("iloc"):
{
return parseIlocBox(data_offset, chunk_data_size);
}
- case FOURCC('i', 'i', 'n', 'f'):
+ case FOURCC("iinf"):
{
return parseIinfBox(data_offset, chunk_data_size);
}
- case FOURCC('i', 'p', 'r', 'p'):
+ case FOURCC("iprp"):
{
return parseIprpBox(data_offset, chunk_data_size);
}
- case FOURCC('p', 'i', 't', 'm'):
+ case FOURCC("pitm"):
{
return parsePitmBox(data_offset, chunk_data_size);
}
- case FOURCC('i', 'd', 'a', 't'):
+ case FOURCC("idat"):
{
return parseIdatBox(data_offset, chunk_data_size);
}
- case FOURCC('i', 'r', 'e', 'f'):
+ case FOURCC("iref"):
{
return parseIrefBox(data_offset, chunk_data_size);
}
- case FOURCC('i', 'p', 'r', 'o'):
+ case FOURCC("ipro"):
{
ALOGW("ipro box not supported!");
break;
@@ -1277,7 +1278,7 @@
return err;
}
- if (iinfBox.hasGrids()) {
+ if (iinfBox.hasFourCC(FOURCC("grid")) || iinfBox.hasFourCC(FOURCC("Exif"))) {
mRequiredBoxes.insert('iref');
}
@@ -1355,9 +1356,9 @@
// 'grid': derived image from tiles
// 'hvc1': coded image (or tile)
// 'Exif': EXIF metadata
- if (info.itemType != FOURCC('g', 'r', 'i', 'd') &&
- info.itemType != FOURCC('h', 'v', 'c', '1') &&
- info.itemType != FOURCC('E', 'x', 'i', 'f')) {
+ if (info.itemType != FOURCC("grid") &&
+ info.itemType != FOURCC("hvc1") &&
+ info.itemType != FOURCC("Exif")) {
continue;
}
@@ -1380,7 +1381,7 @@
return ERROR_MALFORMED;
}
- if (info.itemType == FOURCC('E', 'x', 'i', 'f')) {
+ if (info.itemType == FOURCC("Exif")) {
// Only add if the Exif data is non-empty. The first 4 bytes contain
// the offset to TIFF header, which the Exif parser doesn't use.
if (size > 4) {
@@ -1687,8 +1688,31 @@
}
// skip the first 4-byte of the offset to TIFF header
- *offset = mItemIdToExifMap[exifIndex].offset + 4;
- *size = mItemIdToExifMap[exifIndex].size - 4;
+ uint32_t tiffOffset;
+ if (!mDataSource->readAt(
+ mItemIdToExifMap[exifIndex].offset, &tiffOffset, 4)) {
+ return ERROR_IO;
+ }
+
+ // We need 'Exif\0\0' before the TIFF header
+ tiffOffset = ntohl(tiffOffset);
+ if (tiffOffset < 6) {
+ return ERROR_MALFORMED;
+ }
+ // The first 4 bytes of the item are the offset of the TIFF header within the
+ // Exif data. The size of the item should be > 4 for a non-empty Exif (this
+ // was already checked when the item was added). Also check that the TIFF
+ // header offset is valid.
+ if (mItemIdToExifMap[exifIndex].size <= 4 ||
+ tiffOffset > mItemIdToExifMap[exifIndex].size - 4) {
+ return ERROR_MALFORMED;
+ }
+
+ // Offset of 'Exif\0\0' relative to the beginning of the 'Exif' item
+ // (the first 4 bytes are the TIFF header offset)
+ uint32_t exifOffset = 4 + tiffOffset - 6;
+ *offset = mItemIdToExifMap[exifIndex].offset + exifOffset;
+ *size = mItemIdToExifMap[exifIndex].size - exifOffset;
return OK;
}
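The rewritten block above no longer assumes the TIFF header starts right after the item's 4-byte offset field; it reads that offset, requires it to be at least 6 so there is room for the 'Exif\0\0' marker, and returns a range that starts at the marker. A worked example with illustrative numbers: if the Exif item begins at byte 1000 and its first 4 bytes hold tiffOffset = 6, then exifOffset = 4 + 6 - 6 = 4, so the returned offset is byte 1004 (the start of 'Exif\0\0') and only the 4-byte offset field is trimmed from the size; a larger tiffOffset simply shifts the returned start by the extra bytes that precede the marker.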
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
old mode 100644
new mode 100755
index cc1534a..c776c51
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -308,43 +308,46 @@
static const char *FourCC2MIME(uint32_t fourcc) {
switch (fourcc) {
- case FOURCC('m', 'p', '4', 'a'):
+ case FOURCC("mp4a"):
return MEDIA_MIMETYPE_AUDIO_AAC;
- case FOURCC('s', 'a', 'm', 'r'):
+ case FOURCC("samr"):
return MEDIA_MIMETYPE_AUDIO_AMR_NB;
- case FOURCC('s', 'a', 'w', 'b'):
+ case FOURCC("sawb"):
return MEDIA_MIMETYPE_AUDIO_AMR_WB;
- case FOURCC('e', 'c', '-', '3'):
+ case FOURCC("ec-3"):
return MEDIA_MIMETYPE_AUDIO_EAC3;
- case FOURCC('m', 'p', '4', 'v'):
+ case FOURCC("mp4v"):
return MEDIA_MIMETYPE_VIDEO_MPEG4;
- case FOURCC('s', '2', '6', '3'):
- case FOURCC('h', '2', '6', '3'):
- case FOURCC('H', '2', '6', '3'):
+ case FOURCC("s263"):
+ case FOURCC("h263"):
+ case FOURCC("H263"):
return MEDIA_MIMETYPE_VIDEO_H263;
- case FOURCC('a', 'v', 'c', '1'):
+ case FOURCC("avc1"):
return MEDIA_MIMETYPE_VIDEO_AVC;
- case FOURCC('h', 'v', 'c', '1'):
- case FOURCC('h', 'e', 'v', '1'):
+ case FOURCC("hvc1"):
+ case FOURCC("hev1"):
return MEDIA_MIMETYPE_VIDEO_HEVC;
- case FOURCC('a', 'c', '-', '4'):
+ case FOURCC("ac-4"):
return MEDIA_MIMETYPE_AUDIO_AC4;
- case FOURCC('t', 'w', 'o', 's'):
- case FOURCC('s', 'o', 'w', 't'):
+ case FOURCC("twos"):
+ case FOURCC("sowt"):
return MEDIA_MIMETYPE_AUDIO_RAW;
- case FOURCC('a', 'l', 'a', 'c'):
+ case FOURCC("alac"):
return MEDIA_MIMETYPE_AUDIO_ALAC;
- case FOURCC('a', 'v', '0', '1'):
+ case FOURCC("av01"):
return MEDIA_MIMETYPE_VIDEO_AV1;
+ case FOURCC(".mp3"):
+ case 0x6D730055: // "ms U" mp3 audio
+ return MEDIA_MIMETYPE_AUDIO_MPEG;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -594,7 +597,7 @@
}
} else {
uint32_t sampleIndex;
- uint32_t sampleTime;
+ uint64_t sampleTime;
if (track->timescale != 0 &&
track->sampleTable->findThumbnailSample(&sampleIndex) == OK
&& track->sampleTable->getMetaDataForSample(
@@ -749,21 +752,21 @@
static bool underMetaDataPath(const Vector<uint32_t> &path) {
return path.size() >= 5
- && path[0] == FOURCC('m', 'o', 'o', 'v')
- && path[1] == FOURCC('u', 'd', 't', 'a')
- && path[2] == FOURCC('m', 'e', 't', 'a')
- && path[3] == FOURCC('i', 'l', 's', 't');
+ && path[0] == FOURCC("moov")
+ && path[1] == FOURCC("udta")
+ && path[2] == FOURCC("meta")
+ && path[3] == FOURCC("ilst");
}
static bool underQTMetaPath(const Vector<uint32_t> &path, int32_t depth) {
return path.size() >= 2
- && path[0] == FOURCC('m', 'o', 'o', 'v')
- && path[1] == FOURCC('m', 'e', 't', 'a')
+ && path[0] == FOURCC("moov")
+ && path[1] == FOURCC("meta")
&& (depth == 2
|| (depth == 3
- && (path[2] == FOURCC('h', 'd', 'l', 'r')
- || path[2] == FOURCC('i', 'l', 's', 't')
- || path[2] == FOURCC('k', 'e', 'y', 's'))));
+ && (path[2] == FOURCC("hdlr")
+ || path[2] == FOURCC("ilst")
+ || path[2] == FOURCC("keys"))));
}
// Given a time in seconds since Jan 1 1904, produce a human-readable string.
@@ -867,7 +870,7 @@
ALOGE("b/23540914");
return ERROR_MALFORMED;
}
- if (chunk_type != FOURCC('m', 'd', 'a', 't') && chunk_data_size > kMaxAtomSize) {
+ if (chunk_type != FOURCC("mdat") && chunk_data_size > kMaxAtomSize) {
char errMsg[100];
sprintf(errMsg, "%s atom has size %" PRId64, chunk, chunk_data_size);
ALOGE("%s (b/28615448)", errMsg);
@@ -875,8 +878,8 @@
return ERROR_MALFORMED;
}
- if (chunk_type != FOURCC('c', 'p', 'r', 't')
- && chunk_type != FOURCC('c', 'o', 'v', 'r')
+ if (chunk_type != FOURCC("cprt")
+ && chunk_type != FOURCC("covr")
&& mPath.size() == 5 && underMetaDataPath(mPath)) {
off64_t stop_offset = *offset + chunk_size;
*offset = data_offset;
@@ -895,40 +898,40 @@
}
switch(chunk_type) {
- case FOURCC('m', 'o', 'o', 'v'):
- case FOURCC('t', 'r', 'a', 'k'):
- case FOURCC('m', 'd', 'i', 'a'):
- case FOURCC('m', 'i', 'n', 'f'):
- case FOURCC('d', 'i', 'n', 'f'):
- case FOURCC('s', 't', 'b', 'l'):
- case FOURCC('m', 'v', 'e', 'x'):
- case FOURCC('m', 'o', 'o', 'f'):
- case FOURCC('t', 'r', 'a', 'f'):
- case FOURCC('m', 'f', 'r', 'a'):
- case FOURCC('u', 'd', 't', 'a'):
- case FOURCC('i', 'l', 's', 't'):
- case FOURCC('s', 'i', 'n', 'f'):
- case FOURCC('s', 'c', 'h', 'i'):
- case FOURCC('e', 'd', 't', 's'):
- case FOURCC('w', 'a', 'v', 'e'):
+ case FOURCC("moov"):
+ case FOURCC("trak"):
+ case FOURCC("mdia"):
+ case FOURCC("minf"):
+ case FOURCC("dinf"):
+ case FOURCC("stbl"):
+ case FOURCC("mvex"):
+ case FOURCC("moof"):
+ case FOURCC("traf"):
+ case FOURCC("mfra"):
+ case FOURCC("udta"):
+ case FOURCC("ilst"):
+ case FOURCC("sinf"):
+ case FOURCC("schi"):
+ case FOURCC("edts"):
+ case FOURCC("wave"):
{
- if (chunk_type == FOURCC('m', 'o', 'o', 'v') && depth != 0) {
+ if (chunk_type == FOURCC("moov") && depth != 0) {
ALOGE("moov: depth %d", depth);
return ERROR_MALFORMED;
}
- if (chunk_type == FOURCC('m', 'o', 'o', 'v') && mInitCheck == OK) {
+ if (chunk_type == FOURCC("moov") && mInitCheck == OK) {
ALOGE("duplicate moov");
return ERROR_MALFORMED;
}
- if (chunk_type == FOURCC('m', 'o', 'o', 'f') && !mMoofFound) {
+ if (chunk_type == FOURCC("moof") && !mMoofFound) {
// store the offset of the first segment
mMoofFound = true;
mMoofOffset = *offset;
}
- if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
+ if (chunk_type == FOURCC("stbl")) {
ALOGV("sampleTable chunk is %" PRIu64 " bytes long.", chunk_size);
if (mDataSource->flags()
@@ -954,7 +957,7 @@
}
bool isTrack = false;
- if (chunk_type == FOURCC('t', 'r', 'a', 'k')) {
+ if (chunk_type == FOURCC("trak")) {
if (depth != 1) {
ALOGE("trak: depth %d", depth);
return ERROR_MALFORMED;
@@ -1049,7 +1052,7 @@
return OK;
}
- } else if (chunk_type == FOURCC('m', 'o', 'o', 'v')) {
+ } else if (chunk_type == FOURCC("moov")) {
mInitCheck = OK;
return UNKNOWN_ERROR; // Return a dummy error.
@@ -1057,7 +1060,7 @@
break;
}
- case FOURCC('s', 'c', 'h', 'm'):
+ case FOURCC("schm"):
{
*offset += chunk_size;
@@ -1072,23 +1075,23 @@
scheme_type = ntohl(scheme_type);
int32_t mode = kCryptoModeUnencrypted;
switch(scheme_type) {
- case FOURCC('c', 'b', 'c', '1'):
+ case FOURCC("cbc1"):
{
mode = kCryptoModeAesCbc;
break;
}
- case FOURCC('c', 'b', 'c', 's'):
+ case FOURCC("cbcs"):
{
mode = kCryptoModeAesCbc;
mLastTrack->subsample_encryption = true;
break;
}
- case FOURCC('c', 'e', 'n', 'c'):
+ case FOURCC("cenc"):
{
mode = kCryptoModeAesCtr;
break;
}
- case FOURCC('c', 'e', 'n', 's'):
+ case FOURCC("cens"):
{
mode = kCryptoModeAesCtr;
mLastTrack->subsample_encryption = true;
@@ -1102,7 +1105,7 @@
}
- case FOURCC('e', 'l', 's', 't'):
+ case FOURCC("elst"):
{
*offset += chunk_size;
@@ -1158,7 +1161,7 @@
break;
}
- case FOURCC('f', 'r', 'm', 'a'):
+ case FOURCC("frma"):
{
*offset += chunk_size;
@@ -1187,7 +1190,7 @@
// If format type is 'alac', it is necessary to get the parameters
// from a alac atom spreading behind the frma atom.
// See 'external/alac/ALACMagicCookieDescription.txt'.
- if (original_fourcc == FOURCC('a', 'l', 'a', 'c')) {
+ if (original_fourcc == FOURCC("alac")) {
// Store ALAC magic cookie (decoder needs it).
uint8_t alacInfo[12];
data_offset = *offset;
@@ -1197,7 +1200,7 @@
}
uint32_t size = U32_AT(&alacInfo[0]);
if ((size != ALAC_SPECIFIC_INFO_SIZE) ||
- (U32_AT(&alacInfo[4]) != FOURCC('a', 'l', 'a', 'c')) ||
+ (U32_AT(&alacInfo[4]) != FOURCC("alac")) ||
(U32_AT(&alacInfo[8]) != 0)) {
return ERROR_MALFORMED;
}
@@ -1226,7 +1229,7 @@
break;
}
- case FOURCC('t', 'e', 'n', 'c'):
+ case FOURCC("tenc"):
{
*offset += chunk_size;
@@ -1339,7 +1342,7 @@
break;
}
- case FOURCC('t', 'k', 'h', 'd'):
+ case FOURCC("tkhd"):
{
*offset += chunk_size;
@@ -1351,7 +1354,7 @@
break;
}
- case FOURCC('t', 'r', 'e', 'f'):
+ case FOURCC("tref"):
{
off64_t stop_offset = *offset + chunk_size;
*offset = data_offset;
@@ -1367,7 +1370,7 @@
break;
}
- case FOURCC('t', 'h', 'm', 'b'):
+ case FOURCC("thmb"):
{
*offset += chunk_size;
@@ -1384,7 +1387,7 @@
break;
}
- case FOURCC('p', 's', 's', 'h'):
+ case FOURCC("pssh"):
{
*offset += chunk_size;
@@ -1420,7 +1423,7 @@
break;
}
- case FOURCC('m', 'd', 'h', 'd'):
+ case FOURCC("mdhd"):
{
*offset += chunk_size;
@@ -1516,7 +1519,7 @@
break;
}
- case FOURCC('s', 't', 's', 'd'):
+ case FOURCC("stsd"):
{
uint8_t buffer[8];
if (chunk_data_size < (off64_t)sizeof(buffer)) {
@@ -1568,7 +1571,7 @@
}
break;
}
- case FOURCC('m', 'e', 't', 't'):
+ case FOURCC("mett"):
{
*offset += chunk_size;
@@ -1622,17 +1625,18 @@
break;
}
- case FOURCC('m', 'p', '4', 'a'):
- case FOURCC('e', 'n', 'c', 'a'):
- case FOURCC('s', 'a', 'm', 'r'):
- case FOURCC('s', 'a', 'w', 'b'):
- case FOURCC('t', 'w', 'o', 's'):
- case FOURCC('s', 'o', 'w', 't'):
- case FOURCC('a', 'l', 'a', 'c'):
+ case FOURCC("mp4a"):
+ case FOURCC("enca"):
+ case FOURCC("samr"):
+ case FOURCC("sawb"):
+ case FOURCC("twos"):
+ case FOURCC("sowt"):
+ case FOURCC("alac"):
+ case FOURCC(".mp3"):
+ case 0x6D730055: // "ms U" mp3 audio
{
- if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')
- && depth >= 1 && mPath[depth - 1] == FOURCC('w', 'a', 'v', 'e')) {
- // Ignore mp4a embedded in QT wave atom
+ if (mIsQT && depth >= 1 && mPath[depth - 1] == FOURCC("wave")) {
+ // Ignore all atoms embedded in QT wave atom
*offset += chunk_size;
break;
}
@@ -1661,7 +1665,7 @@
off64_t stop_offset = *offset + chunk_size;
*offset = data_offset + sizeof(buffer);
- if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')) {
+ if (mIsQT) {
if (version == 1) {
if (mDataSource->readAt(*offset, buffer, 16) < 16) {
return ERROR_IO;
@@ -1694,7 +1698,7 @@
}
}
- if (chunk_type != FOURCC('e', 'n', 'c', 'a')) {
+ if (chunk_type != FOURCC("enca")) {
// if the chunk type is enca, we'll get the type from the frma box later
AMediaFormat_setString(mLastTrack->meta,
AMEDIAFORMAT_KEY_MIME, FourCC2MIME(chunk_type));
@@ -1703,7 +1707,7 @@
if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, FourCC2MIME(chunk_type))) {
AMediaFormat_setInt32(mLastTrack->meta,
AMEDIAFORMAT_KEY_BITS_PER_SAMPLE, sample_size);
- if (chunk_type == FOURCC('t', 'w', 'o', 's')) {
+ if (chunk_type == FOURCC("twos")) {
AMediaFormat_setInt32(mLastTrack->meta,
AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN, 1);
}
@@ -1714,7 +1718,7 @@
AMediaFormat_setInt32(mLastTrack->meta, AMEDIAFORMAT_KEY_CHANNEL_COUNT, num_channels);
AMediaFormat_setInt32(mLastTrack->meta, AMEDIAFORMAT_KEY_SAMPLE_RATE, sample_rate);
- if (chunk_type == FOURCC('a', 'l', 'a', 'c')) {
+ if (chunk_type == FOURCC("alac")) {
// See 'external/alac/ALACMagicCookieDescription.txt for the detail'.
// Store ALAC magic cookie (decoder needs it).
@@ -1726,7 +1730,7 @@
}
uint32_t size = U32_AT(&alacInfo[0]);
if ((size != ALAC_SPECIFIC_INFO_SIZE) ||
- (U32_AT(&alacInfo[4]) != FOURCC('a', 'l', 'a', 'c')) ||
+ (U32_AT(&alacInfo[4]) != FOURCC("alac")) ||
(U32_AT(&alacInfo[8]) != 0)) {
return ERROR_MALFORMED;
}
@@ -1764,15 +1768,15 @@
break;
}
- case FOURCC('m', 'p', '4', 'v'):
- case FOURCC('e', 'n', 'c', 'v'):
- case FOURCC('s', '2', '6', '3'):
- case FOURCC('H', '2', '6', '3'):
- case FOURCC('h', '2', '6', '3'):
- case FOURCC('a', 'v', 'c', '1'):
- case FOURCC('h', 'v', 'c', '1'):
- case FOURCC('h', 'e', 'v', '1'):
- case FOURCC('a', 'v', '0', '1'):
+ case FOURCC("mp4v"):
+ case FOURCC("encv"):
+ case FOURCC("s263"):
+ case FOURCC("H263"):
+ case FOURCC("h263"):
+ case FOURCC("avc1"):
+ case FOURCC("hvc1"):
+ case FOURCC("hev1"):
+ case FOURCC("av01"):
{
uint8_t buffer[78];
if (chunk_data_size < (ssize_t)sizeof(buffer)) {
@@ -1802,7 +1806,7 @@
if (mLastTrack == NULL)
return ERROR_MALFORMED;
- if (chunk_type != FOURCC('e', 'n', 'c', 'v')) {
+ if (chunk_type != FOURCC("encv")) {
// if the chunk type is encv, we'll get the type from the frma box later
AMediaFormat_setString(mLastTrack->meta,
AMEDIAFORMAT_KEY_MIME, FourCC2MIME(chunk_type));
@@ -1825,8 +1829,8 @@
break;
}
- case FOURCC('s', 't', 'c', 'o'):
- case FOURCC('c', 'o', '6', '4'):
+ case FOURCC("stco"):
+ case FOURCC("co64"):
{
if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
return ERROR_MALFORMED;
@@ -1845,7 +1849,7 @@
break;
}
- case FOURCC('s', 't', 's', 'c'):
+ case FOURCC("stsc"):
{
if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
return ERROR_MALFORMED;
@@ -1863,8 +1867,8 @@
break;
}
- case FOURCC('s', 't', 's', 'z'):
- case FOURCC('s', 't', 'z', '2'):
+ case FOURCC("stsz"):
+ case FOURCC("stz2"):
{
if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL)) {
return ERROR_MALFORMED;
@@ -1983,13 +1987,20 @@
break;
}
- case FOURCC('s', 't', 't', 's'):
+ case FOURCC("stts"):
{
if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
return ERROR_MALFORMED;
*offset += chunk_size;
+ if (depth >= 1 && mPath[depth - 1] != FOURCC("stbl")) {
+ char chunk[5];
+ MakeFourCCString(mPath[depth - 1], chunk);
+ ALOGW("stts's parent box (%s) is not stbl, skip it.", chunk);
+ break;
+ }
+
status_t err =
mLastTrack->sampleTable->setTimeToSampleParams(
data_offset, chunk_data_size);
@@ -2001,7 +2012,7 @@
break;
}
- case FOURCC('c', 't', 't', 's'):
+ case FOURCC("ctts"):
{
if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
return ERROR_MALFORMED;
@@ -2019,7 +2030,7 @@
break;
}
- case FOURCC('s', 't', 's', 's'):
+ case FOURCC("stss"):
{
if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
return ERROR_MALFORMED;
@@ -2038,7 +2049,7 @@
}
// \xA9xyz
- case FOURCC(0xA9, 'x', 'y', 'z'):
+ case FOURCC("\251xyz"):
{
*offset += chunk_size;
@@ -2088,7 +2099,7 @@
break;
}
- case FOURCC('e', 's', 'd', 's'):
+ case FOURCC("esds"):
{
*offset += chunk_size;
@@ -2096,9 +2107,10 @@
return ERROR_MALFORMED;
}
- uint8_t buffer[256];
- if (chunk_data_size > (off64_t)sizeof(buffer)) {
- return ERROR_BUFFER_TOO_SMALL;
+ auto tmp = heapbuffer<uint8_t>(chunk_data_size);
+ uint8_t *buffer = tmp.get();
+ if (buffer == NULL) {
+ return -ENOMEM;
}
if (mDataSource->readAt(
@@ -2118,7 +2130,7 @@
AMEDIAFORMAT_KEY_ESDS, &buffer[4], chunk_data_size - 4);
if (mPath.size() >= 2
- && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'a')) {
+ && mPath[mPath.size() - 2] == FOURCC("mp4a")) {
// Information from the ESDS must be relied on for proper
// setup of sample rate and channel count for MPEG4 Audio.
// The generic header appears to only contain generic
@@ -2132,7 +2144,7 @@
}
}
if (mPath.size() >= 2
- && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'v')) {
+ && mPath[mPath.size() - 2] == FOURCC("mp4v")) {
// Check if the video is MPEG2
ESDS esds(&buffer[4], chunk_data_size - 4);
@@ -2147,7 +2159,7 @@
break;
}
- case FOURCC('b', 't', 'r', 't'):
+ case FOURCC("btrt"):
{
*offset += chunk_size;
if (mLastTrack == NULL) {
@@ -2177,7 +2189,7 @@
break;
}
- case FOURCC('a', 'v', 'c', 'C'):
+ case FOURCC("avcC"):
{
*offset += chunk_size;
@@ -2201,7 +2213,7 @@
break;
}
- case FOURCC('h', 'v', 'c', 'C'):
+ case FOURCC("hvcC"):
{
auto buffer = heapbuffer<uint8_t>(chunk_data_size);
@@ -2224,8 +2236,30 @@
*offset += chunk_size;
break;
}
+ case FOURCC("av1C"):
+ {
+ auto buffer = heapbuffer<uint8_t>(chunk_data_size);
- case FOURCC('d', '2', '6', '3'):
+ if (buffer.get() == NULL) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+
+ if (mDataSource->readAt(
+ data_offset, buffer.get(), chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ AMediaFormat_setBuffer(mLastTrack->meta,
+ AMEDIAFORMAT_KEY_CSD_0, buffer.get(), chunk_data_size);
+
+ *offset += chunk_size;
+ break;
+ }
+ case FOURCC("d263"):
{
*offset += chunk_size;
/*
@@ -2260,7 +2294,7 @@
break;
}
- case FOURCC('m', 'e', 't', 'a'):
+ case FOURCC("meta"):
{
off64_t stop_offset = *offset + chunk_size;
*offset = data_offset;
@@ -2304,13 +2338,13 @@
break;
}
- case FOURCC('i', 'l', 'o', 'c'):
- case FOURCC('i', 'i', 'n', 'f'):
- case FOURCC('i', 'p', 'r', 'p'):
- case FOURCC('p', 'i', 't', 'm'):
- case FOURCC('i', 'd', 'a', 't'):
- case FOURCC('i', 'r', 'e', 'f'):
- case FOURCC('i', 'p', 'r', 'o'):
+ case FOURCC("iloc"):
+ case FOURCC("iinf"):
+ case FOURCC("iprp"):
+ case FOURCC("pitm"):
+ case FOURCC("idat"):
+ case FOURCC("iref"):
+ case FOURCC("ipro"):
{
if (mIsHeif) {
if (mItemTable == NULL) {
@@ -2326,9 +2360,9 @@
break;
}
- case FOURCC('m', 'e', 'a', 'n'):
- case FOURCC('n', 'a', 'm', 'e'):
- case FOURCC('d', 'a', 't', 'a'):
+ case FOURCC("mean"):
+ case FOURCC("name"):
+ case FOURCC("data"):
{
*offset += chunk_size;
@@ -2343,7 +2377,7 @@
break;
}
- case FOURCC('m', 'v', 'h', 'd'):
+ case FOURCC("mvhd"):
{
*offset += chunk_size;
@@ -2395,7 +2429,7 @@
break;
}
- case FOURCC('m', 'e', 'h', 'd'):
+ case FOURCC("mehd"):
{
*offset += chunk_size;
@@ -2440,7 +2474,7 @@
break;
}
- case FOURCC('m', 'd', 'a', 't'):
+ case FOURCC("mdat"):
{
mMdatFound = true;
@@ -2448,7 +2482,7 @@
break;
}
- case FOURCC('h', 'd', 'l', 'r'):
+ case FOURCC("hdlr"):
{
*offset += chunk_size;
@@ -2466,7 +2500,7 @@
// For the 3GPP file format, the handler-type within the 'hdlr' box
// shall be 'text'. We also want to support 'sbtl' handler type
// for a practical reason as various MPEG4 containers use it.
- if (type == FOURCC('t', 'e', 'x', 't') || type == FOURCC('s', 'b', 't', 'l')) {
+ if (type == FOURCC("text") || type == FOURCC("sbtl")) {
if (mLastTrack != NULL) {
AMediaFormat_setString(mLastTrack->meta,
AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_TEXT_3GPP);
@@ -2476,7 +2510,7 @@
break;
}
- case FOURCC('k', 'e', 'y', 's'):
+ case FOURCC("keys"):
{
*offset += chunk_size;
@@ -2489,7 +2523,7 @@
break;
}
- case FOURCC('t', 'r', 'e', 'x'):
+ case FOURCC("trex"):
{
*offset += chunk_size;
@@ -2508,7 +2542,7 @@
break;
}
- case FOURCC('t', 'x', '3', 'g'):
+ case FOURCC("tx3g"):
{
if (mLastTrack == NULL)
return ERROR_MALFORMED;
@@ -2552,7 +2586,7 @@
break;
}
- case FOURCC('c', 'o', 'v', 'r'):
+ case FOURCC("covr"):
{
*offset += chunk_size;
@@ -2583,12 +2617,12 @@
break;
}
- case FOURCC('c', 'o', 'l', 'r'):
+ case FOURCC("colr"):
{
*offset += chunk_size;
// this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
// ignore otherwise
- if (depth >= 2 && mPath[depth - 2] == FOURCC('s', 't', 's', 'd')) {
+ if (depth >= 2 && mPath[depth - 2] == FOURCC("stsd")) {
status_t err = parseColorInfo(data_offset, chunk_data_size);
if (err != OK) {
return err;
@@ -2598,12 +2632,12 @@
break;
}
- case FOURCC('t', 'i', 't', 'l'):
- case FOURCC('p', 'e', 'r', 'f'):
- case FOURCC('a', 'u', 't', 'h'):
- case FOURCC('g', 'n', 'r', 'e'):
- case FOURCC('a', 'l', 'b', 'm'):
- case FOURCC('y', 'r', 'r', 'c'):
+ case FOURCC("titl"):
+ case FOURCC("perf"):
+ case FOURCC("auth"):
+ case FOURCC("gnre"):
+ case FOURCC("albm"):
+ case FOURCC("yrrc"):
{
*offset += chunk_size;
@@ -2616,7 +2650,7 @@
break;
}
- case FOURCC('I', 'D', '3', '2'):
+ case FOURCC("ID32"):
{
*offset += chunk_size;
@@ -2629,7 +2663,7 @@
break;
}
- case FOURCC('-', '-', '-', '-'):
+ case FOURCC("----"):
{
mLastCommentMean.clear();
mLastCommentName.clear();
@@ -2638,7 +2672,7 @@
break;
}
- case FOURCC('s', 'i', 'd', 'x'):
+ case FOURCC("sidx"):
{
status_t err = parseSegmentIndex(data_offset, chunk_data_size);
if (err != OK) {
@@ -2648,25 +2682,46 @@
return UNKNOWN_ERROR; // stop parsing after sidx
}
- case FOURCC('a', 'c', '-', '3'):
+ case FOURCC("ac-3"):
{
*offset += chunk_size;
- return parseAC3SpecificBox(data_offset);
+ // bypass ac-3 if parsing fails
+ if (parseAC3SpecificBox(data_offset) != OK) {
+ if (mLastTrack != NULL) {
+ ALOGW("Fail to parse ac-3");
+ mLastTrack->skipTrack = true;
+ }
+ }
+ return OK;
}
- case FOURCC('e', 'c', '-', '3'):
+ case FOURCC("ec-3"):
{
*offset += chunk_size;
- return parseEAC3SpecificBox(data_offset);
+ // bypass ec-3 if parsing fails
+ if (parseEAC3SpecificBox(data_offset) != OK) {
+ if (mLastTrack != NULL) {
+ ALOGW("Fail to parse ec-3");
+ mLastTrack->skipTrack = true;
+ }
+ }
+ return OK;
}
- case FOURCC('a', 'c', '-', '4'):
+ case FOURCC("ac-4"):
{
*offset += chunk_size;
- return parseAC4SpecificBox(data_offset);
+ // bypass ac-4 if parsing fails
+ if (parseAC4SpecificBox(data_offset) != OK) {
+ if (mLastTrack != NULL) {
+ ALOGW("Fail to parse ac-4");
+ mLastTrack->skipTrack = true;
+ }
+ }
+ return OK;
}
- case FOURCC('f', 't', 'y', 'p'):
+ case FOURCC("ftyp"):
{
if (chunk_data_size < 8 || depth != 0) {
return ERROR_MALFORMED;
@@ -2691,16 +2746,16 @@
brandSet.insert(brand);
}
- if (brandSet.count(FOURCC('q', 't', ' ', ' ')) > 0) {
+ if (brandSet.count(FOURCC("qt ")) > 0) {
mIsQT = true;
} else {
- if (brandSet.count(FOURCC('m', 'i', 'f', '1')) > 0
- && brandSet.count(FOURCC('h', 'e', 'i', 'c')) > 0) {
+ if (brandSet.count(FOURCC("mif1")) > 0
+ && brandSet.count(FOURCC("heic")) > 0) {
ALOGV("identified HEIF image");
mIsHeif = true;
- brandSet.erase(FOURCC('m', 'i', 'f', '1'));
- brandSet.erase(FOURCC('h', 'e', 'i', 'c'));
+ brandSet.erase(FOURCC("mif1"));
+ brandSet.erase(FOURCC("heic"));
}
if (!brandSet.empty()) {
@@ -2787,7 +2842,7 @@
// + 4-byte size
offset += 4;
uint32_t type;
- if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '4')) {
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC("dac4")) {
ALOGE("MPEG4Extractor: error while reading ac-4 specific block: header not dac4");
return ERROR_MALFORMED;
}
@@ -2914,7 +2969,7 @@
offset += 4;
uint32_t type;
- if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'e', 'c', '3')) {
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC("dec3")) {
ALOGE("MPEG4Extractor: error while reading eac-3 specific block: header not dec3");
return ERROR_MALFORMED;
}
@@ -2966,8 +3021,10 @@
}
unsigned bsid = br.getBits(5);
- if (bsid < 8) {
- ALOGW("Incorrect bsid in EAC3 header. Possibly AC-3?");
+ if (bsid == 9 || bsid == 10) {
+ ALOGW("EAC3 stream (bsid=%d) may be silenced by the decoder", bsid);
+ } else if (bsid > 16) {
+ ALOGE("EAC3 stream (bsid=%d) is not compatible with ETSI TS 102 366 v1.4.1", bsid);
delete[] chunk;
return ERROR_MALFORMED;
}
@@ -3071,7 +3128,7 @@
offset += 4;
uint32_t type;
- if (!mDataSource->getUInt32(offset, &type) || type != FOURCC('d', 'a', 'c', '3')) {
+ if (!mDataSource->getUInt32(offset, &type) || type != FOURCC("dac3")) {
ALOGE("MPEG4Extractor: error while reading ac-3 specific block: header not dac3");
return ERROR_MALFORMED;
}
@@ -3273,7 +3330,7 @@
uint32_t type;
if (!mDataSource->getUInt32(keyOffset + 4, &type)
- || type != FOURCC('m', 'd', 't', 'a')) {
+ || type != FOURCC("mdta")) {
return ERROR_MALFORMED;
}
@@ -3315,7 +3372,7 @@
}
uint32_t atomFourCC;
if (!mDataSource->getUInt32(offset + 4, &atomFourCC)
- || atomFourCC != FOURCC('d', 'a', 't', 'a')) {
+ || atomFourCC != FOURCC("data")) {
return ERROR_MALFORMED;
}
uint32_t dataType;
@@ -3476,48 +3533,48 @@
MakeFourCCString(mPath[4], chunk);
ALOGV("meta: %s @ %lld", chunk, (long long)offset);
switch ((int32_t)mPath[4]) {
- case FOURCC(0xa9, 'a', 'l', 'b'):
+ case FOURCC("\251alb"):
{
metadataKey = "album";
break;
}
- case FOURCC(0xa9, 'A', 'R', 'T'):
+ case FOURCC("\251ART"):
{
metadataKey = "artist";
break;
}
- case FOURCC('a', 'A', 'R', 'T'):
+ case FOURCC("aART"):
{
metadataKey = "albumartist";
break;
}
- case FOURCC(0xa9, 'd', 'a', 'y'):
+ case FOURCC("\251day"):
{
metadataKey = "year";
break;
}
- case FOURCC(0xa9, 'n', 'a', 'm'):
+ case FOURCC("\251nam"):
{
metadataKey = "title";
break;
}
- case FOURCC(0xa9, 'w', 'r', 't'):
+ case FOURCC("\251wrt"):
{
metadataKey = "writer";
break;
}
- case FOURCC('c', 'o', 'v', 'r'):
+ case FOURCC("covr"):
{
metadataKey = "albumart";
break;
}
- case FOURCC('g', 'n', 'r', 'e'):
- case FOURCC(0xa9, 'g', 'e', 'n'):
+ case FOURCC("gnre"):
+ case FOURCC("\251gen"):
{
metadataKey = "genre";
break;
}
- case FOURCC('c', 'p', 'i', 'l'):
+ case FOURCC("cpil"):
{
if (size == 9 && flags == 21) {
char tmp[16];
@@ -3528,7 +3585,7 @@
}
break;
}
- case FOURCC('t', 'r', 'k', 'n'):
+ case FOURCC("trkn"):
{
if (size == 16 && flags == 0) {
char tmp[16];
@@ -3540,7 +3597,7 @@
}
break;
}
- case FOURCC('d', 'i', 's', 'k'):
+ case FOURCC("disk"):
{
if ((size == 14 || size == 16) && flags == 0) {
char tmp[16];
@@ -3552,17 +3609,17 @@
}
break;
}
- case FOURCC('-', '-', '-', '-'):
+ case FOURCC("----"):
{
buffer[size] = '\0';
switch (mPath[5]) {
- case FOURCC('m', 'e', 'a', 'n'):
+ case FOURCC("mean"):
mLastCommentMean.setTo((const char *)buffer + 4);
break;
- case FOURCC('n', 'a', 'm', 'e'):
+ case FOURCC("name"):
mLastCommentName.setTo((const char *)buffer + 4);
break;
- case FOURCC('d', 'a', 't', 'a'):
+ case FOURCC("data"):
if (size < 8) {
delete[] buffer;
buffer = NULL;
@@ -3670,8 +3727,8 @@
}
int32_t type = U32_AT(&buffer[0]);
- if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
- || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
+ if ((type == FOURCC("nclx") && size >= 11)
+ || (type == FOURCC("nclc") && size >= 10)) {
// only store the first color specification
int32_t existingColor;
if (!AMediaFormat_getInt32(mLastTrack->meta,
@@ -3679,7 +3736,7 @@
int32_t primaries = U16_AT(&buffer[4]);
int32_t isotransfer = U16_AT(&buffer[6]);
int32_t coeffs = U16_AT(&buffer[8]);
- bool fullRange = (type == FOURCC('n', 'c', 'l', 'x')) && (buffer[10] & 128);
+ bool fullRange = (type == FOURCC("nclx")) && (buffer[10] & 128);
int32_t range = 0;
int32_t standard = 0;
@@ -3725,27 +3782,27 @@
const char *metadataKey = nullptr;
switch (mPath[depth]) {
- case FOURCC('t', 'i', 't', 'l'):
+ case FOURCC("titl"):
{
metadataKey = "title";
break;
}
- case FOURCC('p', 'e', 'r', 'f'):
+ case FOURCC("perf"):
{
metadataKey = "artist";
break;
}
- case FOURCC('a', 'u', 't', 'h'):
+ case FOURCC("auth"):
{
metadataKey = "writer";
break;
}
- case FOURCC('g', 'n', 'r', 'e'):
+ case FOURCC("gnre"):
{
metadataKey = "genre";
break;
}
- case FOURCC('a', 'l', 'b', 'm'):
+ case FOURCC("albm"):
{
if (buffer[size - 1] != '\0') {
char tmp[4];
@@ -3757,7 +3814,7 @@
metadataKey = "album";
break;
}
- case FOURCC('y', 'r', 'r', 'c'):
+ case FOURCC("yrrc"):
{
if (size < 6) {
delete[] buffer;
@@ -3960,6 +4017,18 @@
if (!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
itemTable = mItemTable;
}
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+ void *data;
+ size_t size;
+ if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
+ return NULL;
+ }
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ if (size < 5 || ptr[0] != 0x81) { // configurationVersion == 1
+ return NULL;
+ }
}
if (track->has_elst and !strncasecmp("video/", mime, 6) and track->elst_media_time > 0) {
@@ -3993,6 +4062,10 @@
if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_HEVC, &data, &size)) {
return ERROR_MALFORMED;
}
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+ if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
+ return ERROR_MALFORMED;
+ }
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
|| !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
@@ -4085,12 +4158,10 @@
return OK;
}
- if (objectTypeIndication == 0x6b) {
- // The media subtype is MP3 audio
- // Our software MP3 audio decoder may not be able to handle
- // packetized MP3 audio; for now, lets just return ERROR_UNSUPPORTED
- ALOGE("MP3 track in MP4/3GPP file is not supported");
- return ERROR_UNSUPPORTED;
+ if (objectTypeIndication == 0x6B || objectTypeIndication == 0x69) {
+ // mp3 audio
+ AMediaFormat_setString(mLastTrack->meta,AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);
+ return OK;
}
if (mLastTrack != NULL) {
@@ -4503,7 +4574,7 @@
}
if (!strncasecmp("video/", mime, 6)) {
- uint32_t firstSampleCTS = 0;
+ uint64_t firstSampleCTS = 0;
err = mSampleTable->getMetaDataForSample(0, NULL, NULL, &firstSampleCTS);
// Start offset should be less or equal to composition time of first sample.
// Composition time stamp of first sample cannot be negative.
@@ -4610,8 +4681,8 @@
switch(chunk_type) {
- case FOURCC('t', 'r', 'a', 'f'):
- case FOURCC('m', 'o', 'o', 'f'): {
+ case FOURCC("traf"):
+ case FOURCC("moof"): {
off64_t stop_offset = *offset + chunk_size;
*offset = data_offset;
while (*offset < stop_offset) {
@@ -4620,7 +4691,7 @@
return err;
}
}
- if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ if (chunk_type == FOURCC("moof")) {
// *offset points to the box following this moof. Find the next moof from there.
while (true) {
@@ -4649,7 +4720,7 @@
return ERROR_MALFORMED;
}
- if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
+ if (chunk_type == FOURCC("moof")) {
mNextMoofOffset = *offset;
break;
} else if (chunk_size == 0) {
@@ -4661,7 +4732,7 @@
break;
}
- case FOURCC('t', 'f', 'h', 'd'): {
+ case FOURCC("tfhd"): {
status_t err;
if ((err = parseTrackFragmentHeader(data_offset, chunk_data_size)) != OK) {
return err;
@@ -4670,7 +4741,7 @@
break;
}
- case FOURCC('t', 'r', 'u', 'n'): {
+ case FOURCC("trun"): {
status_t err;
if (mLastParsedTrackId == mTrackId) {
if ((err = parseTrackFragmentRun(data_offset, chunk_data_size)) != OK) {
@@ -4682,7 +4753,7 @@
break;
}
- case FOURCC('s', 'a', 'i', 'z'): {
+ case FOURCC("saiz"): {
status_t err;
if ((err = parseSampleAuxiliaryInformationSizes(data_offset, chunk_data_size)) != OK) {
return err;
@@ -4690,7 +4761,7 @@
*offset += chunk_size;
break;
}
- case FOURCC('s', 'a', 'i', 'o'): {
+ case FOURCC("saio"): {
status_t err;
if ((err = parseSampleAuxiliaryInformationOffsets(data_offset, chunk_data_size))
!= OK) {
@@ -4700,7 +4771,7 @@
break;
}
- case FOURCC('s', 'e', 'n', 'c'): {
+ case FOURCC("senc"): {
status_t err;
if ((err = parseSampleEncryption(data_offset)) != OK) {
return err;
@@ -4709,7 +4780,7 @@
break;
}
- case FOURCC('m', 'd', 'a', 't'): {
+ case FOURCC("mdat"): {
// parse DRM info if present
ALOGV("MPEG4Source::parseChunk mdat");
// if saiz/saoi was previously observed, do something with the sampleinfos
@@ -4868,7 +4939,9 @@
off64_t offset, bool isSubsampleEncryption, uint32_t flags) {
int32_t ivlength;
- CHECK(AMediaFormat_getInt32(mFormat, AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE, &ivlength));
+ if (!AMediaFormat_getInt32(mFormat, AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE, &ivlength)) {
+ return ERROR_MALFORMED;
+ }
// only 0, 8 and 16 byte initialization vectors are supported
if (ivlength != 0 && ivlength != 8 && ivlength != 16) {
@@ -5349,7 +5422,7 @@
sampleIndex, &syncSampleIndex, findFlags);
}
- uint32_t sampleTime;
+ uint64_t sampleTime;
if (err == OK) {
err = mSampleTable->getMetaDataForSample(
sampleIndex, NULL, NULL, &sampleTime);
@@ -5399,7 +5472,7 @@
off64_t offset = 0;
size_t size = 0;
- uint32_t cts, stts;
+ uint64_t cts, stts;
bool isSyncSample;
bool newBuffer = false;
if (mBuffer == NULL) {
@@ -5609,10 +5682,10 @@
}
if (isMalFormed) {
- ALOGE("Video is malformed");
- mBuffer->release();
- mBuffer = NULL;
- return AMEDIA_ERROR_MALFORMED;
+ // If nalLength is abnormal, ignore this NAL.
+ ALOGW("abnormal nalLength, ignoring this NAL");
+ srcOffset = size;
+ break;
}
if (nalLength == 0) {
@@ -6031,28 +6104,29 @@
static bool isCompatibleBrand(uint32_t fourcc) {
static const uint32_t kCompatibleBrands[] = {
- FOURCC('i', 's', 'o', 'm'),
- FOURCC('i', 's', 'o', '2'),
- FOURCC('a', 'v', 'c', '1'),
- FOURCC('h', 'v', 'c', '1'),
- FOURCC('h', 'e', 'v', '1'),
- FOURCC('a', 'v', '0', '1'),
- FOURCC('3', 'g', 'p', '4'),
- FOURCC('m', 'p', '4', '1'),
- FOURCC('m', 'p', '4', '2'),
- FOURCC('d', 'a', 's', 'h'),
+ FOURCC("isom"),
+ FOURCC("iso2"),
+ FOURCC("avc1"),
+ FOURCC("hvc1"),
+ FOURCC("hev1"),
+ FOURCC("av01"),
+ FOURCC("3gp4"),
+ FOURCC("mp41"),
+ FOURCC("mp42"),
+ FOURCC("dash"),
// Won't promise that the following file types can be played.
// Just give these file types a chance.
- FOURCC('q', 't', ' ', ' '), // Apple's QuickTime
- FOURCC('M', 'S', 'N', 'V'), // Sony's PSP
+ FOURCC("qt "), // Apple's QuickTime
+ FOURCC("MSNV"), // Sony's PSP
+ FOURCC("wmf "),
- FOURCC('3', 'g', '2', 'a'), // 3GPP2
- FOURCC('3', 'g', '2', 'b'),
- FOURCC('m', 'i', 'f', '1'), // HEIF image
- FOURCC('h', 'e', 'i', 'c'), // HEIF image
- FOURCC('m', 's', 'f', '1'), // HEIF image sequence
- FOURCC('h', 'e', 'v', 'c'), // HEIF image sequence
+ FOURCC("3g2a"), // 3GPP2
+ FOURCC("3g2b"),
+ FOURCC("mif1"), // HEIF image
+ FOURCC("heic"), // HEIF image
+ FOURCC("msf1"), // HEIF image sequence
+ FOURCC("hevc"), // HEIF image sequence
};
for (size_t i = 0;
@@ -6120,7 +6194,7 @@
ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld",
chunkstring, chunkSize, (long long)offset);
switch (chunkType) {
- case FOURCC('f', 't', 'y', 'p'):
+ case FOURCC("ftyp"):
{
if (chunkDataSize < 8) {
return false;
@@ -6155,7 +6229,7 @@
break;
}
- case FOURCC('m', 'o', 'o', 'v'):
+ case FOURCC("moov"):
{
moovAtomEndOffset = offset + chunkSize;
@@ -6201,6 +6275,7 @@
static const char *extensions[] = {
"3g2",
+ "3ga",
"3gp",
"3gpp",
"3gpp2",
@@ -6209,6 +6284,7 @@
"m4v",
"mov",
"mp4",
+ "qt",
NULL
};
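Two related changes earlier in this file's diff make MP3 tracks inside MP4/3GPP containers playable instead of rejected: FourCC2MIME() now maps the '.mp3' sample entry and the 0x6D730055 ("ms U") variant to MEDIA_MIMETYPE_AUDIO_MPEG, and the esds handler maps objectTypeIndication 0x6B (MPEG-1 audio) and 0x69 (MPEG-2 audio) to the same MIME type rather than returning ERROR_UNSUPPORTED. A minimal sketch of the mapping; the helper name is illustrative and not part of this patch:

    #include <cstdint>
    #include <cstdio>

    // Illustrative helper only: mirror the objectTypeIndication -> MIME mapping
    // that the patch adds for MP3-in-MP4.
    static const char *otiToMime(uint8_t objectTypeIndication) {
        switch (objectTypeIndication) {
            case 0x6B: // ISO/IEC 11172-3 (MPEG-1 audio, i.e. MP3)
            case 0x69: // ISO/IEC 13818-3 (MPEG-2 backward-compatible audio)
                return "audio/mpeg";
            default:
                return nullptr; // handled elsewhere
        }
    }

    int main() {
        std::printf("%s\n", otiToMime(0x6B)); // audio/mpeg
    }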
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index 1a6d306..2890b26 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -111,8 +111,15 @@
if ((err = getSampleSizeDirect(
firstChunkSampleIndex + i, &sampleSize)) != OK) {
ALOGE("getSampleSizeDirect return error");
- mCurrentChunkSampleSizes.clear();
- return err;
+ // stsc sample count is not sync with stsz sample count
+ if (err == ERROR_OUT_OF_RANGE) {
+ ALOGW("stsc samples(%d) not sync with stsz samples(%d)", mSamplesPerChunk, i);
+ mSamplesPerChunk = i;
+ break;
+ } else{
+ mCurrentChunkSampleSizes.clear();
+ return err;
+ }
}
mCurrentChunkSampleSizes.push(sampleSize);
@@ -301,7 +308,7 @@
}
status_t SampleIterator::findSampleTimeAndDuration(
- uint32_t sampleIndex, uint32_t *time, uint32_t *duration) {
+ uint32_t sampleIndex, uint64_t *time, uint64_t *duration) {
if (sampleIndex >= mTable->mNumSampleSizes) {
return ERROR_OUT_OF_RANGE;
}
@@ -314,8 +321,8 @@
break;
}
if (mTimeToSampleIndex == mTable->mTimeToSampleCount ||
- (mTTSDuration != 0 && mTTSCount > UINT32_MAX / mTTSDuration) ||
- mTTSSampleTime > UINT32_MAX - (mTTSCount * mTTSDuration)) {
+ (mTTSDuration != 0 && mTTSCount > UINT64_MAX / mTTSDuration) ||
+ mTTSSampleTime > UINT64_MAX - (mTTSCount * mTTSDuration)) {
return ERROR_OUT_OF_RANGE;
}
@@ -330,7 +337,7 @@
// below is equivalent to:
// *time = mTTSSampleTime + mTTSDuration * (sampleIndex - mTTSSampleIndex);
- uint32_t tmp;
+ uint64_t tmp;
if (__builtin_sub_overflow(sampleIndex, mTTSSampleIndex, &tmp) ||
__builtin_mul_overflow(mTTSDuration, tmp, &tmp) ||
__builtin_add_overflow(mTTSSampleTime, tmp, &tmp)) {
@@ -340,15 +347,15 @@
int32_t offset = mTable->getCompositionTimeOffset(sampleIndex);
if ((offset < 0 && *time < (offset == INT32_MIN ?
- INT32_MAX : uint32_t(-offset))) ||
- (offset > 0 && *time > UINT32_MAX - offset)) {
- ALOGE("%u + %d would overflow", *time, offset);
+ INT64_MAX : uint64_t(-offset))) ||
+ (offset > 0 && *time > UINT64_MAX - offset)) {
+ ALOGE("%llu + %d would overflow", (unsigned long long) *time, offset);
return ERROR_OUT_OF_RANGE;
}
if (offset > 0) {
*time += offset;
} else {
- *time -= (offset == INT32_MIN ? INT32_MAX : (-offset));
+ *time -= (offset == INT64_MIN ? INT64_MAX : (-offset));
}
*duration = mTTSDuration;
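The widening to uint64_t above keeps sample times and durations from wrapping, and the existing __builtin_sub_overflow / __builtin_mul_overflow / __builtin_add_overflow chain now operates on 64-bit values. A self-contained sketch of the same pattern; the variable names are illustrative, not the extractor's:

    #include <cstdint>
    #include <cstdio>

    // Compute time = baseTime + duration * (index - baseIndex) with explicit
    // overflow checks, mirroring the pattern in findSampleTimeAndDuration().
    static bool sampleTimeFor(uint64_t baseTime, uint64_t duration,
                              uint32_t index, uint32_t baseIndex, uint64_t *time) {
        uint64_t tmp;
        if (__builtin_sub_overflow(index, baseIndex, &tmp) ||
            __builtin_mul_overflow(duration, tmp, &tmp) ||
            __builtin_add_overflow(baseTime, tmp, &tmp)) {
            return false; // would overflow; caller should treat as out of range
        }
        *time = tmp;
        return true;
    }

    int main() {
        uint64_t t;
        if (sampleTimeFor(1000, 40, 25, 5, &t)) {
            std::printf("%llu\n", (unsigned long long)t); // 1000 + 40 * 20 = 1800
        }
    }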
diff --git a/media/extractors/mp4/SampleIterator.h b/media/extractors/mp4/SampleIterator.h
index 6e4f60e..5a0ea76 100644
--- a/media/extractors/mp4/SampleIterator.h
+++ b/media/extractors/mp4/SampleIterator.h
@@ -33,8 +33,8 @@
uint32_t getDescIndex() const { return mChunkDesc; }
off64_t getSampleOffset() const { return mCurrentSampleOffset; }
size_t getSampleSize() const { return mCurrentSampleSize; }
- uint32_t getSampleTime() const { return mCurrentSampleTime; }
- uint32_t getSampleDuration() const { return mCurrentSampleDuration; }
+ uint64_t getSampleTime() const { return mCurrentSampleTime; }
+ uint64_t getSampleDuration() const { return mCurrentSampleDuration; }
uint32_t getLastSampleIndexInChunk() const {
return mCurrentSampleIndex + mSamplesPerChunk -
@@ -63,20 +63,20 @@
uint32_t mTimeToSampleIndex;
uint32_t mTTSSampleIndex;
- uint32_t mTTSSampleTime;
+ uint64_t mTTSSampleTime;
uint32_t mTTSCount;
- uint32_t mTTSDuration;
+ uint64_t mTTSDuration;
uint32_t mCurrentSampleIndex;
off64_t mCurrentSampleOffset;
size_t mCurrentSampleSize;
- uint32_t mCurrentSampleTime;
- uint32_t mCurrentSampleDuration;
+ uint64_t mCurrentSampleTime;
+ uint64_t mCurrentSampleDuration;
void reset();
status_t findChunkRange(uint32_t sampleIndex);
status_t getChunkOffset(uint32_t chunk, off64_t *offset);
- status_t findSampleTimeAndDuration(uint32_t sampleIndex, uint32_t *time, uint32_t *duration);
+ status_t findSampleTimeAndDuration(uint32_t sampleIndex, uint64_t *time, uint64_t *duration);
SampleIterator(const SampleIterator &);
SampleIterator &operator=(const SampleIterator &);
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index d242798..bf29bf1 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -37,13 +37,13 @@
namespace android {
// static
-const uint32_t SampleTable::kChunkOffsetType32 = FOURCC('s', 't', 'c', 'o');
+const uint32_t SampleTable::kChunkOffsetType32 = FOURCC("stco");
// static
-const uint32_t SampleTable::kChunkOffsetType64 = FOURCC('c', 'o', '6', '4');
+const uint32_t SampleTable::kChunkOffsetType64 = FOURCC("co64");
// static
-const uint32_t SampleTable::kSampleSizeType32 = FOURCC('s', 't', 's', 'z');
+const uint32_t SampleTable::kSampleSizeType32 = FOURCC("stsz");
// static
-const uint32_t SampleTable::kSampleSizeTypeCompact = FOURCC('s', 't', 'z', '2');
+const uint32_t SampleTable::kSampleSizeTypeCompact = FOURCC("stz2");
////////////////////////////////////////////////////////////////////////////////
@@ -614,7 +614,7 @@
return OK;
}
-uint32_t abs_difference(uint32_t time1, uint32_t time2) {
+uint32_t abs_difference(uint64_t time1, uint64_t time2) {
return time1 > time2 ? time1 - time2 : time2 - time1;
}
@@ -662,7 +662,7 @@
}
uint32_t sampleIndex = 0;
- uint32_t sampleTime = 0;
+ uint64_t sampleTime = 0;
for (uint32_t i = 0; i < mTimeToSampleCount; ++i) {
uint32_t n = mTimeToSample[2 * i];
@@ -684,13 +684,13 @@
(compTimeDelta == INT32_MIN ?
INT32_MAX : uint32_t(-compTimeDelta)))
|| (compTimeDelta > 0 &&
- sampleTime > UINT32_MAX - compTimeDelta)) {
- ALOGE("%u + %d would overflow, clamping",
- sampleTime, compTimeDelta);
+ sampleTime > UINT64_MAX - compTimeDelta)) {
+ ALOGE("%llu + %d would overflow, clamping",
+ (unsigned long long) sampleTime, compTimeDelta);
if (compTimeDelta < 0) {
sampleTime = 0;
} else {
- sampleTime = UINT32_MAX;
+ sampleTime = UINT64_MAX;
}
compTimeDelta = 0;
}
@@ -701,10 +701,10 @@
}
++sampleIndex;
- if (sampleTime > UINT32_MAX - delta) {
- ALOGE("%u + %u would overflow, clamping",
- sampleTime, delta);
- sampleTime = UINT32_MAX;
+ if (sampleTime > UINT64_MAX - delta) {
+ ALOGE("%llu + %u would overflow, clamping",
+ (unsigned long long) sampleTime, delta);
+ sampleTime = UINT64_MAX;
} else {
sampleTime += delta;
}
@@ -870,19 +870,19 @@
if (err != OK) {
return err;
}
- uint32_t sample_time = mSampleIterator->getSampleTime();
+ uint64_t sample_time = mSampleIterator->getSampleTime();
err = mSampleIterator->seekTo(mSyncSamples[left]);
if (err != OK) {
return err;
}
- uint32_t upper_time = mSampleIterator->getSampleTime();
+ uint64_t upper_time = mSampleIterator->getSampleTime();
err = mSampleIterator->seekTo(mSyncSamples[left - 1]);
if (err != OK) {
return err;
}
- uint32_t lower_time = mSampleIterator->getSampleTime();
+ uint64_t lower_time = mSampleIterator->getSampleTime();
// use abs_difference for safety
if (abs_difference(upper_time, sample_time) >
@@ -955,9 +955,9 @@
uint32_t sampleIndex,
off64_t *offset,
size_t *size,
- uint32_t *compositionTime,
+ uint64_t *compositionTime,
bool *isSyncSample,
- uint32_t *sampleDuration) {
+ uint64_t *sampleDuration) {
Mutex::Autolock autoLock(mLock);
status_t err;
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/SampleTable.h
index d4b5dc8..57f6e62 100644
--- a/media/extractors/mp4/SampleTable.h
+++ b/media/extractors/mp4/SampleTable.h
@@ -66,9 +66,9 @@
uint32_t sampleIndex,
off64_t *offset,
size_t *size,
- uint32_t *compositionTime,
+ uint64_t *compositionTime,
bool *isSyncSample = NULL,
- uint32_t *sampleDuration = NULL);
+ uint64_t *sampleDuration = NULL);
// call only after getMetaDataForSample has been called successfully.
uint32_t getLastSampleIndexInChunk();
@@ -124,7 +124,7 @@
struct SampleTimeEntry {
uint32_t mSampleIndex;
- uint32_t mCompositionTime;
+ uint64_t mCompositionTime;
};
SampleTimeEntry *mSampleTimeEntries;
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 2a94671..0f0c72c 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -24,6 +24,8 @@
header_libs: [
"libbase_headers",
+ "libstagefright_headers",
+ "libmedia_headers",
],
static_libs: [
@@ -31,7 +33,7 @@
"libstagefright_foundation_without_imemory",
"libstagefright_mpeg2support",
"libutils",
- "libstagefright",
+ "libstagefright_mpeg2extractor",
"libstagefright_esds",
],
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index e1509ee..49dd0b4 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -302,16 +302,21 @@
return AMEDIA_ERROR_UNKNOWN;
}
-void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
- bool found = false;
+status_t MPEG2TSExtractor::findIndexOfSource(const sp<AnotherPacketSource> &impl, size_t *index) {
for (size_t i = 0; i < mSourceImpls.size(); i++) {
if (mSourceImpls[i] == impl) {
- found = true;
- break;
+ *index = i;
+ return OK;
}
}
- if (!found) {
+ return NAME_NOT_FOUND;
+}
+
+void MPEG2TSExtractor::addSource(const sp<AnotherPacketSource> &impl) {
+ size_t index;
+ if (findIndexOfSource(impl, &index) != OK) {
mSourceImpls.push(impl);
+ mSyncPoints.push();
}
}
@@ -319,6 +324,7 @@
bool haveAudio = false;
bool haveVideo = false;
int64_t startTime = ALooper::GetNowUs();
+ size_t index;
status_t err;
while ((err = feedMore(true /* isInit */)) == OK
@@ -337,8 +343,9 @@
haveVideo = true;
addSource(impl);
if (!isScrambledFormat(*(format.get()))) {
- mSyncPoints.push();
- mSeekSyncPoints = &mSyncPoints.editTop();
+ if (findIndexOfSource(impl, &index) == OK) {
+ mSeekSyncPoints = &mSyncPoints.editItemAt(index);
+ }
}
}
}
@@ -352,10 +359,9 @@
if (format != NULL) {
haveAudio = true;
addSource(impl);
- if (!isScrambledFormat(*(format.get()))) {
- mSyncPoints.push();
- if (!haveVideo) {
- mSeekSyncPoints = &mSyncPoints.editTop();
+ if (!isScrambledFormat(*(format.get())) && !haveVideo) {
+ if (findIndexOfSource(impl, &index) == OK) {
+ mSeekSyncPoints = &mSyncPoints.editItemAt(index);
}
}
}
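The refactor above keeps mSyncPoints index-aligned with mSourceImpls: addSource() pushes an entry onto both containers, and the seek sync-point list for a track is then looked up by the track's index via findIndexOfSource() instead of relying on push order. A minimal sketch of that invariant with simplified types and illustrative names:

    #include <cstddef>
    #include <vector>

    // Simplified stand-ins for AnotherPacketSource and the sync-point list.
    struct Source {};
    using SyncPoints = std::vector<long long>;

    struct Tracks {
        std::vector<Source *> sources;
        std::vector<SyncPoints> syncPoints;  // kept index-aligned with `sources`

        bool findIndex(Source *s, size_t *index) const {
            for (size_t i = 0; i < sources.size(); ++i) {
                if (sources[i] == s) { *index = i; return true; }
            }
            return false;
        }

        void addSource(Source *s) {
            size_t index;
            if (!findIndex(s, &index)) {
                sources.push_back(s);
                syncPoints.emplace_back();  // one sync-point list per source, same index
            }
        }
    };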
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.h b/media/extractors/mpeg2/MPEG2TSExtractor.h
index e425d23..2537d3b 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.h
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.h
@@ -95,6 +95,7 @@
status_t seekBeyond(int64_t seekTimeUs);
status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
+ status_t findIndexOfSource(const sp<AnotherPacketSource> &impl, size_t *index);
// Add a SynPoint derived from |event|.
void addSyncPoint_l(const ATSParser::SyncEvent &event);
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index 596c1c8..d99493d 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -364,7 +364,13 @@
return OK;
}
- ++*pageOffset;
+ // see how far ahead to skip; avoid some fruitless comparisons
+ unsigned int i;
+ for (i = 1; i < 4 ; i++) {
+ if (signature[i] == 'O')
+ break;
+ }
+ *pageOffset += i;
}
}
@@ -983,7 +989,7 @@
size_t denom = numerator - kMaxNumTOCEntries;
size_t accum = 0;
- for (ssize_t i = mTableOfContents.size() - 1; i >= 0; --i) {
+ for (ssize_t i = mTableOfContents.size(); i > 0; --i) {
accum += denom;
if (accum >= numerator) {
mTableOfContents.removeAt(i);
@@ -1382,6 +1388,7 @@
static const char *extensions[] = {
"oga",
"ogg",
+ "opus",
NULL
};
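The page-sync change above stops restarting the "OggS" capture-pattern search one byte at a time: after a mismatch it advances to the next 'O' inside the 4-byte window, or past the whole window if there is none. A small self-contained sketch of the same skip computation; the buffer handling is illustrative:

    #include <cstdio>

    // Given 4 bytes that failed to match "OggS", decide how far to advance:
    // jump to the next 'O' in the window, or past the window entirely.
    static unsigned skipAfterMismatch(const char *signature) {
        unsigned i;
        for (i = 1; i < 4; i++) {
            if (signature[i] == 'O') break;
        }
        return i; // 1..4 bytes
    }

    int main() {
        std::printf("%u\n", skipAfterMismatch("xxOg")); // 2: next candidate starts at the 'O'
        std::printf("%u\n", skipAfterMismatch("abcd")); // 4: no 'O', skip the whole window
    }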
diff --git a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
index 9711b86..8eb70b1 100644
--- a/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
+++ b/media/libaaudio/examples/loopback/src/LoopbackAnalyzer.h
@@ -310,7 +310,7 @@
}
// Write SHORT data from the first channel.
- int write(int16_t *inputData, int inputChannelCount, int numFrames) {
+ int32_t write(int16_t *inputData, int32_t inputChannelCount, int32_t numFrames) {
// stop at end of buffer
if ((mFrameCounter + numFrames) > mMaxFrames) {
numFrames = mMaxFrames - mFrameCounter;
@@ -322,7 +322,7 @@
}
// Write FLOAT data from the first channel.
- int write(float *inputData, int inputChannelCount, int numFrames) {
+ int32_t write(float *inputData, int32_t inputChannelCount, int32_t numFrames) {
// stop at end of buffer
if ((mFrameCounter + numFrames) > mMaxFrames) {
numFrames = mMaxFrames - mFrameCounter;
@@ -333,7 +333,7 @@
return numFrames;
}
- int size() {
+ int32_t size() {
return mFrameCounter;
}
@@ -443,9 +443,14 @@
virtual ~LoopbackProcessor() = default;
+ enum process_result {
+ PROCESS_RESULT_OK,
+ PROCESS_RESULT_GLITCH
+ };
+
virtual void reset() {}
- virtual void process(float *inputData, int inputChannelCount,
+ virtual process_result process(float *inputData, int inputChannelCount,
float *outputData, int outputChannelCount,
int numFrames) = 0;
@@ -639,7 +644,7 @@
return getSampleRate() / 8;
}
- void process(float *inputData, int inputChannelCount,
+ process_result process(float *inputData, int inputChannelCount,
float *outputData, int outputChannelCount,
int numFrames) override {
int channelsValid = std::min(inputChannelCount, outputChannelCount);
@@ -750,6 +755,7 @@
mState = nextState;
mLoopCounter++;
+ return PROCESS_RESULT_OK;
}
int save(const char *fileName) override {
@@ -896,9 +902,10 @@
* @param inputData contains microphone data with sine signal feedback
* @param outputData contains the reference sine wave
*/
- void process(float *inputData, int inputChannelCount,
+ process_result process(float *inputData, int inputChannelCount,
float *outputData, int outputChannelCount,
int numFrames) override {
+ process_result result = PROCESS_RESULT_OK;
mProcessCount++;
float peak = measurePeakAmplitude(inputData, inputChannelCount, numFrames);
@@ -978,6 +985,7 @@
mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
if (absDiff > mTolerance) {
mGlitchCount++;
+ result = PROCESS_RESULT_GLITCH;
//printf("%5d: Got a glitch # %d, predicted = %f, actual = %f\n",
// mFrameCounter, mGlitchCount, predicted, sample);
mState = STATE_IMMUNE;
@@ -1018,6 +1026,7 @@
mFrameCounter++;
}
+ return result;
}
void resetAccumulator() {
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 2a02b20..6578156 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -34,9 +34,13 @@
#include "AAudioSimpleRecorder.h"
#include "AAudioExampleUtils.h"
#include "LoopbackAnalyzer.h"
+#include "../../utils/AAudioExampleUtils.h"
-// V0.4.00 = rectify and low-pass filter the echos, use auto-correlation on entire echo
-#define APP_VERSION "0.4.00"
+// V0.4.00 = rectify and low-pass filter the echos, auto-correlate entire echo
+// V0.4.01 = add -h hang option
+// fix -n option to set output buffer for -tm
+// plot first glitch
+#define APP_VERSION "0.4.01"
// Tag for machine readable results as property = value pairs
#define RESULT_TAG "RESULT: "
@@ -47,10 +51,14 @@
constexpr int kLogPeriodMillis = 1000;
constexpr int kNumInputChannels = 1;
constexpr int kNumCallbacksToDrain = 20;
+constexpr int kNumCallbacksToNotRead = 0; // let input fill back up
constexpr int kNumCallbacksToDiscard = 20;
+constexpr int kDefaultHangTimeMillis = 50;
+constexpr int kMaxGlitchEventsToSave = 32;
struct LoopbackData {
AAudioStream *inputStream = nullptr;
+ AAudioStream *outputStream = nullptr;
int32_t inputFramesMaximum = 0;
int16_t *inputShortData = nullptr;
float *inputFloatData = nullptr;
@@ -58,6 +66,7 @@
int32_t actualInputChannelCount = 0;
int32_t actualOutputChannelCount = 0;
int32_t numCallbacksToDrain = kNumCallbacksToDrain;
+ int32_t numCallbacksToNotRead = kNumCallbacksToNotRead;
int32_t numCallbacksToDiscard = kNumCallbacksToDiscard;
int32_t minNumFrames = INT32_MAX;
int32_t maxNumFrames = 0;
@@ -65,6 +74,9 @@
int32_t insufficientReadFrames = 0;
int32_t framesReadTotal = 0;
int32_t framesWrittenTotal = 0;
+ int32_t hangPeriodMillis = 5 * 1000; // time between hangs
+ int32_t hangCountdownFrames = 5 * 48000; // frames until next hang
+ int32_t hangTimeMillis = 0; // 0 for no hang
bool isDone = false;
aaudio_result_t inputError = AAUDIO_OK;
@@ -74,6 +86,29 @@
EchoAnalyzer echoAnalyzer;
AudioRecording audioRecording;
LoopbackProcessor *loopbackProcessor;
+
+ int32_t glitchFrames[kMaxGlitchEventsToSave];
+ int32_t numGlitchEvents = 0;
+
+ void hangIfRequested(int32_t numFrames) {
+ if (hangTimeMillis > 0) {
+ hangCountdownFrames -= numFrames;
+ if (hangCountdownFrames <= 0) {
+ const int64_t startNanos = getNanoseconds();
+ usleep(hangTimeMillis * 1000);
+ const int64_t endNanos = getNanoseconds();
+ const int32_t elapsedMicros = (int32_t)
+ ((endNanos - startNanos) / 1000);
+ printf("callback hanging for %d millis, actual = %d micros\n",
+ hangTimeMillis, elapsedMicros);
+ hangCountdownFrames = (int64_t) hangPeriodMillis
+ * AAudioStream_getSampleRate(outputStream)
+ / 1000;
+ }
+ }
+
+
+ }
};
static void convertPcm16ToFloat(const int16_t *source,
@@ -105,9 +140,14 @@
assert(false);
}
if (framesRead < 0) {
- myData->inputError = framesRead;
- printf("ERROR in read = %d = %s\n", framesRead,
- AAudio_convertResultToText(framesRead));
+ // Expect INVALID_STATE if STATE_STARTING
+ if (myData->framesReadTotal > 0) {
+ myData->inputError = framesRead;
+ printf("ERROR in read = %d = %s\n", framesRead,
+ AAudio_convertResultToText(framesRead));
+ } else {
+ framesRead = 0;
+ }
} else {
myData->framesReadTotal += framesRead;
}
@@ -149,8 +189,10 @@
int32_t totalFramesRead = 0;
do {
actualFramesRead = readFormattedData(myData, numFrames);
- if (actualFramesRead) {
+ if (actualFramesRead > 0) {
totalFramesRead += actualFramesRead;
+ } else if (actualFramesRead < 0) {
+ result = AAUDIO_CALLBACK_RESULT_STOP;
}
// Ignore errors because input stream may not be started yet.
} while (actualFramesRead > 0);
@@ -159,6 +201,9 @@
myData->numCallbacksToDrain--;
}
+ } else if (myData->numCallbacksToNotRead > 0) {
+ // Let the input fill up a bit so we are not so close to the write pointer.
+ myData->numCallbacksToNotRead--;
} else if (myData->numCallbacksToDiscard > 0) {
// Ignore. Allow the input to fill back up to equilibrium with the output.
actualFramesRead = readFormattedData(myData, numFrames);
@@ -168,6 +213,7 @@
myData->numCallbacksToDiscard--;
} else {
+ myData->hangIfRequested(numFrames);
int32_t numInputBytes = numFrames * myData->actualInputChannelCount * sizeof(float);
memset(myData->inputFloatData, 0 /* value */, numInputBytes);
@@ -184,7 +230,7 @@
if (actualFramesRead < numFrames) {
if(actualFramesRead < (int32_t) framesAvailable) {
- printf("insufficient but numFrames = %d"
+ printf("insufficient for no reason, numFrames = %d"
", actualFramesRead = %d"
", inputFramesWritten = %d"
", inputFramesRead = %d"
@@ -205,16 +251,25 @@
if (myData->actualInputFormat == AAUDIO_FORMAT_PCM_I16) {
convertPcm16ToFloat(myData->inputShortData, myData->inputFloatData, numSamples);
}
- // Save for later.
- myData->audioRecording.write(myData->inputFloatData,
- myData->actualInputChannelCount,
- numFrames);
+
// Analyze the data.
- myData->loopbackProcessor->process(myData->inputFloatData,
+ LoopbackProcessor::process_result procResult = myData->loopbackProcessor->process(myData->inputFloatData,
myData->actualInputChannelCount,
outputData,
myData->actualOutputChannelCount,
numFrames);
+
+ if (procResult == LoopbackProcessor::PROCESS_RESULT_GLITCH) {
+ if (myData->numGlitchEvents < kMaxGlitchEventsToSave) {
+ myData->glitchFrames[myData->numGlitchEvents++] = myData->audioRecording.size();
+ }
+ }
+
+ // Save for later.
+ myData->audioRecording.write(myData->inputFloatData,
+ myData->actualInputChannelCount,
+ actualFramesRead);
+
myData->isDone = myData->loopbackProcessor->isDone();
if (myData->isDone) {
result = AAUDIO_CALLBACK_RESULT_STOP;
@@ -242,6 +297,7 @@
printf(" -C{channels} number of input channels\n");
printf(" -F{0,1,2} input format, 1=I16, 2=FLOAT\n");
printf(" -g{gain} recirculating loopback gain\n");
+ printf(" -h{hangMillis} occasionally hang in the callback\n");
printf(" -P{inPerf} set input AAUDIO_PERFORMANCE_MODE*\n");
printf(" n for _NONE\n");
printf(" l for _LATENCY\n");
@@ -300,9 +356,7 @@
return testMode;
}
-void printAudioGraph(AudioRecording &recording, int numSamples) {
- int32_t start = recording.size() / 2;
- int32_t end = start + numSamples;
+void printAudioGraphRegion(AudioRecording &recording, int32_t start, int32_t end) {
if (end >= recording.size()) {
end = recording.size() - 1;
}
@@ -345,7 +399,7 @@
int32_t requestedInputCapacity = AAUDIO_UNSPECIFIED;
aaudio_performance_mode_t inputPerformanceLevel = AAUDIO_PERFORMANCE_MODE_LOW_LATENCY;
- int32_t outputFramesPerBurst = 0;
+ int32_t outputFramesPerBurst = 0;
aaudio_format_t actualOutputFormat = AAUDIO_FORMAT_INVALID;
int32_t actualSampleRate = 0;
@@ -353,6 +407,7 @@
int testMode = TEST_ECHO_LATENCY;
double gain = 1.0;
+ int hangTimeMillis = 0;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
@@ -382,6 +437,15 @@
case 'g':
gain = atof(&arg[2]);
break;
+ case 'h':
+ // Was there a number after the "-h"?
+ if (arg[2]) {
+ hangTimeMillis = atoi(&arg[2]);
+ } else {
+ // If no number then use the default.
+ hangTimeMillis = kDefaultHangTimeMillis;
+ }
+ break;
case 'P':
inputPerformanceLevel = parsePerformanceMode(arg[2]);
break;
@@ -415,6 +479,8 @@
int32_t timeMillis = 0;
int32_t recordingDuration = std::min(60 * 5, requestedDuration);
+ int32_t requestedOutputBursts = argParser.getNumberOfBursts();
+
switch(testMode) {
case TEST_SINE_MAGNITUDE:
loopbackData.loopbackProcessor = &loopbackData.sineAnalyzer;
@@ -446,7 +512,7 @@
fprintf(stderr, "ERROR - player.open() returned %d\n", result);
exit(1);
}
- outputStream = player.getStream();
+ outputStream = loopbackData.outputStream = player.getStream();
actualOutputFormat = AAudioStream_getFormat(outputStream);
if (actualOutputFormat != AAUDIO_FORMAT_PCM_FLOAT) {
@@ -482,24 +548,29 @@
{
int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
- result = AAudioStream_setBufferSizeInFrames(inputStream, actualCapacity);
- if (result < 0) {
- fprintf(stderr, "ERROR - AAudioStream_setBufferSizeInFrames() returned %d\n", result);
- goto finish;
- } else {}
- }
+ (void) AAudioStream_setBufferSizeInFrames(inputStream, actualCapacity);
- argParser.compareWithStream(inputStream);
+ if (testMode == TEST_SINE_MAGNITUDE
+ && requestedOutputBursts == AAUDIO_UNSPECIFIED) {
+ result = AAudioStream_setBufferSizeInFrames(outputStream, actualCapacity);
+ if (result < 0) {
+ fprintf(stderr, "ERROR - AAudioStream_setBufferSizeInFrames(output) returned %d\n",
+ result);
+ goto finish;
+ } else {
+ printf("Output buffer size set to match input capacity = %d frames!\n", result);
+ }
+ }
- // If the input stream is too small then we cannot satisfy the output callback.
- {
- int32_t actualCapacity = AAudioStream_getBufferCapacityInFrames(inputStream);
+ // If the input stream is too small then we cannot satisfy the output callback.
if (actualCapacity < 2 * outputFramesPerBurst) {
fprintf(stderr, "ERROR - input capacity < 2 * outputFramesPerBurst\n");
goto finish;
}
}
+ argParser.compareWithStream(inputStream);
+
// ------- Setup loopbackData -----------------------------
loopbackData.actualInputFormat = AAudioStream_getFormat(inputStream);
@@ -518,6 +589,8 @@
loopbackData.loopbackProcessor->reset();
+ loopbackData.hangTimeMillis = hangTimeMillis;
+
// Start OUTPUT first so INPUT does not overflow.
result = player.start();
if (result != AAUDIO_OK) {
@@ -604,7 +677,17 @@
if (loopbackData.inputError == AAUDIO_OK) {
if (testMode == TEST_SINE_MAGNITUDE) {
- printAudioGraph(loopbackData.audioRecording, 200);
+ if (loopbackData.numGlitchEvents > 0) {
+ // Graph around the first glitch if there is one.
+ const int32_t start = loopbackData.glitchFrames[0] - 8;
+ const int32_t end = start + outputFramesPerBurst + 8 + 8;
+ printAudioGraphRegion(loopbackData.audioRecording, start, end);
+ } else {
+ // Or graph the middle of the signal.
+ const int32_t start = loopbackData.audioRecording.size() / 2;
+ const int32_t end = start + 200;
+ printAudioGraphRegion(loopbackData.audioRecording, start, end);
+ }
}
loopbackData.loopbackProcessor->report();
@@ -654,6 +737,11 @@
delete[] loopbackData.inputShortData;
report_result:
+
+ for (int i = 0; i < loopbackData.numGlitchEvents; i++) {
+ printf(" glitch at frame %d\n", loopbackData.glitchFrames[i]);
+ }
+
written = loopbackData.loopbackProcessor->save(FILENAME_PROCESSED);
if (written > 0) {
printf("main() wrote %8d processed samples to \"%s\" on Android device\n",
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index a5dc55f..f5ed7aa 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -130,12 +130,10 @@
}
int32_t getBufferCapacity() const {
- printf("%s() returns %d\n", __func__, mBufferCapacity);
return mBufferCapacity;
}
void setBufferCapacity(int32_t frames) {
- printf("%s(%d)\n", __func__, frames);
mBufferCapacity = frames;
}
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 1645986..4373fa9 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -32,8 +32,6 @@
// Arbitrary period for glitches
#define FORCED_UNDERRUN_PERIOD_FRAMES (2 * 48000)
-// How long to sleep in a callback to cause an intentional glitch. For testing.
-#define FORCED_UNDERRUN_SLEEP_MICROS (10 * 1000)
#define MAX_TIMESTAMPS 16
@@ -275,7 +273,7 @@
int scheduler = 0;
bool schedulerChecked = false;
- bool forceUnderruns = false;
+ int32_t hangTimeMSec = 0;
AAudioSimplePlayer simplePlayer;
int32_t callbackCount = 0;
@@ -327,10 +325,12 @@
sineData->setupSineSweeps();
}
- if (sineData->forceUnderruns) {
+ if (sineData->hangTimeMSec > 0) {
if (sineData->framesTotal > sineData->nextFrameToGlitch) {
- usleep(FORCED_UNDERRUN_SLEEP_MICROS);
- printf("Simulate glitch at %lld\n", (long long) sineData->framesTotal);
+ usleep(sineData->hangTimeMSec * 1000);
+ printf("Hang callback at %lld frames for %d msec\n",
+ (long long) sineData->framesTotal,
+ sineData->hangTimeMSec);
sineData->nextFrameToGlitch += FORCED_UNDERRUN_PERIOD_FRAMES;
}
}
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 7a48153..2b05f10 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -26,11 +26,14 @@
#include <string.h>
#include <time.h>
#include <aaudio/AAudio.h>
+
#include "AAudioExampleUtils.h"
#include "AAudioSimplePlayer.h"
#include "AAudioArgsParser.h"
-#define APP_VERSION "0.1.5"
+#define APP_VERSION "0.1.6"
+
+constexpr int32_t kDefaultHangTimeMSec = 10;
/**
* Open stream, play some sine waves, then close the stream.
@@ -41,7 +44,7 @@
static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser,
int32_t loopCount,
int32_t prefixToneMsec,
- bool forceUnderruns)
+ int32_t hangTimeMSec)
{
SineThreadedData_t myData;
AAudioSimplePlayer &player = myData.simplePlayer;
@@ -53,10 +56,12 @@
printf("----------------------- run complete test --------------------------\n");
myData.schedulerChecked = false;
myData.callbackCount = 0;
- myData.forceUnderruns = forceUnderruns; // test AAudioStream_getXRunCount()
+ myData.hangTimeMSec = hangTimeMSec; // test AAudioStream_getXRunCount()
result = player.open(argParser,
- SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
+ SimplePlayerDataCallbackProc,
+ SimplePlayerErrorCallbackProc,
+ &myData);
if (result != AAUDIO_OK) {
fprintf(stderr, "ERROR - player.open() returned %s\n",
AAudio_convertResultToText(result));
@@ -115,12 +120,17 @@
int64_t millis =
(getNanoseconds(CLOCK_MONOTONIC) - startedAtNanos) / NANOS_PER_MILLISECOND;
result = myData.waker.get();
+ const int32_t framesWritten = (int32_t) AAudioStream_getFramesWritten(player.getStream());
+ const int32_t framesRead = (int32_t) AAudioStream_getFramesRead(player.getStream());
+ const int32_t xruns = AAudioStream_getXRunCount(player.getStream());
printf(" waker result = %d, at %6d millis"
- ", second = %3d, framesWritten = %8d, underruns = %d\n",
+ ", second = %3d, frames written %8d - read %8d = %8d, underruns = %d\n",
result, (int) millis,
second,
- (int) AAudioStream_getFramesWritten(player.getStream()),
- (int) AAudioStream_getXRunCount(player.getStream()));
+ framesWritten,
+ framesRead,
+ framesWritten - framesRead,
+ xruns);
if (result != AAUDIO_OK) {
disconnected = (result == AAUDIO_ERROR_DISCONNECTED);
bailOut = true;
@@ -210,7 +220,9 @@
AAudioArgsParser::usage();
printf(" -l{count} loopCount start/stop, every other one is silent\n");
printf(" -t{msec} play a high pitched tone at the beginning\n");
- printf(" -z force periodic underruns by sleeping in callback\n");
+ printf(" -h{msec} force periodic underruns by hanging in callback\n");
+ printf(" If no value specified then %d used.\n",
+ kDefaultHangTimeMSec);
}
int main(int argc, const char **argv)
@@ -219,13 +231,14 @@
aaudio_result_t result;
int32_t loopCount = 1;
int32_t prefixToneMsec = 0;
- bool forceUnderruns = false;
+ int32_t hangTimeMSec = 0;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback V%s\n", argv[0], APP_VERSION);
+ printf("%s - Play a sine sweep using an AAudio callback V%s\n",
+ argv[0], APP_VERSION);
for (int i = 1; i < argc; i++) {
const char *arg = argv[i];
@@ -240,8 +253,10 @@
case 't':
prefixToneMsec = atoi(&arg[2]);
break;
- case 'z':
- forceUnderruns = true; // Zzzzzzz
+ case 'h':
+ hangTimeMSec = (arg[2]) // value specified?
+ ? atoi(&arg[2])
+ : kDefaultHangTimeMSec;
break;
default:
usage();
@@ -257,7 +272,8 @@
}
// Keep looping until we can complete the test without disconnecting.
- while((result = testOpenPlayClose(argParser, loopCount, prefixToneMsec, forceUnderruns))
+ while((result = testOpenPlayClose(argParser, loopCount,
+ prefixToneMsec, hangTimeMSec))
== AAUDIO_ERROR_DISCONNECTED);
return (result) ? EXIT_FAILURE : EXIT_SUCCESS;
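For completeness, the option-parsing convention shared by the new -h flags: a bare "-h" selects the default hang time, while "-h25" parses the digits that immediately follow the flag. The helper below is a hypothetical stand-alone rendering of that one expression.

    #include <cstdio>
    #include <cstdlib>

    constexpr int32_t kDefaultHangTimeMSec = 10;   // stand-in default

    static int32_t parseHangOption(const char *arg) {
        // arg points at "-h" or "-h<number>"
        return arg[2] ? atoi(&arg[2]) : kDefaultHangTimeMSec;
    }

    int main() {
        std::printf("-h   -> %d msec\n", parseHangOption("-h"));    // default
        std::printf("-h25 -> %d msec\n", parseHangOption("-h25"));  // explicit value
        return 0;
    }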
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 3b03601..ec270f3 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -713,3 +713,7 @@
aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
}
+
+bool AudioStreamInternal::isClockModelInControl() const {
+ return isActive() && mAudioEndpoint.isFreeRunning() && mClockModel.isRunning();
+}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 1c88f52..86c4698 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -144,6 +144,14 @@
*/
bool isInService() const { return mInService; }
+ /**
+ * Is the service FIFO position currently controlled by the AAudio service or HAL,
+ * or set based on the Clock Model.
+ *
+ * @return true if the ClockModel is currently determining the FIFO position
+ */
+ bool isClockModelInControl() const;
+
IsochronousClockModel mClockModel; // timing model for chasing the HAL
AudioEndpoint mAudioEndpoint; // source for reads or sink for writes
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 7dcb620..a6cc45b 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -210,17 +210,12 @@
}
int64_t AudioStreamInternalCapture::getFramesWritten() {
- int64_t framesWrittenHardware;
- if (isActive()) {
- framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
- } else {
- framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
- }
- // Prevent retrograde motion.
+ const int64_t framesWrittenHardware = isClockModelInControl()
+ ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ : mAudioEndpoint.getDataWriteCounter();
+ // Add service offset and prevent retrograde motion.
mLastFramesWritten = std::max(mLastFramesWritten,
framesWrittenHardware + mFramesOffsetFromService);
- //ALOGD("getFramesWritten() returns %lld",
- // (long long)mLastFramesWritten);
return mLastFramesWritten;
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index 6af8e7d..e1443d9 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -242,27 +242,17 @@
return framesWritten;
}
-int64_t AudioStreamInternalPlay::getFramesRead()
-{
- int64_t framesReadHardware;
- if (isActive()) {
- framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
- } else {
- framesReadHardware = mAudioEndpoint.getDataReadCounter();
- }
- int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
- // Prevent retrograde motion.
- if (framesRead < mLastFramesRead) {
- framesRead = mLastFramesRead;
- } else {
- mLastFramesRead = framesRead;
- }
- return framesRead;
+int64_t AudioStreamInternalPlay::getFramesRead() {
+ const int64_t framesReadHardware = isClockModelInControl()
+ ? mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
+ : mAudioEndpoint.getDataReadCounter();
+ // Add service offset and prevent retrograde motion.
+ mLastFramesRead = std::max(mLastFramesRead, framesReadHardware + mFramesOffsetFromService);
+ return mLastFramesRead;
}
-int64_t AudioStreamInternalPlay::getFramesWritten()
-{
- int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
+int64_t AudioStreamInternalPlay::getFramesWritten() {
+ const int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
+ mFramesOffsetFromService;
return framesWritten;
}
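A stand-alone sketch of the retrograde-motion guard that getFramesRead() and getFramesWritten() now share: the raw position may come from either the clock model or the endpoint counter, but once the service offset is added the reported position never moves backward. Plain integers stand in for the real AAudio position sources.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    class MonotonicPosition {
    public:
        int64_t update(int64_t rawPosition, int64_t serviceOffset) {
            // Add service offset and prevent retrograde motion.
            mLastPosition = std::max(mLastPosition, rawPosition + serviceOffset);
            return mLastPosition;
        }
    private:
        int64_t mLastPosition = 0;
    };

    int main() {
        MonotonicPosition position;
        const int64_t offset = 0;
        // A source that momentarily steps backward, e.g. when switching from the
        // interpolated clock-model position to the raw endpoint counter.
        const int64_t samples[] = {480, 960, 940, 1440};
        for (int64_t raw : samples) {
            std::printf("raw %5lld -> reported %5lld\n",
                        (long long) raw, (long long) position.update(raw, offset));
        }
        return 0;
    }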
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 95b52be..747d0e1 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -60,10 +60,14 @@
mState = STATE_STOPPED;
}
-bool IsochronousClockModel::isStarting() {
+bool IsochronousClockModel::isStarting() const {
return mState == STATE_STARTING;
}
+bool IsochronousClockModel::isRunning() const {
+ return mState == STATE_RUNNING;
+}
+
void IsochronousClockModel::processTimestamp(int64_t framePosition, int64_t nanoTime) {
// ALOGD("processTimestamp() - framePosition = %lld at nanoTime %llu",
// (long long)framePosition,
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 7182376..46ca48e 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -36,7 +36,15 @@
void start(int64_t nanoTime);
void stop(int64_t nanoTime);
- bool isStarting();
+ /**
+ * @return true if the model is starting up
+ */
+ bool isStarting() const;
+
+ /**
+ * @return true if the model is running and producing valid results
+ */
+ bool isRunning() const;
void processTimestamp(int64_t framePosition, int64_t nanoTime);
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index f550089..4a65fc9 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -326,16 +326,13 @@
if (mAudioRecord.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
- // Get current position so we can detect when the track is recording.
- status_t err = mAudioRecord->getPosition(&mPositionWhenStarting);
- if (err != OK) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // Enable callback before starting AudioTrack to avoid shutting
+ // Enable callback before starting AudioRecord to avoid shutting
// down because of a race condition.
mCallbackEnabled.store(true);
- err = mAudioRecord->start();
+ mFramesWritten.reset32(); // service writes frames
+ mTimestampPosition.reset32();
+ status_t err = mAudioRecord->start(); // resets position to zero
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
} else {
@@ -349,12 +346,10 @@
return AAUDIO_ERROR_INVALID_STATE;
}
setState(AAUDIO_STREAM_STATE_STOPPING);
- incrementFramesWritten(getFramesRead() - getFramesWritten()); // TODO review
- mTimestampPosition.set(getFramesRead());
+ mFramesWritten.catchUpTo(getFramesRead());
+ mTimestampPosition.catchUpTo(getFramesRead());
mAudioRecord->stop();
mCallbackEnabled.store(false);
- mFramesWritten.reset32(); // service writes frames, service position reset on flush
- mTimestampPosition.reset32();
// Pass false to prevent errorCallback from being called after disconnect
// when app has already requested a stop().
return checkForDisconnectRequest(false);
@@ -368,10 +363,12 @@
switch (getState()) {
// TODO add better state visibility to AudioRecord
case AAUDIO_STREAM_STATE_STARTING:
+ // When starting, the position will begin at zero and then go positive.
+ // The position can wrap but by that time the state will not be STARTING.
err = mAudioRecord->getPosition(&position);
if (err != OK) {
result = AAudioConvert_androidToAAudioResult(err);
- } else if (position != mPositionWhenStarting) {
+ } else if (position > 0) {
setState(AAUDIO_STREAM_STATE_STARTED);
}
break;
@@ -504,12 +501,12 @@
switch (getState()) {
case AAUDIO_STREAM_STATE_STARTING:
case AAUDIO_STREAM_STATE_STARTED:
- case AAUDIO_STREAM_STATE_STOPPING:
result = mAudioRecord->getPosition(&position);
if (result == OK) {
mFramesWritten.update32(position);
}
break;
+ case AAUDIO_STREAM_STATE_STOPPING:
default:
break;
}
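A sketch of the start-detection change above: because the 32-bit counters are reset before start(), the stream can be considered STARTED as soon as the reported position goes positive, with no need to remember the position at start time. FakeRecord below is a stand-in for AudioRecord that simply advances on every poll.

    #include <cstdint>
    #include <cstdio>

    struct FakeRecord {
        uint32_t position = 0;
        void start() { position = 0; }      // resets position to zero
        void poll()  { position += 96; }    // pretend the HAL consumed frames
    };

    enum State { STATE_STARTING, STATE_STARTED };

    int main() {
        FakeRecord record;
        record.start();
        State state = STATE_STARTING;
        int polls = 0;
        while (state == STATE_STARTING) {
            record.poll();
            polls++;
            // When starting, the position begins at zero and then goes positive.
            if (record.position > 0) {
                state = STATE_STARTED;
            }
        }
        std::printf("STARTED after %d poll(s)\n", polls);
        return 0;
    }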
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index c995e99..ff95aed 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -323,8 +323,8 @@
}
setState(AAUDIO_STREAM_STATE_STOPPING);
- incrementFramesRead(getFramesWritten() - getFramesRead()); // TODO review
- mTimestampPosition.set(getFramesWritten());
+ mFramesRead.catchUpTo(getFramesWritten());
+ mTimestampPosition.catchUpTo(getFramesWritten());
mFramesRead.reset32(); // service reads frames, service position reset on stop
mTimestampPosition.reset32();
mAudioTrack->stop();
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 5833eab..63add4e 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -41,10 +41,12 @@
}
/**
- * set the current value of the counter
+ * advance the current value to match the counter
*/
- void set(int64_t counter) {
- mCounter64 = counter;
+ void catchUpTo(int64_t counter) {
+ if ((counter - mCounter64) > 0) {
+ mCounter64 = counter;
+ }
}
/**
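A minimal sketch of the catchUpTo() semantics introduced above: the counter only moves forward, so catching up to a smaller value is a no-op. This is what lets stop() advance the written and timestamp positions without ever rewinding them. The class below is simplified and omits the 32-bit rollover handling of the real MonotonicCounter.

    #include <cstdint>
    #include <cstdio>

    class TinyMonotonicCounter {
    public:
        void catchUpTo(int64_t counter) {
            if ((counter - mCounter64) > 0) {
                mCounter64 = counter;
            }
        }
        int64_t get() const { return mCounter64; }
    private:
        int64_t mCounter64 = 0;
    };

    int main() {
        TinyMonotonicCounter counter;
        counter.catchUpTo(1000);   // advances
        counter.catchUpTo(800);    // ignored, would move backwards
        counter.catchUpTo(1200);   // advances again
        std::printf("counter = %lld\n", (long long) counter.get());  // 1200
        return 0;
    }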
diff --git a/media/libaaudio/tests/test_clock_model.cpp b/media/libaaudio/tests/test_clock_model.cpp
index 3c09025..7f7abbd 100644
--- a/media/libaaudio/tests/test_clock_model.cpp
+++ b/media/libaaudio/tests/test_clock_model.cpp
@@ -43,13 +43,47 @@
}
void TearDown() {
-
}
~ClockModelTestFixture() {
// cleanup any pending stuff, but no exceptions allowed
}
+ // Test processing of timestamps when the hardware may be slightly off from
+ // the expected sample rate.
+ void checkDriftingClock(double hardwareFramesPerSecond, int numLoops) {
+ const int64_t startTimeNanos = 500000000; // arbitrary
+ model.start(startTimeNanos);
+
+ const int64_t startPositionFrames = HW_FRAMES_PER_BURST; // hardware
+ // arbitrary time for first burst
+ const int64_t markerTime = startTimeNanos + NANOS_PER_MILLISECOND
+ + (200 * NANOS_PER_MICROSECOND);
+
+ // Should set initial marker.
+ model.processTimestamp(startPositionFrames, markerTime);
+ ASSERT_EQ(startPositionFrames, model.convertTimeToPosition(markerTime));
+
+ double elapsedTimeSeconds = startTimeNanos / (double) NANOS_PER_SECOND;
+ for (int i = 0; i < numLoops; i++) {
+ // Calculate random delay over several bursts.
+ const double timeDelaySeconds = 10.0 * drand48() * NANOS_PER_BURST / NANOS_PER_SECOND;
+ elapsedTimeSeconds += timeDelaySeconds;
+ const int64_t elapsedTimeNanos = (int64_t)(elapsedTimeSeconds * NANOS_PER_SECOND);
+ const int64_t currentTimeNanos = startTimeNanos + elapsedTimeNanos;
+ // Simulate DSP running at the specified rate.
+ const int64_t currentTimeFrames = startPositionFrames +
+ (int64_t)(hardwareFramesPerSecond * elapsedTimeSeconds);
+ const int64_t numBursts = currentTimeFrames / HW_FRAMES_PER_BURST;
+ const int64_t alignedPosition = startPositionFrames + (numBursts * HW_FRAMES_PER_BURST);
+
+ // Apply drifting timestamp.
+ model.processTimestamp(alignedPosition, currentTimeNanos);
+
+ ASSERT_EQ(alignedPosition, model.convertTimeToPosition(currentTimeNanos));
+ }
+ }
+
IsochronousClockModel model;
};
@@ -95,7 +129,6 @@
}
// timestamps moves the window if outside the bounds
-// TODO test nudging the window
TEST_F(ClockModelTestFixture, clock_timestamp) {
const int64_t startTime = 100000000;
model.start(startTime);
@@ -113,3 +146,21 @@
// convertPositionToTime rounds up
EXPECT_EQ(markerTime + NANOS_PER_BURST, model.convertPositionToTime(position + 17));
}
+
+#define NUM_LOOPS_DRIFT 10000
+
+// test nudging the window by using a drifting HW clock
+TEST_F(ClockModelTestFixture, clock_no_drift) {
+ checkDriftingClock(SAMPLE_RATE, NUM_LOOPS_DRIFT);
+}
+
+// These slow drift rates caused errors when I disabled the code that handles
+// drifting in the clock model. So I think the test is valid.
+// It is unlikely that real hardware would be off by more than this amount.
+TEST_F(ClockModelTestFixture, clock_slow_drift) {
+ checkDriftingClock(0.998 * SAMPLE_RATE, NUM_LOOPS_DRIFT);
+}
+
+TEST_F(ClockModelTestFixture, clock_fast_drift) {
+ checkDriftingClock(1.002 * SAMPLE_RATE, NUM_LOOPS_DRIFT);
+}
\ No newline at end of file
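A stand-alone trace of what checkDriftingClock() feeds the model: elapsed time is mapped to a hardware frame count at a slightly-off rate, then aligned down to a burst boundary before being used as a timestamp position. The constants below are assumptions for illustration, not the values used by the real fixture.

    #include <cstdint>
    #include <cstdio>

    constexpr int64_t NANOS_PER_SECOND = 1000000000LL;
    constexpr int32_t SAMPLE_RATE = 48000;          // assumed nominal rate
    constexpr int32_t HW_FRAMES_PER_BURST = 96;     // assumed burst size

    int main() {
        const double hardwareFramesPerSecond = 0.998 * SAMPLE_RATE;   // slow drift
        const int64_t startPositionFrames = HW_FRAMES_PER_BURST;
        for (int i = 1; i <= 5; i++) {
            const double elapsedTimeSeconds = i * 0.010;              // every 10 ms
            const int64_t elapsedTimeNanos =
                    (int64_t)(elapsedTimeSeconds * NANOS_PER_SECOND);
            // Simulate the DSP running at the (drifting) hardware rate.
            const int64_t currentTimeFrames = startPositionFrames
                    + (int64_t)(hardwareFramesPerSecond * elapsedTimeSeconds);
            const int64_t numBursts = currentTimeFrames / HW_FRAMES_PER_BURST;
            const int64_t alignedPosition =
                    startPositionFrames + (numBursts * HW_FRAMES_PER_BURST);
            std::printf("elapsed %9lld ns: hw frames %5lld -> burst-aligned %5lld\n",
                        (long long) elapsedTimeNanos,
                        (long long) currentTimeFrames,
                        (long long) alignedPosition);
        }
        return 0;
    }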
diff --git a/media/libaaudio/tests/test_return_stop.cpp b/media/libaaudio/tests/test_return_stop.cpp
index 9a9e00c..1252dd3 100644
--- a/media/libaaudio/tests/test_return_stop.cpp
+++ b/media/libaaudio/tests/test_return_stop.cpp
@@ -140,7 +140,7 @@
printf("%s() - error = %d\n", __func__, error);
}
-void usage() {
+static void s_usage() {
printf("test_return_stop [-i] [-x] [-n] [-c]\n");
printf(" -i direction INPUT, otherwise OUTPUT\n");
printf(" -x sharing mode EXCLUSIVE, otherwise SHARED\n");
@@ -148,6 +148,28 @@
printf(" -c always return CONTINUE from callback, not STOP\n");
}
+/**
+ * @return 0 is OK, -1 for error
+ */
+static int s_checkEnginePositions(AudioEngine *engine) {
+ const int64_t framesRead = AAudioStream_getFramesRead(engine->stream);
+ const int64_t framesWritten = AAudioStream_getFramesWritten(engine->stream);
+ const int32_t delta = (int32_t)(framesWritten - framesRead);
+ printf("playing framesRead = %7d, framesWritten = %7d"
+ ", delta = %4d, framesCalled = %6d, callbackCount = %4d\n",
+ (int32_t) framesRead,
+ (int32_t) framesWritten,
+ delta,
+ engine->framesCalled.load(),
+ engine->callbackCount.load()
+ );
+ if (delta > AAudioStream_getBufferCapacityInFrames(engine->stream)) {
+ printf("ERROR - delta > capacity\n");
+ return -1;
+ }
+ return 0;
+}
+
int main(int argc, char **argv) {
(void) argc;
(void) argv;
@@ -188,12 +210,12 @@
sharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
break;
default:
- usage();
+ s_usage();
exit(EXIT_FAILURE);
break;
}
} else {
- usage();
+ s_usage();
exit(EXIT_FAILURE);
break;
}
@@ -201,12 +223,20 @@
result = s_OpenAudioStream(&engine, direction, sharingMode, perfMode);
if (result != AAUDIO_OK) {
- printf("s_OpenAudioStream returned %s",
+ printf("s_OpenAudioStream returned %s\n",
AAudio_convertResultToText(result));
errorCount++;
}
int32_t framesPerBurst = AAudioStream_getFramesPerBurst(engine.stream);
+ // Use double buffered stream.
+ const int32_t bufferSize = AAudioStream_setBufferSizeInFrames(engine.stream, 2 * framesPerBurst);
+ if (bufferSize < 0) {
+ printf("AAudioStream_setBufferSizeInFrames returned %s\n",
+ AAudio_convertResultToText(bufferSize));
+ errorCount++;
+ }
+
// Check to see what kind of stream we actually got.
int32_t deviceId = AAudioStream_getDeviceId(engine.stream);
aaudio_performance_mode_t actualPerfMode = AAudioStream_getPerformanceMode(engine.stream);
@@ -235,21 +265,14 @@
if (result == AAUDIO_OK) {
const int watchLoops = LOOP_DURATION_MSEC / SLEEP_DURATION_MSEC;
for (int i = watchLoops; i > 0; i--) {
- printf("playing silence #%02d, framesRead = %7d, framesWritten = %7d,"
- " framesCalled = %6d, callbackCount = %4d\n",
- i,
- (int32_t) AAudioStream_getFramesRead(engine.stream),
- (int32_t) AAudioStream_getFramesWritten(engine.stream),
- engine.framesCalled.load(),
- engine.callbackCount.load()
- );
+ errorCount += s_checkEnginePositions(&engine) ? 1 : 0;
usleep(SLEEP_DURATION_MSEC * 1000);
}
}
if (engine.stopAtFrame != INT32_MAX) {
callbackResult = (engine.callbackCountAfterStop == 0) ? EXIT_SUCCESS
- : EXIT_FAILURE;
+ : EXIT_FAILURE;
if (callbackResult) {
printf("ERROR - Callback count after STOP = %d\n",
engine.callbackCountAfterStop.load());
@@ -268,9 +291,7 @@
errorCount++;
}
usleep(SLEEP_DURATION_MSEC * 1000);
- printf("getFramesRead() = %d, getFramesWritten() = %d\n",
- (int32_t) AAudioStream_getFramesRead(engine.stream),
- (int32_t) AAudioStream_getFramesWritten(engine.stream));
+ errorCount += s_checkEnginePositions(&engine) ? 1 : 0;
}
s_CloseAudioStream(&engine);
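A minimal sketch of the invariant that s_checkEnginePositions() enforces: for an output stream the writer may lead the reader by at most the buffer capacity, and a larger delta is counted as an error. Plain integers stand in for the AAudioStream position queries.

    #include <cstdint>
    #include <cstdio>

    static int checkPositions(int64_t framesWritten, int64_t framesRead,
                              int32_t bufferCapacityInFrames) {
        const int32_t delta = (int32_t)(framesWritten - framesRead);
        std::printf("framesRead = %7lld, framesWritten = %7lld, delta = %4d\n",
                    (long long) framesRead, (long long) framesWritten, delta);
        if (delta > bufferCapacityInFrames) {
            std::printf("ERROR - delta > capacity\n");
            return -1;   // caller counts this as an error
        }
        return 0;
    }

    int main() {
        const int32_t capacity = 2 * 192;   // double buffered stream
        int errorCount = 0;
        errorCount += checkPositions(1920, 1728, capacity) ? 1 : 0;  // healthy
        errorCount += checkPositions(1920, 1344, capacity) ? 1 : 0;  // writer too far ahead
        std::printf("errorCount = %d\n", errorCount);
        return 0;
    }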
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 1417aaf..e9b6fb1 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -5,6 +5,29 @@
}
cc_library_shared {
+ name: "libaudiopolicy",
+ srcs: [
+ "AudioAttributes.cpp",
+ "AudioPolicy.cpp",
+ "AudioProductStrategy.cpp",
+ "AudioVolumeGroup.cpp",
+ ],
+ shared_libs: [
+ "libaudioutils",
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+ include_dirs: ["system/media/audio_utils/include"],
+ export_include_dirs: ["include"],
+}
+
+cc_library_shared {
name: "libaudioclient",
aidl: {
@@ -23,7 +46,6 @@
":libaudioclient_aidl",
"AudioEffect.cpp",
- "AudioPolicy.cpp",
"AudioRecord.cpp",
"AudioSystem.cpp",
"AudioTrack.cpp",
@@ -41,6 +63,7 @@
],
shared_libs: [
"libaudioutils",
+ "libaudiopolicy",
"libaudiomanager",
"libbinder",
"libcutils",
@@ -52,6 +75,7 @@
"libnblog",
"libprocessgroup",
"libutils",
+ "libvibrator",
],
export_shared_lib_headers: ["libbinder"],
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
new file mode 100644
index 0000000..1ee6930
--- /dev/null
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioAttributes"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <binder/Parcel.h>
+
+#include <media/AudioAttributes.h>
+
+namespace android {
+
+status_t AudioAttributes::readFromParcel(const Parcel *parcel)
+{
+ status_t ret = NO_ERROR;
+ mAttributes.content_type = static_cast<audio_content_type_t>(parcel->readInt32());
+ mAttributes.usage = static_cast<audio_usage_t>(parcel->readInt32());
+ mAttributes.source = static_cast<audio_source_t>(parcel->readInt32());
+ mAttributes.flags = static_cast<audio_flags_mask_t>(parcel->readInt32());
+ const bool hasFlattenedTag = (parcel->readInt32() == 1);
+ if (hasFlattenedTag) {
+ std::string tags;
+ ret = parcel->readUtf8FromUtf16(&tags);
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+ std::strncpy(mAttributes.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ } else {
+ strcpy(mAttributes.tags, "");
+ }
+ mStreamType = static_cast<audio_stream_type_t>(parcel->readInt32());
+ mGroupId = static_cast<volume_group_t>(parcel->readUint32());
+ return NO_ERROR;
+}
+
+status_t AudioAttributes::writeToParcel(Parcel *parcel) const
+{
+ parcel->writeInt32(static_cast<int32_t>(mAttributes.content_type));
+ parcel->writeInt32(static_cast<int32_t>(mAttributes.usage));
+ parcel->writeInt32(static_cast<int32_t>(mAttributes.source));
+ parcel->writeInt32(static_cast<int32_t>(mAttributes.flags));
+ if (strlen(mAttributes.tags) == 0) {
+ parcel->writeInt32(0);
+ } else {
+ parcel->writeInt32(1);
+ parcel->writeUtf8AsUtf16(mAttributes.tags);
+ }
+ parcel->writeInt32(static_cast<int32_t>(mStreamType));
+ parcel->writeUint32(static_cast<uint32_t>(mGroupId));
+ return NO_ERROR;
+}
+
+} // namespace android
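A stand-alone sketch of the optional-string convention used for the tags field above: a presence flag (0 or 1) is written first, and the string itself only when the flag is 1; the reader mirrors the same order. TinyParcel below is a toy stand-in for android::Parcel, not the real API.

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct TinyParcel {
        std::vector<int32_t> ints;
        std::vector<std::string> strings;
        size_t intPos = 0;
        size_t strPos = 0;

        void writeInt32(int32_t v)             { ints.push_back(v); }
        void writeString(const std::string &s) { strings.push_back(s); }
        int32_t readInt32()                    { return ints[intPos++]; }
        std::string readString()               { return strings[strPos++]; }
    };

    static void writeTags(TinyParcel &p, const std::string &tags) {
        if (tags.empty()) {
            p.writeInt32(0);             // no flattened tag follows
        } else {
            p.writeInt32(1);
            p.writeString(tags);
        }
    }

    static std::string readTags(TinyParcel &p) {
        const bool hasFlattenedTag = (p.readInt32() == 1);
        return hasFlattenedTag ? p.readString() : std::string();
    }

    int main() {
        TinyParcel parcel;
        writeTags(parcel, "oem=speech_enhancement");
        writeTags(parcel, "");           // empty tags -> only the flag is written
        std::printf("first  = \"%s\"\n", readTags(parcel).c_str());
        std::printf("second = \"%s\"\n", readTags(parcel).c_str());
        return 0;
    }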
diff --git a/media/libaudioclient/AudioProductStrategy.cpp b/media/libaudioclient/AudioProductStrategy.cpp
new file mode 100644
index 0000000..1da1114
--- /dev/null
+++ b/media/libaudioclient/AudioProductStrategy.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioProductStrategy"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+#include <media/AudioProductStrategy.h>
+#include <media/AudioAttributes.h>
+#include <media/AudioSystem.h>
+
+namespace android {
+
+status_t AudioProductStrategy::readFromParcel(const Parcel *parcel)
+{
+ mId = static_cast<product_strategy_t>(parcel->readInt32());
+ status_t ret = parcel->readUtf8FromUtf16(&mName);
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+ size_t size = static_cast<size_t>(parcel->readInt32());
+ for (size_t i = 0; i < size; i++) {
+ AudioAttributes attribute;
+ ret = attribute.readFromParcel(parcel);
+ if (ret != NO_ERROR) {
+ mAudioAttributes.clear();
+ return ret;
+ }
+ mAudioAttributes.push_back(attribute);
+ }
+ return NO_ERROR;
+}
+
+status_t AudioProductStrategy::writeToParcel(Parcel *parcel) const
+{
+ parcel->writeInt32(static_cast<int32_t>(mId));
+ parcel->writeUtf8AsUtf16(mName);
+ size_t size = mAudioAttributes.size();
+ size_t sizePosition = parcel->dataPosition();
+ parcel->writeInt32(size);
+ size_t finalSize = size;
+
+ for (size_t i = 0; i < size; i++) {
+ size_t position = parcel->dataPosition();
+ AudioAttributes attribute(mAudioAttributes[i]);
+ status_t ret = attribute.writeToParcel(parcel);
+ if (ret != NO_ERROR) {
+ parcel->setDataPosition(position);
+ finalSize--;
+ }
+ }
+ if (size != finalSize) {
+ size_t position = parcel->dataPosition();
+ parcel->setDataPosition(sizePosition);
+ parcel->writeInt32(finalSize);
+ parcel->setDataPosition(position);
+ }
+ return NO_ERROR;
+}
+
+bool AudioProductStrategy::attributesMatches(const audio_attributes_t refAttributes,
+ const audio_attributes_t clientAttributes)
+{
+ if (refAttributes == AUDIO_ATTRIBUTES_INITIALIZER) {
+ // The default product strategy is the strategy that holds default attributes by convention.
+ // All attributes that fail to match will follow the default strategy for routing.
+ // Choosing the default must be done as a fallback, the attributes match shall not
+ // select the default.
+ return false;
+ }
+ return ((refAttributes.usage == AUDIO_USAGE_UNKNOWN) ||
+ (clientAttributes.usage == refAttributes.usage)) &&
+ ((refAttributes.content_type == AUDIO_CONTENT_TYPE_UNKNOWN) ||
+ (clientAttributes.content_type == refAttributes.content_type)) &&
+ ((refAttributes.flags == AUDIO_FLAG_NONE) ||
+ (clientAttributes.flags != AUDIO_FLAG_NONE &&
+ (clientAttributes.flags & refAttributes.flags) == clientAttributes.flags)) &&
+ ((strlen(refAttributes.tags) == 0) ||
+ (std::strcmp(clientAttributes.tags, refAttributes.tags) == 0));
+}
+
+} // namespace android
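A stand-alone sketch of the wildcard rule in attributesMatches(): each field of the reference attributes either holds its unknown/none wildcard value or must agree with the client's value, and tags are compared only when the reference defines them (the additional flags subset check is omitted here for brevity). Plain enums stand in for the audio_*_t types.

    #include <cstdio>
    #include <cstring>

    enum Usage   { USAGE_UNKNOWN = 0, USAGE_MEDIA, USAGE_ALARM };
    enum Content { CONTENT_UNKNOWN = 0, CONTENT_MUSIC, CONTENT_SPEECH };

    struct Attr {
        Usage usage;
        Content content;
        const char *tags;   // "" means no tags
    };

    static bool attributesMatch(const Attr &ref, const Attr &client) {
        return (ref.usage == USAGE_UNKNOWN     || client.usage == ref.usage) &&
               (ref.content == CONTENT_UNKNOWN || client.content == ref.content) &&
               (std::strlen(ref.tags) == 0     || std::strcmp(client.tags, ref.tags) == 0);
    }

    int main() {
        const Attr mediaRule = { USAGE_MEDIA, CONTENT_UNKNOWN, "" };   // any content
        const Attr alarmRule = { USAGE_ALARM, CONTENT_UNKNOWN, "" };
        const Attr client    = { USAGE_MEDIA, CONTENT_MUSIC, "" };
        std::printf("media rule matches: %s\n", attributesMatch(mediaRule, client) ? "yes" : "no");
        std::printf("alarm rule matches: %s\n", attributesMatch(alarmRule, client) ? "yes" : "no");
        return 0;
    }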
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 72a23e3..baa1469 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -355,7 +355,10 @@
}
// create the IAudioRecord
- status = createRecord_l(0 /*epoch*/, mOpPackageName);
+ {
+ AutoMutex lock(mLock);
+ status = createRecord_l(0 /*epoch*/, mOpPackageName);
+ }
ALOGV("%s(%d): status %d", __func__, mPortId, status);
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index baeae8b..35adb72 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -25,6 +25,7 @@
#include <media/AudioSystem.h>
#include <media/IAudioFlinger.h>
#include <media/IAudioPolicyService.h>
+#include <media/TypeConverter.h>
#include <math.h>
#include <system/audio.h>
@@ -521,10 +522,12 @@
if (ioDesc == 0 || ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return;
audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
- Vector < wp<AudioDeviceCallback> > callbacks;
-
+ Vector<sp<AudioDeviceCallback>> callbacksToCall;
{
Mutex::Autolock _l(mLock);
+ bool deviceValidOrChanged = false;
+ bool sendCallbacks = false;
+ ssize_t ioIndex = -1;
switch (event) {
case AUDIO_OUTPUT_OPENED:
@@ -542,11 +545,17 @@
if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
deviceId = ioDesc->getDeviceId();
if (event == AUDIO_OUTPUT_OPENED || event == AUDIO_INPUT_OPENED) {
- ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
+ ioIndex = mAudioDeviceCallbackProxies.indexOfKey(ioDesc->mIoHandle);
if (ioIndex >= 0) {
- callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+ sendCallbacks = true;
+ deviceValidOrChanged = true;
}
}
+ if (event == AUDIO_OUTPUT_REGISTERED || event == AUDIO_INPUT_REGISTERED) {
+ ioIndex = mAudioDeviceCallbackProxies.indexOfKey(ioDesc->mIoHandle);
+ sendCallbacks = (ioIndex >= 0)
+ && !mAudioDeviceCallbackProxies.valueAt(ioIndex).notifiedOnce();
+ }
}
ALOGV("ioConfigChanged() new %s %s %d samplingRate %u, format %#x channel mask %#x "
"frameCount %zu deviceId %d",
@@ -568,7 +577,7 @@
event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
mIoDescriptors.removeItem(ioDesc->mIoHandle);
- mAudioDeviceCallbacks.removeItem(ioDesc->mIoHandle);
+ mAudioDeviceCallbackProxies.removeItem(ioDesc->mIoHandle);
} break;
case AUDIO_OUTPUT_CONFIG_CHANGED:
@@ -583,11 +592,10 @@
mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
if (deviceId != ioDesc->getDeviceId()) {
+ deviceValidOrChanged = true;
deviceId = ioDesc->getDeviceId();
- ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
- if (ioIndex >= 0) {
- callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
- }
+ ioIndex = mAudioDeviceCallbackProxies.indexOfKey(ioDesc->mIoHandle);
+ sendCallbacks = ioIndex >= 0;
}
ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
"channel mask %#x frameCount %zu frameCountHAL %zu deviceId %d",
@@ -598,24 +606,34 @@
} break;
}
- }
- bool callbackRemoved = false;
- // callbacks.size() != 0 => ioDesc->mIoHandle and deviceId are valid
- for (size_t i = 0; i < callbacks.size(); ) {
- sp<AudioDeviceCallback> callback = callbacks[i].promote();
- if (callback.get() != nullptr) {
- callback->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
- i++;
- } else {
- callbacks.removeAt(i);
- callbackRemoved = true;
+
+ // sendCallbacks true => ioDesc->mIoHandle and deviceId are valid
+ if (sendCallbacks) {
+ AudioDeviceCallbackProxies &callbackProxies =
+ mAudioDeviceCallbackProxies.editValueAt(ioIndex);
+ for (size_t i = 0; i < callbackProxies.size(); ) {
+ sp<AudioDeviceCallback> callback = callbackProxies[i]->callback();
+ if (callback.get() != nullptr) {
+ // Call the callback only if the device actually changed, the input or output
+ // was opened or closed or the client was newly registered and the callback
+ // was never called
+ if (!callbackProxies[i]->notifiedOnce() || deviceValidOrChanged) {
+ callbacksToCall.add(callback);
+ callbackProxies[i]->setNotifiedOnce();
+ }
+ i++;
+ } else {
+ callbackProxies.removeAt(i);
+ }
+ }
+ callbackProxies.setNotifiedOnce();
}
}
- // clean up callback list while we are here if some clients have disappeared without
- // unregistering their callback
- if (callbackRemoved) {
- Mutex::Autolock _l(mLock);
- mAudioDeviceCallbacks.replaceValueFor(ioDesc->mIoHandle, callbacks);
+
+ // Callbacks must be called without mLock held. Otherwise we could deadlock if a callback
+ // calls, for example, getRoutedDevice(), which updates the device and tries to acquire mLock.
+ for (size_t i = 0; i < callbacksToCall.size(); i++) {
+ callbacksToCall[i]->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
}
}
@@ -671,20 +689,21 @@
const wp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
{
Mutex::Autolock _l(mLock);
- Vector < wp<AudioDeviceCallback> > callbacks;
- ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+ AudioDeviceCallbackProxies callbackProxies;
+ ssize_t ioIndex = mAudioDeviceCallbackProxies.indexOfKey(audioIo);
if (ioIndex >= 0) {
- callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+ callbackProxies = mAudioDeviceCallbackProxies.valueAt(ioIndex);
}
- for (size_t cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
- if (callbacks[cbIndex].unsafe_get() == callback.unsafe_get()) {
+ for (size_t cbIndex = 0; cbIndex < callbackProxies.size(); cbIndex++) {
+ sp<AudioDeviceCallback> cbk = callbackProxies[cbIndex]->callback();
+ if (cbk.get() == callback.unsafe_get()) {
return INVALID_OPERATION;
}
}
- callbacks.add(callback);
-
- mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+ callbackProxies.add(new AudioDeviceCallbackProxy(callback));
+ callbackProxies.resetNotifiedOnce();
+ mAudioDeviceCallbackProxies.replaceValueFor(audioIo, callbackProxies);
return NO_ERROR;
}
@@ -692,26 +711,26 @@
const wp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
{
Mutex::Autolock _l(mLock);
- ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+ ssize_t ioIndex = mAudioDeviceCallbackProxies.indexOfKey(audioIo);
if (ioIndex < 0) {
return INVALID_OPERATION;
}
- Vector < wp<AudioDeviceCallback> > callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
-
+ AudioDeviceCallbackProxies callbackProxies = mAudioDeviceCallbackProxies.valueAt(ioIndex);
size_t cbIndex;
- for (cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
- if (callbacks[cbIndex].unsafe_get() == callback.unsafe_get()) {
+ for (cbIndex = 0; cbIndex < callbackProxies.size(); cbIndex++) {
+ sp<AudioDeviceCallback> cbk = callbackProxies[cbIndex]->callback();
+ if (cbk.get() == callback.unsafe_get()) {
break;
}
}
- if (cbIndex == callbacks.size()) {
+ if (cbIndex == callbackProxies.size()) {
return INVALID_OPERATION;
}
- callbacks.removeAt(cbIndex);
- if (callbacks.size() != 0) {
- mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+ callbackProxies.removeAt(cbIndex);
+ if (callbackProxies.size() != 0) {
+ mAudioDeviceCallbackProxies.replaceValueFor(audioIo, callbackProxies);
} else {
- mAudioDeviceCallbacks.removeItem(audioIo);
+ mAudioDeviceCallbackProxies.removeItem(audioIo);
}
return NO_ERROR;
}
@@ -784,7 +803,8 @@
status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
const char *address = "";
@@ -798,7 +818,7 @@
if (device_name != NULL) {
name = device_name;
}
- return aps->setDeviceConnectionState(device, state, address, name);
+ return aps->setDeviceConnectionState(device, state, address, name, encodedFormat);
}
audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -812,7 +832,8 @@
status_t AudioSystem::handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
const char *address = "";
@@ -826,7 +847,7 @@
if (device_name != NULL) {
name = device_name;
}
- return aps->handleDeviceConfigChange(device, address, name);
+ return aps->handleDeviceConfigChange(device, address, name, encodedFormat);
}
status_t AudioSystem::setPhoneState(audio_mode_t state)
@@ -869,13 +890,14 @@
const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId)
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return NO_INIT;
return aps->getOutputForAttr(attr, output, session, stream, pid, uid,
config,
- flags, selectedDeviceId, portId);
+ flags, selectedDeviceId, portId, secondaryOutputs);
}
status_t AudioSystem::startOutput(audio_port_handle_t portId)
@@ -968,7 +990,7 @@
uint32_t AudioSystem::getStrategyForStream(audio_stream_type_t stream)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return 0;
+ if (aps == 0) return PRODUCT_STRATEGY_NONE;
return aps->getStrategyForStream(stream);
}
@@ -1279,6 +1301,20 @@
return aps->getMasterMono(mono);
}
+status_t AudioSystem::setMasterBalance(float balance)
+{
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ return af->setMasterBalance(balance);
+}
+
+status_t AudioSystem::getMasterBalance(float *balance)
+{
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return PERMISSION_DENIED;
+ return af->getMasterBalance(balance);
+}
+
float AudioSystem::getStreamVolumeDB(audio_stream_type_t stream, int index, audio_devices_t device)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -1311,7 +1347,6 @@
return aps->setSurroundFormatEnabled(audioFormat, enabled);
}
-
status_t AudioSystem::setAssistantUid(uid_t uid)
{
const sp <IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -1335,6 +1370,84 @@
return aps->isHapticPlaybackSupported();
}
+status_t AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats) {
+ const sp <IAudioPolicyService>
+ & aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+}
+
+status_t AudioSystem::listAudioProductStrategies(AudioProductStrategyVector &strategies)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->listAudioProductStrategies(strategies);
+}
+
+audio_attributes_t AudioSystem::streamTypeToAttributes(audio_stream_type_t stream)
+{
+ AudioProductStrategyVector strategies;
+ listAudioProductStrategies(strategies);
+ for (const auto &strategy : strategies) {
+ auto attrVect = strategy.getAudioAttributes();
+ auto iter = std::find_if(begin(attrVect), end(attrVect), [&stream](const auto &attributes) {
+ return attributes.getStreamType() == stream; });
+ if (iter != end(attrVect)) {
+ return iter->getAttributes();
+ }
+ }
+ ALOGE("invalid stream type %s when converting to attributes", toString(stream).c_str());
+ return AUDIO_ATTRIBUTES_INITIALIZER;
+}
+
+audio_stream_type_t AudioSystem::attributesToStreamType(const audio_attributes_t &attr)
+{
+ product_strategy_t psId;
+ status_t ret = AudioSystem::getProductStrategyFromAudioAttributes(AudioAttributes(attr), psId);
+ if (ret != NO_ERROR) {
+ ALOGE("no strategy found for attributes %s", toString(attr).c_str());
+ return AUDIO_STREAM_MUSIC;
+ }
+ AudioProductStrategyVector strategies;
+ listAudioProductStrategies(strategies);
+ for (const auto &strategy : strategies) {
+ if (strategy.getId() == psId) {
+ auto attrVect = strategy.getAudioAttributes();
+ auto iter = std::find_if(begin(attrVect), end(attrVect), [&attr](const auto &refAttr) {
+ return AudioProductStrategy::attributesMatches(
+ refAttr.getAttributes(), attr); });
+ if (iter != end(attrVect)) {
+ return iter->getStreamType();
+ }
+ }
+ }
+ ALOGE("invalid attributes %s when converting to stream", toString(attr).c_str());
+ return AUDIO_STREAM_MUSIC;
+}
+
+status_t AudioSystem::getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getProductStrategyFromAudioAttributes(aa, productStrategy);
+}
+
+status_t AudioSystem::listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->listAudioVolumeGroups(groups);
+}
+
+status_t AudioSystem::getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getVolumeGroupFromAudioAttributes(aa, volumeGroup);
+}
// ---------------------------------------------------------------------------
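A minimal sketch of the locking change in ioConfigChanged(): the callbacks to invoke are collected while holding the lock and invoked only after it is released, since a callback may re-enter a method that needs the same lock. std::function and std::mutex stand in for the real device-callback proxies.

    #include <cstdio>
    #include <functional>
    #include <mutex>
    #include <vector>

    class Notifier {
    public:
        void addCallback(std::function<void(int)> cb) {
            std::lock_guard<std::mutex> guard(mLock);
            mCallbacks.push_back(std::move(cb));
        }

        void deviceChanged(int deviceId) {
            std::vector<std::function<void(int)>> callbacksToCall;
            {
                std::lock_guard<std::mutex> guard(mLock);
                callbacksToCall = mCallbacks;   // snapshot under the lock
            }
            // Callbacks run without mLock held, so they may safely re-enter.
            for (auto &cb : callbacksToCall) {
                cb(deviceId);
            }
        }

    private:
        std::mutex mLock;
        std::vector<std::function<void(int)>> mCallbacks;
    };

    int main() {
        Notifier notifier;
        notifier.addCallback([](int id) { std::printf("device updated to %d\n", id); });
        notifier.deviceChanged(42);
        return 0;
    }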
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index e9a0e22..7881bb8 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -33,7 +33,6 @@
#include <media/IAudioFlinger.h>
#include <media/IAudioPolicyService.h>
#include <media/AudioParameter.h>
-#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>
#include <media/AudioSystem.h>
#include <media/MediaAnalyticsItem.h>
@@ -293,6 +292,11 @@
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0)
{
+ mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
+ mAttributes.usage = AUDIO_USAGE_UNKNOWN;
+ mAttributes.flags = 0x0;
+ strcpy(mAttributes.tags, "");
+
(void)set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
@@ -324,6 +328,11 @@
mPausedPosition(0),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
+ mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
+ mAttributes.usage = AUDIO_USAGE_UNKNOWN;
+ mAttributes.flags = 0x0;
+ strcpy(mAttributes.tags, "");
+
(void)set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
@@ -477,7 +486,7 @@
__func__,
mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
mStreamType = AUDIO_STREAM_DEFAULT;
- audio_attributes_flags_to_audio_output_flags(mAttributes.flags, flags);
+ audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
}
// these below should probably come from the audioFlinger too...
@@ -612,8 +621,10 @@
}
// create the IAudioTrack
- status = createTrack_l();
-
+ {
+ AutoMutex lock(mLock);
+ status = createTrack_l();
+ }
if (status != NO_ERROR) {
if (mAudioTrackThread != 0) {
mAudioTrackThread->requestExit(); // see comment in AudioTrack.h
@@ -1380,7 +1391,7 @@
audio_stream_type_t AudioTrack::streamType() const
{
if (mStreamType == AUDIO_STREAM_DEFAULT) {
- return audio_attributes_to_stream_type(&mAttributes);
+ return AudioSystem::attributesToStreamType(mAttributes);
}
return mStreamType;
}
@@ -1463,7 +1474,7 @@
IAudioFlinger::CreateTrackInput input;
if (mStreamType != AUDIO_STREAM_DEFAULT) {
- stream_type_to_audio_attributes(mStreamType, &input.attr);
+ input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
} else {
input.attr = mAttributes;
}
@@ -2881,7 +2892,8 @@
mPortId, mStatus, mState, mSessionId, mFlags);
result.appendFormat(" stream type(%d), left - right volume(%f, %f)\n",
(mStreamType == AUDIO_STREAM_DEFAULT) ?
- audio_attributes_to_stream_type(&mAttributes) : mStreamType,
+ AudioSystem::attributesToStreamType(mAttributes) :
+ mStreamType,
mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
mFormat, mChannelMask, mChannelCount);
@@ -2949,7 +2961,7 @@
}
AutoMutex lock(mLock);
if (mDeviceCallback.unsafe_get() != callback.get()) {
- ALOGW("%s(%d): removing different callback!", __func__, mPortId);
+ ALOGW("%s removing different callback!", __FUNCTION__);
return INVALID_OPERATION;
}
mDeviceCallback.clear();
diff --git a/media/libaudioclient/AudioVolumeGroup.cpp b/media/libaudioclient/AudioVolumeGroup.cpp
new file mode 100644
index 0000000..e79a362
--- /dev/null
+++ b/media/libaudioclient/AudioVolumeGroup.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioVolumeGroup"
+
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <binder/Parcel.h>
+
+#include <media/AudioVolumeGroup.h>
+#include <media/AudioAttributes.h>
+
+namespace android {
+
+status_t AudioVolumeGroup::readFromParcel(const Parcel *parcel)
+{
+ status_t ret = parcel->readUtf8FromUtf16(&mName);
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+ mGroupId = static_cast<volume_group_t>(parcel->readInt32());
+ size_t size = static_cast<size_t>(parcel->readInt32());
+ for (size_t i = 0; i < size; i++) {
+ AudioAttributes attribute;
+ ret = attribute.readFromParcel(parcel);
+ if (ret != NO_ERROR) {
+ mAudioAttributes.clear();
+ return ret;
+ }
+ mAudioAttributes.push_back(attribute.getAttributes());
+ }
+ size = static_cast<size_t>(parcel->readInt32());
+ for (size_t i = 0; i < size; i++) {
+ audio_stream_type_t stream = static_cast<audio_stream_type_t>(parcel->readInt32());
+ mStreams.push_back(stream);
+ }
+ return NO_ERROR;
+}
+
+status_t AudioVolumeGroup::writeToParcel(Parcel *parcel) const
+{
+ parcel->writeUtf8AsUtf16(mName);
+ parcel->writeInt32(static_cast<int32_t>(mGroupId));
+ size_t size = mAudioAttributes.size();
+ size_t sizePosition = parcel->dataPosition();
+ parcel->writeInt32(size);
+ size_t finalSize = size;
+ for (const auto &attributes : mAudioAttributes) {
+ size_t position = parcel->dataPosition();
+ AudioAttributes attribute(attributes);
+ status_t ret = attribute.writeToParcel(parcel);
+ if (ret != NO_ERROR) {
+ parcel->setDataPosition(position);
+ finalSize--;
+ }
+ }
+ if (size != finalSize) {
+ size_t position = parcel->dataPosition();
+ parcel->setDataPosition(sizePosition);
+ parcel->writeInt32(finalSize);
+ parcel->setDataPosition(position);
+ }
+ parcel->writeInt32(mStreams.size());
+ for (const auto &stream : mStreams) {
+ parcel->writeInt32(static_cast<int32_t>(stream));
+ }
+ return NO_ERROR;
+}
+
+} // namespace android
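AudioVolumeGroup::writeToParcel() above follows the libaudioclient convention of reserving the element count, then rewinding and patching it when an element fails to serialize, so a partially written vector still parcels consistently. A self-contained sketch of that pattern in isolation (the Item type and writeVector helper are illustrative only, not part of this change):

#include <binder/Parcel.h>
#include <utils/Errors.h>
#include <vector>

// Illustrative only: the "reserve count, patch on partial failure" pattern used
// by AudioVolumeGroup::writeToParcel(). Item is any type with writeToParcel().
template <typename Item>
android::status_t writeVector(android::Parcel *parcel, const std::vector<Item> &items) {
    size_t count = items.size();
    size_t countPos = parcel->dataPosition();          // remember where the count lives
    parcel->writeInt32(static_cast<int32_t>(count));
    size_t written = count;
    for (const auto &item : items) {
        size_t itemPos = parcel->dataPosition();
        if (item.writeToParcel(parcel) != android::NO_ERROR) {
            parcel->setDataPosition(itemPos);          // drop the partially written element
            written--;
        }
    }
    if (written != count) {
        size_t endPos = parcel->dataPosition();
        parcel->setDataPosition(countPos);             // rewind and patch the real count
        parcel->writeInt32(static_cast<int32_t>(written));
        parcel->setDataPosition(endPos);
    }
    return android::NO_ERROR;
}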
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 00678c2..825cd4e 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -87,6 +87,8 @@
SYSTEM_READY,
FRAME_COUNT_HAL,
GET_MICROPHONES,
+ SET_MASTER_BALANCE,
+ GET_MASTER_BALANCE,
};
#define MAX_ITEMS_PER_LIST 1024
@@ -242,6 +244,34 @@
return reply.readInt32();
}
+ status_t setMasterBalance(float balance) override
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeFloat(balance);
+ status_t status = remote()->transact(SET_MASTER_BALANCE, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return reply.readInt32();
+ }
+
+ status_t getMasterBalance(float *balance) const override
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ status_t status = remote()->transact(GET_MASTER_BALANCE, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = (status_t)reply.readInt32();
+ if (status != NO_ERROR) {
+ return status;
+ }
+ *balance = reply.readFloat();
+ return NO_ERROR;
+ }
+
virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
audio_io_handle_t output)
{
@@ -1050,6 +1080,21 @@
reply->writeInt32( masterMute() );
return NO_ERROR;
} break;
+ case SET_MASTER_BALANCE: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ reply->writeInt32( setMasterBalance(data.readFloat()) );
+ return NO_ERROR;
+ } break;
+ case GET_MASTER_BALANCE: {
+ CHECK_INTERFACE(IAudioFlinger, data, reply);
+ float f;
+ const status_t status = getMasterBalance(&f);
+ reply->writeInt32((int32_t)status);
+ if (status == NO_ERROR) {
+ (void)reply->writeFloat(f);
+ }
+ return NO_ERROR;
+ } break;
case SET_STREAM_VOLUME: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
int stream = data.readInt32();
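The SET_MASTER_BALANCE/GET_MASTER_BALANCE pair added above is exposed to native clients through AudioSystem::setMasterBalance()/getMasterBalance(), declared later in this change. A hedged usage sketch, assuming the conventional balance range of -1.0f (full left) to 1.0f (full right):

#include <algorithm>
#include <media/AudioSystem.h>
#include <utils/Errors.h>

using namespace android;

// Illustrative only: read the current master balance, nudge it, and write it back.
status_t nudgeMasterBalance(float step) {
    float balance = 0.f;
    status_t status = AudioSystem::getMasterBalance(&balance);
    if (status != NO_ERROR) {
        return status;
    }
    balance = std::min(1.f, std::max(-1.f, balance + step));  // assumed [-1, 1] range
    return AudioSystem::setMasterBalance(balance);
}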
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 272415c..feb1317 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -92,6 +92,11 @@
IS_HAPTIC_PLAYBACK_SUPPORTED,
SET_UID_DEVICE_AFFINITY,
REMOVE_UID_DEVICE_AFFINITY,
+ GET_OFFLOAD_FORMATS_A2DP,
+ LIST_AUDIO_PRODUCT_STRATEGIES,
+ GET_STRATEGY_FOR_ATTRIBUTES,
+ LIST_AUDIO_VOLUME_GROUPS,
+ GET_VOLUME_GROUP_FOR_ATTRIBUTES
};
#define MAX_ITEMS_PER_LIST 1024
@@ -108,7 +113,8 @@
audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -116,6 +122,7 @@
data.writeInt32(static_cast <uint32_t>(state));
data.writeCString(device_address);
data.writeCString(device_name);
+ data.writeInt32(static_cast <uint32_t>(encodedFormat));
remote()->transact(SET_DEVICE_CONNECTION_STATE, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
@@ -134,13 +141,15 @@
virtual status_t handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(static_cast <uint32_t>(device));
data.writeCString(device_address);
data.writeCString(device_name);
+ data.writeInt32(static_cast <uint32_t>(encodedFormat));
remote()->transact(HANDLE_DEVICE_CONFIG_CHANGE, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
@@ -182,16 +191,17 @@
return static_cast <audio_io_handle_t> (reply.readInt32());
}
- virtual status_t getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session,
- audio_stream_type_t *stream,
- pid_t pid,
- uid_t uid,
- const audio_config_t *config,
- audio_output_flags_t flags,
- audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId)
+ status_t getOutputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ audio_stream_type_t *stream,
+ pid_t pid,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t flags,
+ audio_port_handle_t *selectedDeviceId,
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs) override
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -217,6 +227,10 @@
ALOGE("getOutputForAttr NULL portId - shouldn't happen");
return BAD_VALUE;
}
+ if (secondaryOutputs == NULL) {
+ ALOGE("getOutputForAttr NULL secondaryOutputs - shouldn't happen");
+ return BAD_VALUE;
+ }
if (attr == NULL) {
data.writeInt32(0);
} else {
@@ -251,7 +265,9 @@
}
*selectedDeviceId = (audio_port_handle_t)reply.readInt32();
*portId = (audio_port_handle_t)reply.readInt32();
- return status;
+ secondaryOutputs->resize(reply.readInt32());
+ return reply.read(secondaryOutputs->data(),
+ secondaryOutputs->size() * sizeof(audio_io_handle_t));
}
virtual status_t startOutput(audio_port_handle_t portId)
@@ -407,7 +423,7 @@
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(static_cast <uint32_t>(stream));
remote()->transact(GET_STRATEGY_FOR_STREAM, data, &reply);
- return reply.readInt32();
+ return reply.readUint32();
}
virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream)
@@ -884,7 +900,30 @@
return reply.readInt32();
}
- virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
+ virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats)
+ {
+ if (formats == NULL) {
+ return BAD_VALUE;
+ }
+
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ status_t status = remote()->transact(GET_OFFLOAD_FORMATS_A2DP, data, &reply);
+ if (status != NO_ERROR || (status = (status_t)reply.readInt32()) != NO_ERROR) {
+ return status;
+ }
+
+ size_t list_size = reply.readUint32();
+
+ for (size_t i = 0; i < list_size; i++) {
+ formats->push_back(static_cast<audio_format_t>(reply.readInt32()));
+ }
+ return NO_ERROR;
+ }
+
+
+ virtual status_t addStreamDefaultEffect(const effect_uuid_t *type,
const String16& opPackageName,
const effect_uuid_t *uuid,
int32_t priority,
@@ -1023,19 +1062,116 @@
return status;
}
- virtual status_t removeUidDeviceAffinities(uid_t uid)
- {
+ virtual status_t removeUidDeviceAffinities(uid_t uid) {
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32((int32_t) uid);
- status_t status = remote()->transact(REMOVE_UID_DEVICE_AFFINITY, data, &reply);
+ status_t status =
+ remote()->transact(REMOVE_UID_DEVICE_AFFINITY, data, &reply);
if (status == NO_ERROR) {
- status = (status_t)reply.readInt32();
+ status = (status_t) reply.readInt32();
}
return status;
}
+
+ virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(LIST_AUDIO_PRODUCT_STRATEGIES, data, &reply);
+ if (status != NO_ERROR) {
+ ALOGE("%s: permission denied", __func__);
+ return status;
+ }
+ status = static_cast<status_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ uint32_t numStrategies = static_cast<uint32_t>(reply.readInt32());
+ for (size_t i = 0; i < numStrategies; i++) {
+ AudioProductStrategy strategy;
+ status = strategy.readFromParcel(&reply);
+ if (status != NO_ERROR) {
+ ALOGE("%s: failed to read strategies", __FUNCTION__);
+ strategies.clear();
+ return status;
+ }
+ strategies.push_back(strategy);
+ }
+ return NO_ERROR;
+ }
+
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ status_t status = aa.writeToParcel(&data);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(GET_STRATEGY_FOR_ATTRIBUTES, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast<status_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ productStrategy = static_cast<product_strategy_t>(reply.readInt32());
+ return NO_ERROR;
+ }
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(LIST_AUDIO_VOLUME_GROUPS, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast<status_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ uint32_t numGroups = static_cast<uint32_t>(reply.readInt32());
+ for (size_t i = 0; i < numGroups; i++) {
+ AudioVolumeGroup group;
+ status = group.readFromParcel(&reply);
+ if (status != NO_ERROR) {
+ ALOGE("%s: failed to read volume groups", __FUNCTION__);
+ groups.clear();
+ return status;
+ }
+ groups.push_back(group);
+ }
+ return NO_ERROR;
+ }
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ status_t status = aa.writeToParcel(&data);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(GET_VOLUME_GROUP_FOR_ATTRIBUTES, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast<status_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ volumeGroup = static_cast<volume_group_t>(reply.readInt32());
+ return NO_ERROR;
+ }
};
IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -1054,7 +1190,6 @@
case START_INPUT:
case STOP_INPUT:
case RELEASE_INPUT:
- case GET_STRATEGY_FOR_STREAM:
case GET_OUTPUT_FOR_EFFECT:
case REGISTER_EFFECT:
case UNREGISTER_EFFECT:
@@ -1089,14 +1224,15 @@
case SET_STREAM_VOLUME:
case REGISTER_POLICY_MIXES:
case SET_MASTER_MONO:
- case START_AUDIO_SOURCE:
- case STOP_AUDIO_SOURCE:
case GET_SURROUND_FORMATS:
case SET_SURROUND_FORMAT_ENABLED:
case SET_ASSISTANT_UID:
case SET_A11Y_SERVICES_UIDS:
case SET_UID_DEVICE_AFFINITY:
- case REMOVE_UID_DEVICE_AFFINITY: {
+ case REMOVE_UID_DEVICE_AFFINITY:
+ case GET_OFFLOAD_FORMATS_A2DP:
+ case LIST_AUDIO_VOLUME_GROUPS:
+ case GET_VOLUME_GROUP_FOR_ATTRIBUTES: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1121,6 +1257,7 @@
static_cast <audio_policy_dev_state_t>(data.readInt32());
const char *device_address = data.readCString();
const char *device_name = data.readCString();
+ audio_format_t codecFormat = static_cast <audio_format_t>(data.readInt32());
if (device_address == nullptr || device_name == nullptr) {
ALOGE("Bad Binder transaction: SET_DEVICE_CONNECTION_STATE for device %u", device);
reply->writeInt32(static_cast<int32_t> (BAD_VALUE));
@@ -1128,7 +1265,8 @@
reply->writeInt32(static_cast<uint32_t> (setDeviceConnectionState(device,
state,
device_address,
- device_name)));
+ device_name,
+ codecFormat)));
}
return NO_ERROR;
} break;
@@ -1154,13 +1292,16 @@
static_cast <audio_devices_t>(data.readInt32());
const char *device_address = data.readCString();
const char *device_name = data.readCString();
+ audio_format_t codecFormat =
+ static_cast <audio_format_t>(data.readInt32());
if (device_address == nullptr || device_name == nullptr) {
ALOGE("Bad Binder transaction: HANDLE_DEVICE_CONFIG_CHANGE for device %u", device);
reply->writeInt32(static_cast<int32_t> (BAD_VALUE));
} else {
reply->writeInt32(static_cast<uint32_t> (handleDeviceConfigChange(device,
device_address,
- device_name)));
+ device_name,
+ codecFormat)));
}
return NO_ERROR;
} break;
@@ -1223,16 +1364,19 @@
audio_port_handle_t selectedDeviceId = data.readInt32();
audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
audio_io_handle_t output = 0;
+ std::vector<audio_io_handle_t> secondaryOutputs;
status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
&output, session, &stream, pid, uid,
&config,
- flags, &selectedDeviceId, &portId);
+ flags, &selectedDeviceId, &portId, &secondaryOutputs);
reply->writeInt32(status);
reply->writeInt32(output);
reply->writeInt32(stream);
reply->writeInt32(selectedDeviceId);
reply->writeInt32(portId);
- return NO_ERROR;
+ reply->writeInt32(secondaryOutputs.size());
+ return reply->write(secondaryOutputs.data(),
+ secondaryOutputs.size() * sizeof(audio_io_handle_t));
} break;
case START_OUTPUT: {
@@ -1344,7 +1488,7 @@
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_stream_type_t stream =
static_cast <audio_stream_type_t>(data.readInt32());
- reply->writeInt32(getStrategyForStream(stream));
+ reply->writeUint32(getStrategyForStream(stream));
return NO_ERROR;
} break;
@@ -1745,6 +1889,21 @@
return NO_ERROR;
}
+ case GET_OFFLOAD_FORMATS_A2DP: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ std::vector<audio_format_t> encodingFormats;
+ status_t status = getHwOffloadEncodingFormatsSupportedForA2DP(&encodingFormats);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ reply->writeUint32(static_cast<uint32_t>(encodingFormats.size()));
+ for (size_t i = 0; i < encodingFormats.size(); i++)
+ reply->writeInt32(static_cast<int32_t>(encodingFormats[i]));
+ return NO_ERROR;
+ }
+
+
case ADD_STREAM_DEFAULT_EFFECT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
effect_uuid_t type;
@@ -1886,6 +2045,96 @@
return NO_ERROR;
}
+ case LIST_AUDIO_PRODUCT_STRATEGIES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioProductStrategyVector strategies;
+ status_t status = listAudioProductStrategies(strategies);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ size_t size = strategies.size();
+ size_t sizePosition = reply->dataPosition();
+ reply->writeInt32(size);
+ size_t finalSize = size;
+ for (size_t i = 0; i < size; i++) {
+ size_t position = reply->dataPosition();
+ if (strategies[i].writeToParcel(reply) != NO_ERROR) {
+ reply->setDataPosition(position);
+ finalSize--;
+ }
+ }
+ if (size != finalSize) {
+ size_t position = reply->dataPosition();
+ reply->setDataPosition(sizePosition);
+ reply->writeInt32(finalSize);
+ reply->setDataPosition(position);
+ }
+ return NO_ERROR;
+ }
+
+ case GET_STRATEGY_FOR_ATTRIBUTES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioAttributes attributes;
+ status_t status = attributes.readFromParcel(&data);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ product_strategy_t strategy;
+ status = getProductStrategyFromAudioAttributes(attributes, strategy);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ reply->writeUint32(static_cast<uint32_t>(strategy));
+ return NO_ERROR;
+ }
+
+ case LIST_AUDIO_VOLUME_GROUPS: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioVolumeGroupVector groups;
+ status_t status = listAudioVolumeGroups(groups);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ size_t size = groups.size();
+ size_t sizePosition = reply->dataPosition();
+ reply->writeInt32(size);
+ size_t finalSize = size;
+ for (size_t i = 0; i < size; i++) {
+ size_t position = reply->dataPosition();
+ if (groups[i].writeToParcel(reply) != NO_ERROR) {
+ reply->setDataPosition(position);
+ finalSize--;
+ }
+ }
+ if (size != finalSize) {
+ size_t position = reply->dataPosition();
+ reply->setDataPosition(sizePosition);
+ reply->writeInt32(finalSize);
+ reply->setDataPosition(position);
+ }
+ return NO_ERROR;
+ }
+
+ case GET_VOLUME_GROUP_FOR_ATTRIBUTES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioAttributes attributes;
+ status_t status = attributes.readFromParcel(&data);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ volume_group_t group;
+ status = getVolumeGroupFromAudioAttributes(attributes, group);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ reply->writeUint32(static_cast<uint32_t>(group));
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
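The LIST_AUDIO_PRODUCT_STRATEGIES and LIST_AUDIO_VOLUME_GROUPS transactions handled above are wrapped by AudioSystem helpers declared later in this change. A minimal client-side sketch that just logs what the policy engine exposes (illustrative only):

#define LOG_TAG "EngineConfigDump"

#include <media/AudioSystem.h>
#include <media/AudioProductStrategy.h>
#include <media/AudioVolumeGroup.h>
#include <utils/Log.h>

using namespace android;

// Illustrative only: enumerate product strategies and volume groups and log them.
void logEngineConfiguration() {
    AudioProductStrategyVector strategies;
    if (AudioSystem::listAudioProductStrategies(strategies) == NO_ERROR) {
        for (const auto &strategy : strategies) {
            ALOGI("strategy %u: %s", static_cast<uint32_t>(strategy.getId()),
                  strategy.getName().c_str());
        }
    }
    AudioVolumeGroupVector groups;
    if (AudioSystem::listAudioVolumeGroups(groups) == NO_ERROR) {
        for (const auto &group : groups) {
            ALOGI("volume group %u: %s (%zu attributes, %zu streams)",
                  static_cast<uint32_t>(group.getId()), group.getName().c_str(),
                  group.getAudioAttributes().size(), group.getStreamTypes().size());
        }
    }
}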
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index 5c5dbd6..536b00d 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -20,7 +20,6 @@
#include <math.h>
#include <utils/Log.h>
#include <cutils/properties.h>
-#include <media/AudioPolicyHelper.h>
#include "media/ToneGenerator.h"
@@ -1242,7 +1241,7 @@
if (mStreamType == AUDIO_STREAM_VOICE_CALL) {
streamType = AUDIO_STREAM_DTMF;
}
- stream_type_to_audio_attributes(streamType, &attr);
+ attr = AudioSystem::streamTypeToAttributes(streamType);
const size_t frameCount = mProcessSize;
status_t status = mpAudioTrack->set(
diff --git a/media/libaudioclient/include/media/AudioAttributes.h b/media/libaudioclient/include/media/AudioAttributes.h
new file mode 100644
index 0000000..0a35e9e
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioAttributes.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <media/AudioCommonTypes.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+class AudioAttributes : public Parcelable
+{
+public:
+ AudioAttributes() = default;
+ AudioAttributes(const audio_attributes_t &attributes) : mAttributes(attributes) {}
+ AudioAttributes(volume_group_t groupId,
+ audio_stream_type_t stream,
+ const audio_attributes_t &attributes) :
+ mAttributes(attributes), mStreamType(stream), mGroupId(groupId) {}
+
+ audio_attributes_t getAttributes() const { return mAttributes; }
+
+ status_t readFromParcel(const Parcel *parcel) override;
+ status_t writeToParcel(Parcel *parcel) const override;
+
+ audio_stream_type_t getStreamType() const { return mStreamType; }
+ volume_group_t getGroupId() const { return mGroupId; }
+
+private:
+ audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ /**
+ * @brief mStreamType: for legacy volume management, we need to be able to convert an attribute
+ * to a given stream type.
+ */
+ audio_stream_type_t mStreamType = AUDIO_STREAM_DEFAULT;
+
+ /**
+ * @brief mGroupId: for future volume management, defines groups within a strategy that follow
+ * the same volume curves (an extension of stream types for volume management)
+ */
+ volume_group_t mGroupId = VOLUME_GROUP_NONE;
+};
+
+} // namespace android
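AudioAttributes wraps audio_attributes_t (plus an optional stream type and volume group) so it can cross Binder as a Parcelable. A small round-trip sketch, illustrative only:

#include <binder/Parcel.h>
#include <media/AudioAttributes.h>

using namespace android;

// Illustrative only: parcel an audio_attributes_t and read it back.
bool roundTripAttributes(const audio_attributes_t &attr) {
    Parcel parcel;
    AudioAttributes out(attr);
    if (out.writeToParcel(&parcel) != NO_ERROR) {
        return false;
    }
    parcel.setDataPosition(0);
    AudioAttributes in;
    if (in.readFromParcel(&parcel) != NO_ERROR) {
        return false;
    }
    return in.getAttributes() == attr;  // operator== comes from AudioCommonTypes.h
}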
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
new file mode 100644
index 0000000..8e446ea
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <cstring>
+#include <vector>
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+enum product_strategy_t : uint32_t;
+const product_strategy_t PRODUCT_STRATEGY_NONE = static_cast<product_strategy_t>(-1);
+
+using AttributesVector = std::vector<audio_attributes_t>;
+using StreamTypeVector = std::vector<audio_stream_type_t>;
+
+constexpr bool operator==(const audio_attributes_t &lhs, const audio_attributes_t &rhs)
+{
+ return lhs.usage == rhs.usage && lhs.content_type == rhs.content_type &&
+ lhs.flags == rhs.flags && (std::strcmp(lhs.tags, rhs.tags) == 0);
+}
+constexpr bool operator!=(const audio_attributes_t &lhs, const audio_attributes_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
+enum volume_group_t : uint32_t;
+static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
+
+} // namespace android
+
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index 3ae7104..41b425f 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -26,6 +26,7 @@
#include <unordered_map>
#include <vector>
+#include <android/os/IExternalVibratorService.h>
#include <media/AudioBufferProvider.h>
#include <media/AudioResampler.h>
#include <media/AudioResamplerPublic.h>
@@ -80,6 +81,7 @@
MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
// for haptic
HAPTIC_ENABLED = 0x4007, // Set haptic data from this track should be played or not.
+ HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
// for target RESAMPLE
SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name;
// parameter 'value' is the new sample rate in Hz.
@@ -102,6 +104,32 @@
// parameter 'value' is a pointer to the new playback rate.
};
+ typedef enum { // Haptic intensity, should keep consistent with VibratorService
+ HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
+ HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
+ HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
+ HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
+ HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
+ HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
+ } haptic_intensity_t;
+ static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
+ static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
+ static constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
+
+ static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
+ switch (hapticIntensity) {
+ case HAPTIC_SCALE_MUTE:
+ case HAPTIC_SCALE_VERY_LOW:
+ case HAPTIC_SCALE_LOW:
+ case HAPTIC_SCALE_NONE:
+ case HAPTIC_SCALE_HIGH:
+ case HAPTIC_SCALE_VERY_HIGH:
+ return true;
+ default:
+ return false;
+ }
+ }
+
AudioMixer(size_t frameCount, uint32_t sampleRate)
: mSampleRate(sampleRate)
, mFrameCount(frameCount) {
@@ -147,6 +175,7 @@
}
}
(this->*mHook)();
+ processHapticData();
}
size_t getUnreleasedFrames(int name) const;
@@ -364,6 +393,7 @@
// Haptic
bool mHapticPlaybackEnabled;
+ haptic_intensity_t mHapticIntensity;
audio_channel_mask_t mHapticChannelMask;
uint32_t mHapticChannelCount;
audio_channel_mask_t mMixerHapticChannelMask;
@@ -374,6 +404,38 @@
uint32_t mAdjustNonDestructiveOutChannelCount;
bool mKeepContractedChannels;
+ float getHapticScaleGamma() const {
+ // Need to keep consistent with the value in VibratorService.
+ switch (mHapticIntensity) {
+ case HAPTIC_SCALE_VERY_LOW:
+ return 2.0f;
+ case HAPTIC_SCALE_LOW:
+ return 1.5f;
+ case HAPTIC_SCALE_HIGH:
+ return 0.5f;
+ case HAPTIC_SCALE_VERY_HIGH:
+ return 0.25f;
+ default:
+ return 1.0f;
+ }
+ }
+
+ float getHapticMaxAmplitudeRatio() const {
+ // Need to keep consistent with the value in VibratorService.
+ switch (mHapticIntensity) {
+ case HAPTIC_SCALE_VERY_LOW:
+ return HAPTIC_SCALE_VERY_LOW_RATIO;
+ case HAPTIC_SCALE_LOW:
+ return HAPTIC_SCALE_LOW_RATIO;
+ case HAPTIC_SCALE_NONE:
+ case HAPTIC_SCALE_HIGH:
+ case HAPTIC_SCALE_VERY_HIGH:
+ return 1.0f;
+ default:
+ return 0.0f;
+ }
+ }
+
private:
// hooks
void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
@@ -410,6 +472,8 @@
template <int MIXTYPE, typename TO, typename TI, typename TA>
void process__noResampleOneTrack();
+ void processHapticData();
+
static process_hook_t getProcessHook(int processType, uint32_t channelCount,
audio_format_t mixerInFormat, audio_format_t mixerOutFormat);
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index 786fb9a..bf8d627 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -49,8 +49,12 @@
#define MIX_STATE_IDLE 0
#define MIX_STATE_MIXING 1
+/** Control to which device some audio is rendered */
#define MIX_ROUTE_FLAG_RENDER 0x1
+/** Loop back some audio instead of rendering it */
#define MIX_ROUTE_FLAG_LOOP_BACK (0x1 << 1)
+/** Loop back some audio while it is rendered */
+#define MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
#define MIX_ROUTE_FLAG_ALL (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
#define MAX_MIXES_PER_POLICY 10
@@ -119,6 +123,11 @@
#define RECORD_CONFIG_EVENT_START 1
#define RECORD_CONFIG_EVENT_STOP 0
+static inline bool is_mix_loopback_render(uint32_t routeFlags) {
+ return (routeFlags & MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER)
+ == MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER;
+}
+
}; // namespace android
#endif // ANDROID_AUDIO_POLICY_H
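is_mix_loopback_render() above is true only when both route flags are set; a RENDER-only or LOOP_BACK-only mix does not qualify. A trivial sketch, illustrative only:

#include <media/AudioPolicy.h>

// Illustrative only: is_mix_loopback_render() requires both bits.
bool needsLoopbackAndRender(uint32_t routeFlags) {
    // MIX_ROUTE_FLAG_RENDER alone              -> false
    // MIX_ROUTE_FLAG_LOOP_BACK alone           -> false
    // MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER      -> true
    return android::is_mix_loopback_render(routeFlags);
}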
diff --git a/media/libaudioclient/include/media/AudioPolicyHelper.h b/media/libaudioclient/include/media/AudioPolicyHelper.h
deleted file mode 100644
index 46de6b3..0000000
--- a/media/libaudioclient/include/media/AudioPolicyHelper.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef AUDIO_POLICY_HELPER_H_
-#define AUDIO_POLICY_HELPER_H_
-
-#include <android-base/macros.h>
-#include <system/audio.h>
-
-static inline
-audio_stream_type_t audio_usage_to_stream_type(const audio_usage_t usage)
-{
- switch(usage) {
- case AUDIO_USAGE_MEDIA:
- case AUDIO_USAGE_GAME:
- case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
- case AUDIO_USAGE_ASSISTANT:
- return AUDIO_STREAM_MUSIC;
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- return AUDIO_STREAM_ACCESSIBILITY;
- case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
- return AUDIO_STREAM_SYSTEM;
- case AUDIO_USAGE_VOICE_COMMUNICATION:
- return AUDIO_STREAM_VOICE_CALL;
-
- case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
- return AUDIO_STREAM_DTMF;
-
- case AUDIO_USAGE_ALARM:
- return AUDIO_STREAM_ALARM;
- case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
- return AUDIO_STREAM_RING;
-
- case AUDIO_USAGE_NOTIFICATION:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
- case AUDIO_USAGE_NOTIFICATION_EVENT:
- return AUDIO_STREAM_NOTIFICATION;
-
- case AUDIO_USAGE_UNKNOWN:
- default:
- return AUDIO_STREAM_MUSIC;
- }
-}
-
-static inline
-audio_stream_type_t audio_attributes_to_stream_type(const audio_attributes_t *attr)
-{
- // flags to stream type mapping
- if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
- return AUDIO_STREAM_ENFORCED_AUDIBLE;
- }
- if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
- return AUDIO_STREAM_BLUETOOTH_SCO;
- }
-
- // usage to stream type mapping
- return audio_usage_to_stream_type(attr->usage);
-}
-
-static inline
-void stream_type_to_audio_attributes(audio_stream_type_t streamType,
- audio_attributes_t *attr) {
- memset(attr, 0, sizeof(audio_attributes_t));
-
- switch (streamType) {
- case AUDIO_STREAM_DEFAULT:
- case AUDIO_STREAM_MUSIC:
- attr->content_type = AUDIO_CONTENT_TYPE_MUSIC;
- attr->usage = AUDIO_USAGE_MEDIA;
- break;
- case AUDIO_STREAM_VOICE_CALL:
- attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
- attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
- break;
- case AUDIO_STREAM_ENFORCED_AUDIBLE:
- attr->flags |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
- FALLTHROUGH_INTENDED; // attributes in common with STREAM_SYSTEM
- case AUDIO_STREAM_SYSTEM:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
- break;
- case AUDIO_STREAM_RING:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
- break;
- case AUDIO_STREAM_ALARM:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_ALARM;
- break;
- case AUDIO_STREAM_NOTIFICATION:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_NOTIFICATION;
- break;
- case AUDIO_STREAM_BLUETOOTH_SCO:
- attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
- attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION;
- attr->flags |= AUDIO_FLAG_SCO;
- break;
- case AUDIO_STREAM_DTMF:
- attr->content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
- attr->usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
- break;
- case AUDIO_STREAM_TTS:
- attr->content_type = AUDIO_CONTENT_TYPE_SPEECH;
- attr->usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
- break;
- default:
- ALOGE("invalid stream type %d when converting to attributes", streamType);
- }
-}
-
-// Convert flags sent from Java AudioAttributes.getFlags() method to audio_output_flags_t
-static inline
-void audio_attributes_flags_to_audio_output_flags(const audio_flags_mask_t audioAttributeFlags,
- audio_output_flags_t &flags) {
- if ((audioAttributeFlags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
- flags = static_cast<audio_output_flags_t>(flags |
- AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_DIRECT);
- }
- if ((audioAttributeFlags & AUDIO_FLAG_LOW_LATENCY) != 0) {
- flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_FAST);
- }
- // check deep buffer after flags have been modified above
- if (flags == AUDIO_OUTPUT_FLAG_NONE && (audioAttributeFlags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
- flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
- }
-}
-
-#endif //AUDIO_POLICY_HELPER_H_
diff --git a/media/libaudioclient/include/media/AudioProductStrategy.h b/media/libaudioclient/include/media/AudioProductStrategy.h
new file mode 100644
index 0000000..7441095
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioProductStrategy.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <media/AudioCommonTypes.h>
+#include <media/AudioAttributes.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+class AudioProductStrategy : public Parcelable
+{
+public:
+ AudioProductStrategy() {}
+ AudioProductStrategy(const std::string &name, const std::vector<AudioAttributes> &attributes,
+ product_strategy_t id) :
+ mName(name), mAudioAttributes(attributes), mId(id) {}
+
+ const std::string &getName() const { return mName; }
+ std::vector<AudioAttributes> getAudioAttributes() const { return mAudioAttributes; }
+ product_strategy_t getId() const { return mId; }
+
+ status_t readFromParcel(const Parcel *parcel) override;
+ status_t writeToParcel(Parcel *parcel) const override;
+
+ /**
+ * @brief attributesMatches: checks whether client attributes match reference attributes.
+ * "Matching" means the usage shall match if the reference attributes define a usage, AND
+ * the content type shall match if the reference attributes define a content type, AND
+ * the flags shall match if the reference attributes define flags, AND
+ * the tags shall match if the reference attributes define tags.
+ * Default reference attributes shall not be considered a "true" match; this convention
+ * is used to identify the default strategy.
+ * @param refAttributes to be considered
+ * @param clientAttributes to be considered
+ * @return true if matching, false otherwise
+ */
+ static bool attributesMatches(const audio_attributes_t refAttributes,
+ const audio_attributes_t clientAttributes);
+private:
+ std::string mName;
+ std::vector<AudioAttributes> mAudioAttributes;
+ product_strategy_t mId;
+};
+
+using AudioProductStrategyVector = std::vector<AudioProductStrategy>;
+
+} // namespace android
+
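The attributesMatches() rule above is what the policy engine uses when it resolves a client's attributes to a strategy; from the client side the lookup goes through the AudioSystem wrapper added in this change. A hedged sketch:

#include <media/AudioAttributes.h>
#include <media/AudioSystem.h>

using namespace android;

// Illustrative only: resolve a client's attributes to a product strategy,
// falling back to PRODUCT_STRATEGY_NONE on any error.
product_strategy_t strategyForAttributes(const audio_attributes_t &attr) {
    product_strategy_t strategy = PRODUCT_STRATEGY_NONE;
    if (AudioSystem::getProductStrategyFromAudioAttributes(AudioAttributes(attr),
                                                           strategy) != NO_ERROR) {
        return PRODUCT_STRATEGY_NONE;
    }
    return strategy;
}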
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index ebee124..4707c4a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -677,7 +677,7 @@
sp<IMemory> mCblkMemory;
audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
sp<IMemory> mBufferMemory;
- audio_io_handle_t mInput; // returned by AudioSystem::getInput()
+ audio_io_handle_t mInput = AUDIO_IO_HANDLE_NONE; // from AudioSystem::getInputforAttr()
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
@@ -719,7 +719,7 @@
private:
class MediaMetrics {
public:
- MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiorecord")),
+ MediaMetrics() : mAnalyticsItem(MediaAnalyticsItem::create("audiorecord")),
mCreatedNs(systemTime(SYSTEM_TIME_REALTIME)),
mStartedNs(0), mDurationNs(0), mCount(0),
mLastError(NO_ERROR) {
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 781e9df..142d2bb 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -20,6 +20,8 @@
#include <sys/types.h>
#include <media/AudioPolicy.h>
+#include <media/AudioProductStrategy.h>
+#include <media/AudioVolumeGroup.h>
#include <media/AudioIoDescriptor.h>
#include <media/IAudioFlingerClient.h>
#include <media/IAudioPolicyServiceClient.h>
@@ -209,12 +211,14 @@
// IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
//
static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
- const char *device_address, const char *device_name);
+ const char *device_address, const char *device_name,
+ audio_format_t encodedFormat);
static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
const char *device_address);
static status_t handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name);
+ const char *device_name,
+ audio_format_t encodedFormat);
static status_t setPhoneState(audio_mode_t state);
static status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
static audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
@@ -228,7 +232,8 @@
const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId);
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs);
static status_t startOutput(audio_port_handle_t portId);
static status_t stopOutput(audio_port_handle_t portId);
static void releaseOutput(audio_port_handle_t portId);
@@ -337,11 +342,17 @@
static status_t setMasterMono(bool mono);
static status_t getMasterMono(bool *mono);
+ static status_t setMasterBalance(float balance);
+ static status_t getMasterBalance(float *balance);
+
static float getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device);
static status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+ static status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats);
+
// numSurroundFormats holds the maximum number of formats and bool value allowed in the array.
// When numSurroundFormats is 0, surroundFormats and surroundFormatsEnabled will not be
// populated. The actual number of surround formats should be returned at numSurroundFormats.
@@ -356,6 +367,18 @@
static bool isHapticPlaybackSupported();
+ static status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
+ static status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy);
+
+ static audio_attributes_t streamTypeToAttributes(audio_stream_type_t stream);
+ static audio_stream_type_t attributesToStreamType(const audio_attributes_t &attr);
+
+ static status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups);
+
+ static status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup);
+
// ----------------------------------------------------------------------------
class AudioPortCallback : public RefBase
@@ -385,6 +408,28 @@
audio_port_handle_t deviceId) = 0;
};
+ class AudioDeviceCallbackProxy : public RefBase
+ {
+ public:
+
+ AudioDeviceCallbackProxy(wp<AudioDeviceCallback> callback)
+ : mCallback(callback) {}
+ ~AudioDeviceCallbackProxy() override {}
+
+ sp<AudioDeviceCallback> callback() const { return mCallback.promote(); };
+
+ bool notifiedOnce() const { return mNotifiedOnce; }
+ void setNotifiedOnce() { mNotifiedOnce = true; }
+ private:
+ /**
+ * @brief mNotifiedOnce forces the callback to be called at least once when it is
+ * registered with a valid AudioDevice, and avoids flooding the other listeners on this
+ * I/O handle that already know the valid device.
+ */
+ bool mNotifiedOnce = false;
+ wp<AudioDeviceCallback> mCallback;
+ };
+
static status_t addAudioDeviceCallback(const wp<AudioDeviceCallback>& callback,
audio_io_handle_t audioIo);
static status_t removeAudioDeviceCallback(const wp<AudioDeviceCallback>& callback,
@@ -428,8 +473,27 @@
private:
Mutex mLock;
DefaultKeyedVector<audio_io_handle_t, sp<AudioIoDescriptor> > mIoDescriptors;
- DefaultKeyedVector<audio_io_handle_t, Vector < wp<AudioDeviceCallback> > >
- mAudioDeviceCallbacks;
+
+ class AudioDeviceCallbackProxies : public Vector<sp<AudioDeviceCallbackProxy>>
+ {
+ public:
+ /**
+ * @brief notifiedOnce ensures that if a client adds a callback, it is called at least
+ * once with the device to which the stream is routed.
+ * @return true if already notified or nobody waits for a callback, false otherwise.
+ */
+ bool notifiedOnce() const { return (size() == 0) || mNotifiedOnce; }
+ void setNotifiedOnce() { mNotifiedOnce = true; }
+ void resetNotifiedOnce() { mNotifiedOnce = false; }
+ private:
+ /**
+ * @brief mNotifiedOnce forces each callback to be called at least once when it is
+ * registered with a valid AudioDevice
+ */
+ bool mNotifiedOnce = false;
+ };
+ DefaultKeyedVector<audio_io_handle_t, AudioDeviceCallbackProxies>
+ mAudioDeviceCallbackProxies;
// cached values for recording getInputBufferSize() queries
size_t mInBuffSize; // zero indicates cache is invalid
uint32_t mInSamplingRate;
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 7fdf7cc..12f5d71 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -1021,7 +1021,7 @@
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
audio_track_cblk_t* mCblk; // re-load after mLock.unlock()
- audio_io_handle_t mOutput; // returned by AudioSystem::getOutputForAttr()
+ audio_io_handle_t mOutput = AUDIO_IO_HANDLE_NONE; // from AudioSystem::getOutputForAttr()
sp<AudioTrackThread> mAudioTrackThread;
bool mThreadCanCallJava;
@@ -1227,7 +1227,7 @@
private:
class MediaMetrics {
public:
- MediaMetrics() : mAnalyticsItem(new MediaAnalyticsItem("audiotrack")) {
+ MediaMetrics() : mAnalyticsItem(MediaAnalyticsItem::create("audiotrack")) {
}
~MediaMetrics() {
// mAnalyticsItem alloc failure will be flagged in the constructor
diff --git a/media/libaudioclient/include/media/AudioVolumeGroup.h b/media/libaudioclient/include/media/AudioVolumeGroup.h
new file mode 100644
index 0000000..9a6ea07
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioVolumeGroup.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <media/AudioProductStrategy.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+class AudioVolumeGroup : public Parcelable
+{
+public:
+ AudioVolumeGroup() {}
+ AudioVolumeGroup(const std::string &name,
+ volume_group_t group,
+ const AttributesVector &attributes,
+ const StreamTypeVector &streams) :
+ mName(name), mGroupId(group), mAudioAttributes(attributes), mStreams(streams) {}
+
+ const std::string &getName() const { return mName; }
+ volume_group_t getId() const { return mGroupId; }
+ AttributesVector getAudioAttributes() const { return mAudioAttributes; }
+ StreamTypeVector getStreamTypes() const { return mStreams; }
+
+ status_t readFromParcel(const Parcel *parcel) override;
+ status_t writeToParcel(Parcel *parcel) const override;
+
+private:
+ std::string mName;
+ volume_group_t mGroupId = VOLUME_GROUP_NONE;
+ AttributesVector mAudioAttributes;
+ StreamTypeVector mStreams;
+};
+
+using AudioVolumeGroupVector = std::vector<AudioVolumeGroup>;
+
+} // namespace android
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index a34b207..ef0ed0c 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -359,6 +359,9 @@
virtual float masterVolume() const = 0;
virtual bool masterMute() const = 0;
+ virtual status_t setMasterBalance(float balance) = 0;
+ virtual status_t getMasterBalance(float *balance) const = 0;
+
/* set/get stream type state. This will probably be used by
* the preference panel, mostly.
*/
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index fb4fe93..800344d 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -44,12 +44,14 @@
virtual status_t setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name) = 0;
+ const char *device_name,
+ audio_format_t encodedFormat) = 0;
virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
const char *device_address) = 0;
virtual status_t handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name) = 0;
+ const char *device_name,
+ audio_format_t encodedFormat) = 0;
virtual status_t setPhoneState(audio_mode_t state) = 0;
virtual status_t setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config) = 0;
@@ -64,7 +66,8 @@
const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId) = 0;
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs) = 0;
virtual status_t startOutput(audio_port_handle_t portId) = 0;
virtual status_t stopOutput(audio_port_handle_t portId) = 0;
virtual void releaseOutput(audio_port_handle_t portId) = 0;
@@ -186,12 +189,21 @@
audio_format_t *surroundFormats,
bool *surroundFormatsEnabled,
bool reported) = 0;
+ virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats) = 0;
virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled) = 0;
virtual status_t setAssistantUid(uid_t uid) = 0;
virtual status_t setA11yServicesUids(const std::vector<uid_t>& uids) = 0;
virtual bool isHapticPlaybackSupported() = 0;
+ virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy) = 0;
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) = 0;
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup) = 0;
};
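getHwOffloadEncodingFormatsSupportedForA2DP() lets clients ask which encodings the primary HAL can offload before selecting an A2DP codec. A minimal sketch against the AudioSystem wrapper declared in this change (illustrative only):

#include <algorithm>
#include <vector>
#include <media/AudioSystem.h>

using namespace android;

// Illustrative only: check whether a given encoding can be offloaded for A2DP.
bool canOffloadForA2dp(audio_format_t format) {
    std::vector<audio_format_t> formats;
    if (AudioSystem::getHwOffloadEncodingFormatsSupportedForA2DP(&formats) != NO_ERROR) {
        return false;
    }
    return std::find(formats.begin(), formats.end(), format) != formats.end();
}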
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index a1e869f..b25f82e 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -123,15 +123,13 @@
status_t DeviceHalHidl::setMasterVolume(float volume) {
if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
+ return processReturn("setMasterVolume", mDevice->setMasterVolume(volume));
}
status_t DeviceHalHidl::getMasterVolume(float *volume) {
if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
Result retval;
- Return<void> ret = mPrimaryDevice->getMasterVolume(
+ Return<void> ret = mDevice->getMasterVolume(
[&](Result r, float v) {
retval = r;
if (retval == Result::OK) {
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 2e35be6..e396cf3 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -106,7 +106,7 @@
status_t status = parametersFromHal(kvPairs, &hidlParams);
if (status != OK) return status;
return processReturn("setParameters",
- utils::setParameters(mStream, hidlParams, {} /* options */));
+ utils::setParameters(mStream, {} /* context */, hidlParams));
}
status_t StreamHalHidl::getParameters(const String8& keys, String8 *values) {
diff --git a/media/libaudioprocessing/Android.bp b/media/libaudioprocessing/Android.bp
index 817fb0b..cb78063 100644
--- a/media/libaudioprocessing/Android.bp
+++ b/media/libaudioprocessing/Android.bp
@@ -12,6 +12,11 @@
"libnblog",
"libsonic",
"libutils",
+ "libvibrator",
+ ],
+
+ header_libs: [
+ "libbase_headers",
],
cflags: [
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 86711de..2c57db7 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -113,10 +113,10 @@
// Integer volume.
// Currently integer volume is kept for the legacy integer mixer.
// Will be removed when the legacy mixer path is removed.
- t->volume[0] = UNITY_GAIN_INT;
- t->volume[1] = UNITY_GAIN_INT;
- t->prevVolume[0] = UNITY_GAIN_INT << 16;
- t->prevVolume[1] = UNITY_GAIN_INT << 16;
+ t->volume[0] = 0;
+ t->volume[1] = 0;
+ t->prevVolume[0] = 0 << 16;
+ t->prevVolume[1] = 0 << 16;
t->volumeInc[0] = 0;
t->volumeInc[1] = 0;
t->auxLevel = 0;
@@ -124,10 +124,10 @@
t->prevAuxLevel = 0;
// Floating point volume.
- t->mVolume[0] = UNITY_GAIN_FLOAT;
- t->mVolume[1] = UNITY_GAIN_FLOAT;
- t->mPrevVolume[0] = UNITY_GAIN_FLOAT;
- t->mPrevVolume[1] = UNITY_GAIN_FLOAT;
+ t->mVolume[0] = 0.f;
+ t->mVolume[1] = 0.f;
+ t->mPrevVolume[0] = 0.f;
+ t->mPrevVolume[1] = 0.f;
t->mVolumeInc[0] = 0.;
t->mVolumeInc[1] = 0.;
t->mAuxLevel = 0.;
@@ -167,6 +167,7 @@
t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
// haptic
t->mHapticPlaybackEnabled = false;
+ t->mHapticIntensity = HAPTIC_SCALE_NONE;
t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
t->mMixerHapticChannelCount = 0;
t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
@@ -717,6 +718,12 @@
track->prepareForAdjustChannels();
}
} break;
+ case HAPTIC_INTENSITY: {
+ const haptic_intensity_t hapticIntensity = static_cast<haptic_intensity_t>(valueInt);
+ if (track->mHapticIntensity != hapticIntensity) {
+ track->mHapticIntensity = hapticIntensity;
+ }
+ } break;
default:
LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
}
@@ -1846,6 +1853,40 @@
}
}
+void AudioMixer::processHapticData()
+{
+ // Need to keep consistent with VibrationEffect.scale(int, float, int)
+ for (const auto &pair : mGroups) {
+ // process by group of tracks with same output main buffer.
+ const auto &group = pair.second;
+ for (const int name : group) {
+ const std::shared_ptr<Track> &t = mTracks[name];
+ if (t->mHapticPlaybackEnabled) {
+ size_t sampleCount = mFrameCount * t->mMixerHapticChannelCount;
+ float gamma = t->getHapticScaleGamma();
+ float maxAmplitudeRatio = t->getHapticMaxAmplitudeRatio();
+ uint8_t* buffer = (uint8_t*)pair.first + mFrameCount * audio_bytes_per_frame(
+ t->mMixerChannelCount, t->mMixerFormat);
+ switch (t->mMixerFormat) {
+ // Mixer format should be AUDIO_FORMAT_PCM_FLOAT.
+ case AUDIO_FORMAT_PCM_FLOAT: {
+ float* fout = (float*) buffer;
+ for (size_t i = 0; i < sampleCount; i++) {
+ float mul = fout[i] >= 0 ? 1.0 : -1.0;
+ fout[i] = powf(fabsf(fout[i] / HAPTIC_MAX_AMPLITUDE_FLOAT), gamma)
+ * maxAmplitudeRatio * HAPTIC_MAX_AMPLITUDE_FLOAT * mul;
+ }
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("bad mMixerFormat: %#x", t->mMixerFormat);
+ break;
+ }
+ break;
+ }
+ }
+ }
+}
+
/* This track hook is called to do resampling then mixing,
* pulling from the track's upstream AudioBufferProvider.
*
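processHapticData() above applies a per-sample gamma curve and amplitude cap derived from the track's haptic intensity. The same transform, pulled out for a single sample, with the constants supplied by getHapticScaleGamma() and getHapticMaxAmplitudeRatio():

#include <cmath>

// Illustrative only: the per-sample haptic scaling used in processHapticData().
float scaleHapticSample(float sample, float gamma, float maxAmplitudeRatio) {
    const float kMaxAmplitude = 1.0f;  // HAPTIC_MAX_AMPLITUDE_FLOAT
    const float sign = sample >= 0.f ? 1.0f : -1.0f;
    return powf(fabsf(sample / kMaxAmplitude), gamma)
            * maxAmplitudeRatio * kMaxAmplitude * sign;
}
// Example: HAPTIC_SCALE_LOW (gamma = 1.5, ratio = 3/4) maps a full-scale 1.0f
// sample to 0.75f and a 0.5f sample to roughly 0.265f.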
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index 811c16b..0c8e5bb 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -10,6 +10,7 @@
"libcutils",
"liblog",
"libutils",
+ "libvibrator",
],
cflags: [
diff --git a/media/libeffects/downmix/tests/Android.bp b/media/libeffects/downmix/tests/Android.bp
new file mode 100644
index 0000000..e2e7dbd
--- /dev/null
+++ b/media/libeffects/downmix/tests/Android.bp
@@ -0,0 +1,31 @@
+// Build testbench for downmix module.
+cc_test {
+ name:"downmixtest",
+ host_supported: false,
+ proprietary: true,
+ include_dirs: [
+ "frameworks/av/media/libeffects/downmix",
+ ],
+
+ header_libs: [
+ "libaudioeffects",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "libdownmix",
+ "liblog",
+ ],
+
+ relative_install_path: "soundfx",
+
+ srcs: [
+ "downmixtest.cpp",
+ ],
+
+ cflags: [
+ "-v",
+ "-Werror",
+ "-Wextra",
+ ],
+}
diff --git a/media/libeffects/downmix/tests/build_and_run_all_unit_tests.sh b/media/libeffects/downmix/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..d0faebe
--- /dev/null
+++ b/media/libeffects/downmix/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+#Run tests in this directory.
+#
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+fi
+#ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+mm -j
+
+echo "waiting for device"
+
+adb root && adb wait-for-device remount
+
+#location of test files
+testdir="/data/local/tmp/downmixtest"
+
+fs_arr=(
+ 8000
+ 11025
+ 12000
+ 16000
+ 22050
+ 24000
+ 32000
+ 44100
+ 48000
+ 88200
+ 96000
+ 176400
+ 192000
+)
+
+echo "========================================"
+echo "testing Downmix"
+adb shell mkdir $testdir
+
+adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw \
+$testdir
+adb push $OUT/testcases/downmixtest/arm64/downmixtest $testdir
+
+#run the downmix test application for test.
+for fs in ${fs_arr[*]}
+do
+ for f_ch in {1..8}
+ do
+ for ch_fmt in {0..4}
+ do
+ adb shell LD_LIBRARY_PATH=/vendor/lib64/soundfx \
+ $testdir/downmixtest $testdir/sinesweepraw.raw \
+ $testdir/sinesweep_fmt_$((ch_fmt))_fch_$((f_ch))_$((fs)).raw \
+ -ch_fmt:$ch_fmt -fch:$f_ch -fs:$fs
+
+ # Implementation dependent test:
+ # check that higher frequencies match 8 kHz result.
+ if [ $fs != 8000 ]
+ then
+ adb shell cmp \
+ $testdir/sinesweep_fmt_$((ch_fmt))_fch_$((f_ch))_8000.raw \
+ $testdir/sinesweep_fmt_$((ch_fmt))_fch_$((f_ch))_$((fs)).raw
+ fi
+ done
+ done
+done
+adb shell rm -r $testdir
diff --git a/media/libeffects/downmix/tests/downmixtest.cpp b/media/libeffects/downmix/tests/downmixtest.cpp
new file mode 100644
index 0000000..71f83e5
--- /dev/null
+++ b/media/libeffects/downmix/tests/downmixtest.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#include <audio_effects/effect_downmix.h>
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
+#include <log/log.h>
+#include <system/audio.h>
+
+#include "EffectDownmix.h"
+#define FRAME_LENGTH 256
+#define MAX_NUM_CHANNELS 8
+
+struct downmix_cntxt_s {
+ effect_descriptor_t desc;
+ effect_handle_t handle;
+ effect_config_t config;
+
+ int numFileChannels;
+ int numProcessChannels;
+};
+
+extern audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM;
+
+void printUsage() {
+ printf("\nUsage:");
+ printf("\n downmixtest <input_file> <out_file> [options]\n");
+ printf("\nwhere,");
+ printf("\n <input_file> is the input file name");
+ printf("\n on which LVM effects are applied");
+ printf("\n <output_file> processed output file");
+ printf("\n and options are mentioned below");
+ printf("\n");
+ printf("\n -h");
+ printf("\n Prints this usage information");
+ printf("\n");
+ printf("\n -ch_fmt:<format_of_input_audio>");
+ printf("\n 0:AUDIO_CHANNEL_OUT_7POINT1(default)");
+ printf("\n 1:AUDIO_CHANNEL_OUT_5POINT1_SIDE");
+ printf("\n 2:AUDIO_CHANNEL_OUT_5POINT1_BACK");
+ printf("\n 3:AUDIO_CHANNEL_OUT_QUAD_SIDE");
+ printf("\n 4:AUDIO_CHANNEL_OUT_QUAD_BACK");
+ printf("\n");
+ printf("\n -fch:<file_channels> (1 through 8)");
+ printf("\n");
+}
+
+int32_t DownmixDefaultConfig(effect_config_t *pConfig) {
+ pConfig->inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+ pConfig->inputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ pConfig->inputCfg.channels = AUDIO_CHANNEL_OUT_7POINT1;
+ pConfig->inputCfg.bufferProvider.getBuffer = nullptr;
+ pConfig->inputCfg.bufferProvider.releaseBuffer = nullptr;
+ pConfig->inputCfg.bufferProvider.cookie = nullptr;
+ pConfig->inputCfg.mask = EFFECT_CONFIG_ALL;
+
+ pConfig->inputCfg.samplingRate = 44100;
+ pConfig->outputCfg.samplingRate = pConfig->inputCfg.samplingRate;
+
+ // set a default value for the access mode, but should be overwritten by caller
+ pConfig->outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+ pConfig->outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ pConfig->outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ pConfig->outputCfg.bufferProvider.getBuffer = nullptr;
+ pConfig->outputCfg.bufferProvider.releaseBuffer = nullptr;
+ pConfig->outputCfg.bufferProvider.cookie = nullptr;
+ pConfig->outputCfg.mask = EFFECT_CONFIG_ALL;
+
+ return 0;
+}
+
+int32_t DownmixConfigureAndEnable(downmix_cntxt_s *pDescriptor) {
+ effect_handle_t *effectHandle = &pDescriptor->handle;
+ downmix_module_t *downmixEffectHandle = (downmix_module_t *)*effectHandle;
+ const struct effect_interface_s *Downmix_api = downmixEffectHandle->itfe;
+ int32_t err = 0;
+ uint32_t replySize = (uint32_t)sizeof(err);
+
+ err = (Downmix_api->command)(*effectHandle, EFFECT_CMD_SET_CONFIG,
+ sizeof(effect_config_t), &(pDescriptor->config),
+ &replySize, &err);
+ if (err != 0) {
+ ALOGE("Downmix command to configure returned an error %d", err);
+ return err;
+ }
+
+ err = ((Downmix_api->command))(*effectHandle, EFFECT_CMD_ENABLE, 0, nullptr,
+ &replySize, &err);
+ if (err != 0) {
+ ALOGE("Downmix command to enable effect returned an error %d", err);
+ return err;
+ }
+ return 0;
+}
+
+int32_t DownmixExecute(downmix_cntxt_s *pDescriptor, FILE *finp,
+ FILE *fout) {
+ effect_handle_t *effectHandle = &pDescriptor->handle;
+ downmix_module_t *downmixEffectHandle = (downmix_module_t *)*effectHandle;
+ const struct effect_interface_s *Downmix_api = downmixEffectHandle->itfe;
+
+ const int numFileChannels = pDescriptor->numFileChannels;
+ const int numProcessChannels = pDescriptor->numProcessChannels;
+ const int fileFrameSize = numFileChannels * sizeof(short);
+ const unsigned int outputChannels =
+ audio_channel_count_from_out_mask(AUDIO_CHANNEL_OUT_STEREO);
+
+ std::vector<float> outFloat(FRAME_LENGTH * MAX_NUM_CHANNELS);
+ std::vector<float> inFloat(FRAME_LENGTH * MAX_NUM_CHANNELS);
+
+ audio_buffer_t inbuffer, outbuffer;
+ inbuffer.f32 = inFloat.data();
+ outbuffer.f32 = outFloat.data();
+ inbuffer.frameCount = FRAME_LENGTH;
+ outbuffer.frameCount = FRAME_LENGTH;
+
+ audio_buffer_t *pinbuf, *poutbuf;
+ pinbuf = &inbuffer;
+ poutbuf = &outbuffer;
+
+ int frameCounter = 0;
+ std::vector<short> inS16(FRAME_LENGTH * MAX_NUM_CHANNELS);
+ std::vector<short> outS16(FRAME_LENGTH * MAX_NUM_CHANNELS);
+
+ while (fread(inS16.data(), fileFrameSize, FRAME_LENGTH, finp) ==
+ FRAME_LENGTH) {
+ if (numFileChannels != numProcessChannels) {
+ adjust_channels(inS16.data(), numFileChannels, inS16.data(),
+ numProcessChannels, sizeof(short),
+ FRAME_LENGTH * fileFrameSize);
+ }
+
+ memcpy_to_float_from_i16(inFloat.data(), inS16.data(),
+ FRAME_LENGTH * numProcessChannels);
+
+ const int32_t err = (Downmix_api->process)(*effectHandle, pinbuf, poutbuf);
+ if (err != 0) {
+ ALOGE("DownmixProcess returned an error %d", err);
+ return -1;
+ }
+
+ memcpy_to_i16_from_float(outS16.data(), outFloat.data(),
+ FRAME_LENGTH * outputChannels);
+ fwrite(outS16.data(), sizeof(short), (FRAME_LENGTH * outputChannels),
+ fout);
+ frameCounter++;
+ }
+ printf("frameCounter: [%d]\n", frameCounter);
+ return 0;
+}
+
+int32_t DownmixMainProcess(downmix_cntxt_s *pDescriptor, FILE *finp,
+ FILE *fout) {
+ effect_handle_t *effectHandle = &pDescriptor->handle;
+ int32_t sessionId = 0, ioId = 0;
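+ // UUID passed to create_effect() below; it must match the implementation
+ // UUID advertised in the downmix effect descriptor exported by this library.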
+ const effect_uuid_t downmix_uuid = {
+ 0x93f04452, 0xe4fe, 0x41cc, 0x91f9, {0xe4, 0x75, 0xb6, 0xd1, 0xd6, 0x9f}};
+
+ int32_t err = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(
+ &downmix_uuid, sessionId, ioId,
+ effectHandle);
+ if (err != 0) {
+ ALOGE("DownmixLib_Create returned an error %d", err);
+ return -1;
+ }
+
+ // Pass the default init config for the time being.
+ err = DownmixConfigureAndEnable(pDescriptor);
+ if (err != 0) {
+ ALOGE("DownmixConfigureAndEnable returned an error %d", err);
+ return -1;
+ }
+ // Run the downmix over the whole input file.
+ err = DownmixExecute(pDescriptor, finp, fout);
+ if (err != 0) {
+ ALOGE("DownmixExecute returned an error %d", err);
+ return -1;
+ }
+ // Release the effect instance.
+ err = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(*effectHandle);
+ if (err != 0) {
+ ALOGE("DownmixRelease returned an error %d", err);
+ return -1;
+ }
+ return 0;
+}
+
+int main(int argc, const char *argv[]) {
+ downmix_cntxt_s descriptor = {};
+ descriptor.numFileChannels = 1; // default: mono input file
+ descriptor.numProcessChannels = 8; // default: process as 7.1 (8 channels)
+ DownmixDefaultConfig(&(descriptor.config));
+
+ const char *infile = nullptr;
+ const char *outfile = nullptr;
+ for (int i = 1; i < argc; i++) {
+ printf("%s ", argv[i]);
+ if (argv[i][0] != '-') {
+ if (infile == nullptr) {
+ infile = argv[i];
+ } else if (outfile == nullptr) {
+ outfile = argv[i];
+ } else {
+ printUsage();
+ return -1;
+ }
+ } else if (!strncmp(argv[i], "-fs:", 4)) {
+ // Add a check for all the supported streams.
+ const int samplingFreq = atoi(argv[i] + 4);
+ if (samplingFreq != 8000 && samplingFreq != 11025 &&
+ samplingFreq != 12000 && samplingFreq != 16000 &&
+ samplingFreq != 22050 && samplingFreq != 24000 &&
+ samplingFreq != 32000 && samplingFreq != 44100 &&
+ samplingFreq != 48000 && samplingFreq != 88200 &&
+ samplingFreq != 96000 && samplingFreq != 176400 &&
+ samplingFreq != 192000) {
+ printf("Unsupported Sampling Frequency : %d", samplingFreq);
+ printUsage();
+ return -1;
+ }
+
+ descriptor.config.inputCfg.samplingRate = samplingFreq;
+ descriptor.config.outputCfg.samplingRate = samplingFreq;
+ } else if (!strncmp(argv[i], "-ch_fmt:", 8)) {
+ const int format = atoi(argv[i] + 8);
+ uint32_t *audioType = &descriptor.config.inputCfg.channels;
+ switch (format) {
+ case 0:
+ *audioType = AUDIO_CHANNEL_OUT_7POINT1;
+ break;
+ case 1:
+ *audioType = AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+ break;
+ case 2:
+ *audioType = AUDIO_CHANNEL_OUT_5POINT1_BACK;
+ break;
+ case 3:
+ *audioType = AUDIO_CHANNEL_OUT_QUAD_SIDE;
+ break;
+ case 4:
+ *audioType = AUDIO_CHANNEL_OUT_QUAD_BACK;
+ break;
+ default:
+ *audioType = AUDIO_CHANNEL_OUT_7POINT1;
+ break;
+ }
+ descriptor.numProcessChannels =
+ audio_channel_count_from_out_mask(*audioType);
+ } else if (!strncmp(argv[i], "-fch:", 5)) {
+ const int fChannels = atoi(argv[i] + 5);
+ if (fChannels > 8 || fChannels < 1) {
+ printf("Unsupported number of file channels : %d", fChannels);
+ printUsage();
+ return -1;
+ }
+ descriptor.numFileChannels = fChannels;
+
+ } else if (!strncmp(argv[i], "-h", 2)) {
+ printUsage();
+ return 0;
+ }
+ }
+
+ if (infile == nullptr || outfile == nullptr) {
+ printUsage();
+ return -1;
+ }
+
+ FILE *finp = fopen(infile, "rb");
+ if (finp == nullptr) {
+ printf("Cannot open input file %s", infile);
+ return -1;
+ }
+ FILE *fout = fopen(outfile, "wb");
+ if (fout == nullptr) {
+ printf("Cannot open output file %s", outfile);
+ fclose(finp);
+ return -1;
+ }
+
+ const int err = DowmixMainProcess(&descriptor, finp, fout);
+ // close input and output files.
+ fclose(finp);
+ fclose(fout);
+ if (err != 0) {
+ printf("Error: %d\n", err);
+ }
+ return err;
+}
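A hypothetical invocation of the new test (file names are illustrative, not part of the
change): given an 8-channel 16-bit PCM capture, the stereo downmix is produced with

    downmixtest sinesweep_8ch.raw out_stereo.raw -fch:8 -ch_fmt:0 -fs:44100

i.e. read 8 interleaved file channels, process them as AUDIO_CHANNEL_OUT_7POINT1 and
write the stereo result to out_stereo.raw.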
diff --git a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
index 0669a81..c57498e 100644
--- a/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
+++ b/media/libeffects/lvm/lib/Bundle/src/LVM_Init.c
@@ -61,6 +61,72 @@
/* */
/****************************************************************************************/
+/*
+ * 4 Types of Memory Regions of LVM
+ * TODO: Allocate on the fly.
+ * i) LVM_MEMREGION_PERSISTENT_SLOW_DATA - For Instance Handles
+ * ii) LVM_MEMREGION_PERSISTENT_FAST_DATA - Persistent Buffers
+ * iii) LVM_MEMREGION_PERSISTENT_FAST_COEF - For holding coefficient structures
+ * iv) LVM_MEMREGION_TEMPORARY_FAST - Scratch (temporary) buffers
+ *
+ * LVM_MEMREGION_PERSISTENT_SLOW_DATA:
+ * Total Memory size:
+ * sizeof(LVM_Instance_t) + \
+ * sizeof(LVM_Buffer_t) + \
+ * sizeof(LVPSA_InstancePr_t) + \
+ * sizeof(LVM_Buffer_t) - needed if buffer mode is LVM_MANAGED_BUFFER
+ *
+ * LVM_MEMREGION_PERSISTENT_FAST_DATA:
+ * Total Memory size:
+ * sizeof(LVM_TE_Data_t) + \
+ * 2 * pInstParams->EQNB_NumBands * sizeof(LVM_EQNB_BandDef_t) + \
+ * sizeof(LVCS_Data_t) + \
+ * sizeof(LVDBE_Data_FLOAT_t) + \
+ * sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
+ * sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
+ * pInstParams->EQNB_NumBands * sizeof(Biquad_2I_Order2_FLOAT_Taps_t) + \
+ * pInstParams->EQNB_NumBands * sizeof(LVEQNB_BandDef_t) + \
+ * pInstParams->EQNB_NumBands * sizeof(LVEQNB_BiquadType_en) + \
+ * 2 * LVM_HEADROOM_MAX_NBANDS * sizeof(LVM_HeadroomBandDef_t) + \
+ * PSA_InitParams.nBands * sizeof(Biquad_1I_Order2_Taps_t) + \
+ * PSA_InitParams.nBands * sizeof(QPD_Taps_t)
+ *
+ * LVM_MEMREGION_PERSISTENT_FAST_COEF:
+ * Total Memory size:
+ * sizeof(LVM_TE_Coefs_t) + \
+ * sizeof(LVCS_Coefficient_t) + \
+ * sizeof(LVDBE_Coef_FLOAT_t) + \
+ * sizeof(Biquad_FLOAT_Instance_t) + \
+ * sizeof(Biquad_FLOAT_Instance_t) + \
+ * pInstParams->EQNB_NumBands * sizeof(Biquad_FLOAT_Instance_t) + \
+ * PSA_InitParams.nBands * sizeof(Biquad_Instance_t) + \
+ * PSA_InitParams.nBands * sizeof(QPD_State_t)
+ *
+ * LVM_MEMREGION_TEMPORARY_FAST (Scratch):
+ * Total Memory Size:
+ * BundleScratchSize + \
+ * MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_FLOAT) + \
+ * MaxScratchOf (CS, EQNB, DBE, PSA)
+ *
+ * a)BundleScratchSize:
+ * 3 * LVM_MAX_CHANNELS \
+ * * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) * sizeof(LVM_FLOAT)
+ * This Memory is allocated only when Buffer mode is LVM_MANAGED_BUFFER.
+ * b)MaxScratchOf (CS, EQNB, DBE, PSA)
+ * This Memory is needed for scratch usage for CS, EQNB, DBE, PSA.
+ * CS = (LVCS_SCRATCHBUFFERS * sizeof(LVM_FLOAT)
+ * * pCapabilities->MaxBlockSize)
+ * EQNB = (LVEQNB_SCRATCHBUFFERS * sizeof(LVM_FLOAT)
+ * * pCapabilities->MaxBlockSize)
+ * DBE = (LVDBE_SCRATCHBUFFERS_INPLACE*sizeof(LVM_FLOAT)
+ * * pCapabilities->MaxBlockSize)
+ * PSA = (2 * pInitParams->MaxInputBlockSize * sizeof(LVM_FLOAT))
+ * one MaxInputBlockSize for input and another for filter output
+ * c)MAX_INTERNAL_BLOCKSIZE
+ * This Memory is needed for PSAInput - Temp memory to store output
+ * from McToMono block and given as input to PSA block
+ */
+
LVM_ReturnStatus_en LVM_GetMemoryTable(LVM_Handle_t hInstance,
LVM_MemTab_t *pMemoryTable,
LVM_InstParams_t *pInstParams)
@@ -168,7 +234,13 @@
AlgScratchSize = 0;
if (pInstParams->BufferMode == LVM_MANAGED_BUFFERS)
{
+#ifdef BUILD_FLOAT
+ BundleScratchSize = 3 * LVM_MAX_CHANNELS \
+ * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) \
+ * sizeof(LVM_FLOAT);
+#else
BundleScratchSize = 6 * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) * sizeof(LVM_INT16);
+#endif
InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST], /* Scratch buffer */
BundleScratchSize);
InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
@@ -369,8 +441,13 @@
PSA_MemTab.Region[LVM_PERSISTENT_FAST_COEF].Size);
/* Fast Temporary */
+#ifdef BUILD_FLOAT
+ InstAlloc_AddMember(&AllocMem[LVM_TEMPORARY_FAST],
+ MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_FLOAT));
+#else
InstAlloc_AddMember(&AllocMem[LVM_TEMPORARY_FAST],
MAX_INTERNAL_BLOCKSIZE * sizeof(LVM_INT16));
+#endif
if (PSA_MemTab.Region[LVM_TEMPORARY_FAST].Size > AlgScratchSize)
{
@@ -559,13 +636,20 @@
*/
pInstance->pBufferManagement = InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_PERSISTENT_SLOW_DATA],
sizeof(LVM_Buffer_t));
+#ifdef BUILD_FLOAT
+ BundleScratchSize = (LVM_INT32)
+ (3 * LVM_MAX_CHANNELS \
+ * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) \
+ * sizeof(LVM_FLOAT));
+#else
BundleScratchSize = (LVM_INT32)(6 * (MIN_INTERNAL_BLOCKSIZE + InternalBlockSize) * sizeof(LVM_INT16));
+#endif
pInstance->pBufferManagement->pScratch = InstAlloc_AddMember(&AllocMem[LVM_MEMREGION_TEMPORARY_FAST], /* Scratch 1 buffer */
(LVM_UINT32)BundleScratchSize);
#ifdef BUILD_FLOAT
LoadConst_Float(0, /* Clear the input delay buffer */
(LVM_FLOAT *)&pInstance->pBufferManagement->InDelayBuffer,
- (LVM_INT16)(2 * MIN_INTERNAL_BLOCKSIZE));
+ (LVM_INT16)(LVM_MAX_CHANNELS * MIN_INTERNAL_BLOCKSIZE));
#else
LoadConst_16(0, /* Clear the input delay buffer */
(LVM_INT16 *)&pInstance->pBufferManagement->InDelayBuffer,
diff --git a/media/libeffects/lvm/lib/Common/src/Copy_16.c b/media/libeffects/lvm/lib/Common/src/Copy_16.c
index 1f9f659..3858450 100644
--- a/media/libeffects/lvm/lib/Common/src/Copy_16.c
+++ b/media/libeffects/lvm/lib/Common/src/Copy_16.c
@@ -132,8 +132,8 @@
src += NrChannels * (NrFrames - 1);
for (ii = NrFrames; ii != 0; ii--)
{
- dst[0] = src_st[0];
dst[1] = src_st[1];
+ dst[0] = src_st[0]; // copying index 1 before index 0 is required when NrChannels == 3.
for (jj = 2; jj < NrChannels; jj++)
{
dst[jj] = src[jj];
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
index 48f5d54..9d3ee88 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_SetTimeConstant.c
@@ -51,7 +51,7 @@
LVM_INT16 NumChannels)
{
#ifdef HIGHER_FS
- LVM_FLOAT DeltaTable[11] = {0.500000f,/*8000*/
+ LVM_FLOAT DeltaTable[13] = {0.500000f,/*8000*/
0.362812f,/*11025*/
0.333333f,/*12000*/
0.250000f,/*16000*/
@@ -60,7 +60,9 @@
0.125000f,/*32000*/
0.090703f,/*44100*/
0.083333f,/*48000*/
+ 0.045352f,/*88200*/
0.041667f,/*96000*/
+ 0.022676f,/*176400*/
0.020833f};/*192000*/
#else
LVM_FLOAT DeltaTable[9] = {0.500000f,/*8000*/
diff --git a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
index 9dc7d21..0e0acf1 100644
--- a/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
+++ b/media/libeffects/lvm/lib/Common/src/LVC_Mixer_VarSlope_SetTimeConstant.c
@@ -52,7 +52,7 @@
LVM_INT16 NumChannels)
{
#ifdef HIGHER_FS
- LVM_FLOAT DeltaTable[11] = {0.500000f,/*8000*/
+ LVM_FLOAT DeltaTable[13] = {0.500000f,/*8000*/
0.362812f,/*11025*/
0.333333f,/*12000*/
0.250000f,/*16000*/
@@ -61,7 +61,9 @@
0.125000f,/*32000*/
0.090703f,/*44100*/
0.083333f,/*48000*/
+ 0.045352f,/*88200*/
0.041666f,/*96000*/
+ 0.022676f,/*176400*/
0.020833f};/*192000*/
#else
LVM_FLOAT DeltaTable[9] = {0.500000f,/*8000*/
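For reference, every row of DeltaTable follows Delta = 4000 / Fs (e.g. 0.090703 = 4000/44100),
and the two added rows continue that relation: 4000 / 88200 ≈ 0.045352 and
4000 / 176400 ≈ 0.022676. The relation itself is inferred from the existing entries,
not stated in the change.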
diff --git a/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c b/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
index 7846ca0..6307e68 100644
--- a/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
+++ b/media/libeffects/lvm/lib/Common/src/LVM_GetOmega.c
@@ -53,7 +53,9 @@
#define LVVDL_2PiBy_48000_f 0.000130900f
#ifdef HIGHER_FS
+#define LVVDL_2PiBy_88200_f 0.000071238f
#define LVVDL_2PiBy_96000_f 0.000065450f
+#define LVVDL_2PiBy_176400_f 0.000035619f
#define LVVDL_2PiBy_192000_f 0.000032725f
#endif
const LVM_FLOAT LVVDL_2PiOnFsTable[] = {LVVDL_2PiBy_8000_f,
@@ -66,7 +68,9 @@
LVVDL_2PiBy_44100_f,
LVVDL_2PiBy_48000_f
#ifdef HIGHER_FS
+ ,LVVDL_2PiBy_88200_f
,LVVDL_2PiBy_96000_f
+ ,LVVDL_2PiBy_176400_f
,LVVDL_2PiBy_192000_f
#endif
};
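As the macro names say, each constant is 2*pi / Fs, so the two new entries work out to
2*pi / 88200 ≈ 0.000071238 and 2*pi / 176400 ≈ 0.000035619.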
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
index e45d81f..ba05577 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_Headphone_Coeffs.h
@@ -239,13 +239,12 @@
#define LVCS_STEREODELAY_CS_24KHZ 279 /* Sample rate 24kS/s */
#define LVCS_STEREODELAY_CS_32KHZ 372 /* Sample rate 32kS/s */
#define LVCS_STEREODELAY_CS_44KHZ 512 /* Sample rate 44kS/s */
-// TODO: this should linearly scale by frequency but is limited to 512 frames until
-// we ensure enough buffer size has been allocated.
-#define LVCS_STEREODELAY_CS_48KHZ 512 /* Sample rate 48kS/s */
-#define LVCS_STEREODELAY_CS_88KHZ 512 /* Sample rate 88.2kS/s */
-#define LVCS_STEREODELAY_CS_96KHZ 512 /* Sample rate 96kS/s */
-#define LVCS_STEREODELAY_CS_176KHZ 512 /* Sample rate 176.4kS/s */
-#define LVCS_STEREODELAY_CS_192KHZ 512 /* Sample rate 196kS/s */
+#define LVCS_STEREODELAY_CS_48KHZ 557 /* Sample rate 48kS/s */
+#define LVCS_STEREODELAY_CS_88KHZ 1024 /* Sample rate 88.2kS/s */
+#define LVCS_STEREODELAY_CS_96KHZ 1115 /* Sample rate 96kS/s */
+#define LVCS_STEREODELAY_CS_176KHZ 2048 /* Sample rate 176.4kS/s */
+#define LVCS_STEREODELAY_CS_192KHZ 2229 /* Sample rate 192kS/s */
+#define LVCS_STEREODELAY_CS_MAX_VAL LVCS_STEREODELAY_CS_192KHZ
/* Reverb coefficients for 8000 Hz sample rate, scaled with 1.038030 */
#define CS_REVERB_8000_A0 0.667271
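The new values scale the 44.1 kHz delay of 512 samples linearly with the sample rate,
which is what the removed TODO asked for: 512 * 48000 / 44100 ≈ 557,
512 * 88200 / 44100 = 1024, 512 * 96000 / 44100 ≈ 1115, 512 * 176400 / 44100 = 2048 and
512 * 192000 / 44100 ≈ 2229. LVCS_STEREODELAY_CS_MAX_VAL then sizes the StereoSamples
buffer in the next hunk accordingly.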
diff --git a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
index 69892b6..f94d4e4 100644
--- a/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
+++ b/media/libeffects/lvm/lib/StereoWidening/src/LVCS_ReverbGenerator.h
@@ -65,7 +65,7 @@
/* Filter */
void (*pBiquadCallBack) (Biquad_Instance_t*, LVM_INT16*, LVM_INT16*, LVM_INT16);
#else
- LVM_FLOAT StereoSamples[2 * LVCS_STEREODELAY_CS_48KHZ];
+ LVM_FLOAT StereoSamples[2 * LVCS_STEREODELAY_CS_MAX_VAL];
/* Reverb Level */
LVM_FLOAT ReverbLevel;
/* Filter */
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
index 8ee807c..003ce9e 100644
--- a/media/libeffects/lvm/tests/Android.bp
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -44,3 +44,16 @@
"-Wextra",
],
}
+
+cc_test {
+ name: "snr",
+ host_supported: false,
+
+ srcs: ["snr.cpp"],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+}
diff --git a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
index 861ee64..1a874a3 100755
--- a/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
+++ b/media/libeffects/lvm/tests/build_and_run_all_unit_tests.sh
@@ -25,16 +25,17 @@
adb shell mkdir -p $testdir
adb push $ANDROID_BUILD_TOP/cts/tests/tests/media/res/raw/sinesweepraw.raw $testdir
adb push $OUT/testcases/lvmtest/arm64/lvmtest $testdir
+adb push $OUT/testcases/snr/arm64/snr $testdir
flags_arr=(
"-csE"
"-eqE"
"-tE"
"-csE -tE -eqE"
- "-bE"
+ "-bE -M"
"-csE -tE"
"-csE -eqE" "-tE -eqE"
- "-csE -tE -bE -eqE"
+ "-csE -tE -bE -M -eqE"
)
fs_arr=(
@@ -53,21 +54,13 @@
192000
)
-ch_arr=(
- 1
- 2
- 4
- 6
- 8
-)
-
# run multichannel effects at different configs, saving only the stereo channel
# pair.
for flags in "${flags_arr[@]}"
do
for fs in ${fs_arr[*]}
do
- for ch in ${ch_arr[*]}
+ for ch in {1..8}
do
adb shell $testdir/lvmtest -i:$testdir/sinesweepraw.raw \
-o:$testdir/sinesweep_$((ch))_$((fs)).raw -ch:$ch -fs:$fs $flags
@@ -79,6 +72,10 @@
then
adb shell cmp $testdir/sinesweep_2_$((fs)).raw \
$testdir/sinesweep_$((ch))_$((fs)).raw
+ elif [[ $flags == *"-bE"* ]] && [ "$ch" -gt 2 ]
+ then
+ adb shell $testdir/snr $testdir/sinesweep_2_$((fs)).raw \
+ $testdir/sinesweep_$((ch))_$((fs)).raw -thr:90.308998
fi
done
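The threshold 90.308998 passed to the new snr tool equals 20 * log10(2^15), i.e. the
ratio between 16-bit full scale and one LSB, so the multichannel bass-boost output is
required to match the stereo reference to within roughly 16-bit quantization error.
This reading of the constant is an inference from its value, not stated in the change.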
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index 43271d2..416bdaa 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -15,6 +15,7 @@
*/
#include <assert.h>
#include <inttypes.h>
+#include <iterator>
#include <math.h>
#include <stdlib.h>
#include <string.h>
@@ -102,20 +103,15 @@
printf("\n -M");
printf("\n Mono mode (force all input audio channels to be identical)");
printf("\n -basslvl:<effect_level>");
- printf("\n A value that ranges between 0 - 15 default 0");
+ printf("\n A value that ranges between %d - %d default 0", LVM_BE_MIN_EFFECTLEVEL,
+ LVM_BE_MAX_EFFECTLEVEL);
printf("\n");
printf("\n -eqPreset:<preset Value>");
- printf("\n 0 - Normal");
- printf("\n 1 - Classical");
- printf("\n 2 - Dance");
- printf("\n 3 - Flat");
- printf("\n 4 - Folk");
- printf("\n 5 - Heavy Metal");
- printf("\n 6 - Hip Hop");
- printf("\n 7 - Jazz");
- printf("\n 8 - Pop");
- printf("\n 9 - Rock");
- printf("\n default 0");
+ const size_t numPresetLvls = std::size(gEqualizerPresets);
+ for (size_t i = 0; i < numPresetLvls; ++i) {
+ printf("\n %zu - %s", i, gEqualizerPresets[i].name);
+ }
+ printf("\n default - 0");
printf("\n -bE ");
printf("\n Enable Dynamic Bass Enhancement");
printf("\n");
@@ -571,17 +567,12 @@
0); /* Audio Time */
}
-int lvmMainProcess(lvmConfigParams_t *plvmConfigParams, FILE *finp, FILE *fout) {
- struct EffectContext context;
- LVM_ControlParams_t params;
-
- int errCode = lvmCreate(&context, plvmConfigParams, ¶ms);
- if (errCode) {
- ALOGE("Error: lvmCreate returned with %d\n", errCode);
- return errCode;
- }
-
- errCode = lvmControl(&context, plvmConfigParams, ¶ms);
+int lvmMainProcess(EffectContext *pContext,
+ LVM_ControlParams_t *pParams,
+ lvmConfigParams_t *plvmConfigParams,
+ FILE *finp,
+ FILE *fout) {
+ int errCode = lvmControl(pContext, plvmConfigParams, pParams);
if (errCode) {
ALOGE("Error: lvmControl returned with %d\n", errCode);
return errCode;
@@ -624,8 +615,8 @@
std::fill(fp + 1, fp + channelCount, *fp); // replicate ch 0
}
}
-#if 1
- errCode = lvmExecute(floatIn.data(), floatOut.data(), &context, plvmConfigParams);
+#ifndef BYPASS_EXEC
+ errCode = lvmExecute(floatIn.data(), floatOut.data(), pContext, plvmConfigParams);
if (errCode) {
printf("\nError: lvmExecute returned with %d\n", errCode);
return errCode;
@@ -654,14 +645,15 @@
}
lvmConfigParams_t lvmConfigParams{}; // default initialize
- FILE *finp = nullptr, *fout = nullptr;
+ const char *infile = nullptr;
+ const char *outfile = nullptr;
for (int i = 1; i < argc; i++) {
printf("%s ", argv[i]);
if (!strncmp(argv[i], "-i:", 3)) {
- finp = fopen(argv[i] + 3, "rb");
+ infile = argv[i] + 3;
} else if (!strncmp(argv[i], "-o:", 3)) {
- fout = fopen(argv[i] + 3, "wb");
+ outfile = argv[i] + 3;
} else if (!strncmp(argv[i], "-fs:", 4)) {
const int samplingFreq = atoi(argv[i] + 4);
if (samplingFreq != 8000 && samplingFreq != 11025 &&
@@ -671,21 +663,21 @@
samplingFreq != 48000 && samplingFreq != 88200 &&
samplingFreq != 96000 && samplingFreq != 176400 &&
samplingFreq != 192000) {
- ALOGE("\nError: Unsupported Sampling Frequency : %d\n", samplingFreq);
+ printf("Error: Unsupported Sampling Frequency : %d\n", samplingFreq);
return -1;
}
lvmConfigParams.samplingFreq = samplingFreq;
} else if (!strncmp(argv[i], "-ch:", 4)) {
const int nrChannels = atoi(argv[i] + 4);
if (nrChannels > 8 || nrChannels < 1) {
- ALOGE("\nError: Unsupported number of channels : %d\n", nrChannels);
+ printf("Error: Unsupported number of channels : %d\n", nrChannels);
return -1;
}
lvmConfigParams.nrChannels = nrChannels;
} else if (!strncmp(argv[i], "-fch:", 5)) {
const int fChannels = atoi(argv[i] + 5);
if (fChannels > 8 || fChannels < 1) {
- ALOGE("\nError: Unsupported number of file channels : %d\n", fChannels);
+ printf("Error: Unsupported number of file channels : %d\n", fChannels);
return -1;
}
lvmConfigParams.fChannels = fChannels;
@@ -693,8 +685,8 @@
lvmConfigParams.monoMode = true;
} else if (!strncmp(argv[i], "-basslvl:", 9)) {
const int bassEffectLevel = atoi(argv[i] + 9);
- if (bassEffectLevel > 15 || bassEffectLevel < 0) {
- ALOGE("\nError: Unsupported Bass Effect Level : %d\n",
+ if (bassEffectLevel > LVM_BE_MAX_EFFECTLEVEL || bassEffectLevel < LVM_BE_MIN_EFFECTLEVEL) {
+ printf("Error: Unsupported Bass Effect Level : %d\n",
bassEffectLevel);
printUsage();
return -1;
@@ -702,8 +694,9 @@
lvmConfigParams.bassEffectLevel = bassEffectLevel;
} else if (!strncmp(argv[i], "-eqPreset:", 10)) {
const int eqPresetLevel = atoi(argv[i] + 10);
- if (eqPresetLevel > 9 || eqPresetLevel < 0) {
- ALOGE("\nError: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
+ const int numPresetLvls = std::size(gEqualizerPresets);
+ if (eqPresetLevel >= numPresetLvls || eqPresetLevel < 0) {
+ printf("Error: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
printUsage();
return -1;
}
@@ -722,19 +715,47 @@
}
}
- if (finp == nullptr || fout == nullptr) {
- ALOGE("\nError: missing input/output files\n");
+ if (infile == nullptr || outfile == nullptr) {
+ printf("Error: missing input/output files\n");
printUsage();
- // ok not to close.
return -1;
}
- const int errCode = lvmMainProcess(&lvmConfigParams, finp, fout);
+ FILE *finp = fopen(infile, "rb");
+ if (finp == nullptr) {
+ printf("Cannot open input file %s", infile);
+ return -1;
+ }
+
+ FILE *fout = fopen(outfile, "wb");
+ if (fout == nullptr) {
+ printf("Cannot open output file %s", outfile);
+ fclose(finp);
+ return -1;
+ }
+
+ EffectContext context;
+ LVM_ControlParams_t params;
+ int errCode = lvmCreate(&context, &lvmConfigParams, ¶ms);
+ if (errCode == 0) {
+ errCode = lvmMainProcess(&context, ¶ms, &lvmConfigParams, finp, fout);
+ if (errCode != 0) {
+ printf("Error: lvmMainProcess returned with the error: %d",errCode);
+ }
+ } else {
+ printf("Error: lvmCreate returned with the error: %d", errCode);
+ }
fclose(finp);
fclose(fout);
+ /* Free the allocated buffers */
+ if (context.pBundledContext != nullptr) {
+ if (context.pBundledContext->hInstance != nullptr) {
+ LvmEffect_free(&context);
+ }
+ free(context.pBundledContext);
+ }
if (errCode) {
- ALOGE("Error: lvmMainProcess returns with the error: %d \n", errCode);
return -1;
}
return 0;
diff --git a/media/libeffects/lvm/tests/snr.cpp b/media/libeffects/lvm/tests/snr.cpp
new file mode 100644
index 0000000..88110c0
--- /dev/null
+++ b/media/libeffects/lvm/tests/snr.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <assert.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+#include <limits>
+#include <utility>
+#include <vector>
+
+template <typename T, typename A = float>
+std::pair<A, A> getSignalNoise(FILE *finp, FILE *fref) {
+ constexpr size_t framesize = 256;
+ std::vector<T> in(framesize);
+ std::vector<T> ref(framesize);
+ A signal{};
+ A noise{};
+
+ for (;;) {
+ size_t read_samples_in = fread(&in[0], sizeof(T), framesize, finp);
+ const size_t read_samples_ref = fread(&ref[0], sizeof(T), framesize, fref);
+ if (read_samples_in != read_samples_ref) {
+ printf("file sizes do not match (last %zu %zu)", read_samples_in, read_samples_ref);
+ read_samples_in = std::min(read_samples_in, read_samples_ref);
+ }
+ if (read_samples_in == 0) {
+ return { signal, noise };
+ }
+ for (size_t i = 0; i < read_samples_in; ++i) {
+ const A value(ref[i]);
+ const A diff(A(in[i]) - value);
+ signal += value * value;
+ noise += diff * diff;
+ }
+ }
+}
+
+void printUsage() {
+ printf("\nUsage: ");
+ printf("\n snr <ref_file> <test_file> [options]\n");
+ printf("\nwhere, \n <ref_file> is the reference file name");
+ printf("\n on which will be taken as pure signal");
+ printf("\n <test_file> is test file for snr calculation");
+ printf("\n and options are mentioned below");
+ printf("\n");
+ printf("\n -pcm_format:<pcm format of input files>");
+ printf("\n 0 - 16 bit pcm");
+ printf("\n 1 - 32 bit float");
+ printf("\n default 0");
+ printf("\n -thr:<threshold value>");
+ printf("\n default - negative infinity\n\n");
+}
+
+int main(int argc, const char *argv[]) {
+ if (argc < 3) {
+ printUsage();
+ return -1;
+ }
+ int pcm_format = 0;
+ float thr = - std::numeric_limits<float>::infinity();
+ FILE *fref = fopen(argv[1], "rb");
+ FILE *finp = fopen(argv[2], "rb");
+ for (int i = 3; i < argc; i++) {
+ if (!strncmp(argv[i], "-pcm_format:", 12)) {
+ pcm_format = atoi(argv[i] + 12);
+ } else if (!strncmp(argv[i], "-thr:", 5)) {
+ thr = atof(argv[i] + 5);
+ }
+ }
+ if (finp == nullptr || fref == nullptr) {
+ printf("\nError: missing input/reference files\n");
+ return -1;
+ }
+ auto sn = pcm_format == 0
+ ? getSignalNoise<short>(finp, fref)
+ : getSignalNoise<float>(finp, fref);
+ if (sn.first > 0.f && sn.second > 0.f) {
+ float snr = 10.f * log10(sn.first / sn.second); // SNR in dB
+ // compare the measured snr value with threshold
+ if (snr < thr) {
+ printf("%.6f less than threshold %.6f\n", snr, thr);
+ } else {
+ printf("%.6f\n", snr);
+ }
+ }
+ fclose(finp);
+ fclose(fref);
+
+ return 0;
+}
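The value printed by snr is SNR = 10 * log10( sum(ref[i]^2) / sum((test[i] - ref[i])^2) )
in dB, accumulated over the whole file in 256-frame chunks; when the two files differ in
length, only the common prefix contributes.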
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 68dae56..9799cad 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -21,6 +21,7 @@
vndk: {
enabled: true,
},
+ double_loadable: true,
srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
cflags: [
"-Werror",
@@ -144,18 +145,10 @@
},
}
-filegroup {
- name: "mediaupdateservice_aidl",
- srcs: [
- "aidl/android/media/IMediaUpdateService.aidl",
- ],
-}
-
cc_library {
name: "libmedia",
srcs: [
- ":mediaupdateservice_aidl",
"IDataSource.cpp",
"BufferingSettings.cpp",
"mediaplayer.cpp",
@@ -266,6 +259,7 @@
name: "libmedia_player2_util",
srcs: [
+ "AudioParameter.cpp",
"BufferingSettings.cpp",
"DataSourceDesc.cpp",
"MediaCodecBuffer.cpp",
diff --git a/media/libmedia/BufferingSettings.cpp b/media/libmedia/BufferingSettings.cpp
index 271a238..1cc30c2 100644
--- a/media/libmedia/BufferingSettings.cpp
+++ b/media/libmedia/BufferingSettings.cpp
@@ -27,26 +27,6 @@
: mInitialMarkMs(kNoMark),
mResumePlaybackMarkMs(kNoMark) { }
-status_t BufferingSettings::readFromParcel(const Parcel* parcel) {
- if (parcel == nullptr) {
- return BAD_VALUE;
- }
- mInitialMarkMs = parcel->readInt32();
- mResumePlaybackMarkMs = parcel->readInt32();
-
- return OK;
-}
-
-status_t BufferingSettings::writeToParcel(Parcel* parcel) const {
- if (parcel == nullptr) {
- return BAD_VALUE;
- }
- parcel->writeInt32(mInitialMarkMs);
- parcel->writeInt32(mResumePlaybackMarkMs);
-
- return OK;
-}
-
String8 BufferingSettings::toString() const {
String8 s;
s.appendFormat(
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
index 990d260..5c6b981 100644
--- a/media/libmedia/CharacterEncodingDetector.cpp
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -28,6 +28,8 @@
#include <unicode/ucsdet.h>
#include <unicode/ustring.h>
+#include <cutils/properties.h>
+
namespace android {
CharacterEncodingDetector::CharacterEncodingDetector() {
@@ -38,6 +40,26 @@
ALOGE("could not create UConverter for UTF-8");
mUtf8Conv = NULL;
}
+
+ // Read system locale setting from system property and map to ICU encoding names.
+ mLocaleEnc = NULL;
+ char locale_value[PROPERTY_VALUE_MAX] = "";
+ if (property_get("persist.sys.locale", locale_value, NULL) > 0) {
+ const size_t len = strnlen(locale_value, sizeof(locale_value));
+
+ if (len == 3 && !strncmp(locale_value, "und", 3)) {
+ // Undetermined
+ } else if (!strncmp(locale_value, "th", 2)) { // Thai
+ mLocaleEnc = "windows-874-2000";
+ }
+ if (mLocaleEnc != NULL) {
+ ALOGV("System locale encoding = %s", mLocaleEnc);
+ } else {
+ ALOGV("Didn't recognize system locale setting, defaulting to en_US");
+ }
+ } else {
+ ALOGV("Couldn't read system locale setting, assuming en_US");
+ }
}
CharacterEncodingDetector::~CharacterEncodingDetector() {
@@ -157,7 +179,11 @@
}
}
- if (bestCombinedMatch != NULL) {
+ if (mLocaleEnc != NULL && !goodmatch && highest < 50) {
+ combinedenc = mLocaleEnc;
+ ALOGV("confidence is low but we have recognized predefined encoding, "
+ "so try this (%s) instead", mLocaleEnc);
+ } else if (bestCombinedMatch != NULL) {
combinedenc = ucsdet_getName(bestCombinedMatch, &status);
} else {
combinedenc = "ISO-8859-1";
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index e2eccdd..ea06665 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -179,7 +179,8 @@
{
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
- buffering.writeToParcel(&data);
+ data.writeInt32(buffering.mInitialMarkMs);
+ data.writeInt32(buffering.mResumePlaybackMarkMs);
remote()->transact(SET_BUFFERING_SETTINGS, data, &reply);
return reply.readInt32();
}
@@ -194,7 +195,8 @@
remote()->transact(GET_BUFFERING_SETTINGS, data, &reply);
status_t err = reply.readInt32();
if (err == OK) {
- err = buffering->readFromParcel(&reply);
+ buffering->mInitialMarkMs = reply.readInt32();
+ buffering->mResumePlaybackMarkMs = reply.readInt32();
}
return err;
}
@@ -696,7 +698,8 @@
case SET_BUFFERING_SETTINGS: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
BufferingSettings buffering;
- buffering.readFromParcel(&data);
+ buffering.mInitialMarkMs = data.readInt32();
+ buffering.mResumePlaybackMarkMs = data.readInt32();
reply->writeInt32(setBufferingSettings(buffering));
return NO_ERROR;
} break;
@@ -706,7 +709,8 @@
status_t err = getBufferingSettings(&buffering);
reply->writeInt32(err);
if (err == OK) {
- buffering.writeToParcel(reply);
+ reply->writeInt32(buffering.mInitialMarkMs);
+ reply->writeInt32(buffering.mResumePlaybackMarkMs);
}
return NO_ERROR;
} break;
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index e7da488..4dece96 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -107,6 +107,7 @@
data.writeInterfaceToken(BpMediaSource::getInterfaceDescriptor());
status_t ret = remote()->transact(GETFORMAT, data, &reply);
if (ret == NO_ERROR) {
+ AutoMutex _l(mLock);
mMetaData = MetaData::createFromParcel(reply);
return mMetaData;
}
@@ -222,6 +223,8 @@
// NuPlayer passes pointers-to-metadata around, so we use this to keep the metadata alive
// XXX: could we use this for caching, or does metadata change on the fly?
sp<MetaData> mMetaData;
+ // ensure synchronize access to mMetaData
+ Mutex mLock;
// Cache all IMemory objects received from MediaExtractor.
// We gc IMemory objects that are no longer active (referenced by a MediaBuffer).
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index a073081..747b88f 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -552,7 +552,7 @@
};
IMPLEMENT_META_INTERFACE(OMX, "android.hardware.IOMX");
-IMPLEMENT_HYBRID_META_INTERFACE(OMXNode, IOmxNode, "android.hardware.IOMXNode");
+IMPLEMENT_HYBRID_META_INTERFACE(OMXNode, "android.hardware.IOMXNode");
////////////////////////////////////////////////////////////////////////////////
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 08c6a50..98c5497 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -92,6 +92,19 @@
{"highspeed720p", CAMCORDER_QUALITY_HIGH_SPEED_720P},
{"highspeed1080p", CAMCORDER_QUALITY_HIGH_SPEED_1080P},
{"highspeed2160p", CAMCORDER_QUALITY_HIGH_SPEED_2160P},
+
+ // Vendor-specific profiles
+ {"vga", CAMCORDER_QUALITY_VGA},
+ {"4kdci", CAMCORDER_QUALITY_4KDCI},
+ {"timelapsevga", CAMCORDER_QUALITY_TIME_LAPSE_VGA},
+ {"timelapse4kdci", CAMCORDER_QUALITY_TIME_LAPSE_4KDCI},
+ {"highspeedcif", CAMCORDER_QUALITY_HIGH_SPEED_CIF},
+ {"highspeedvga", CAMCORDER_QUALITY_HIGH_SPEED_VGA},
+ {"highspeed4kdci", CAMCORDER_QUALITY_HIGH_SPEED_4KDCI},
+ {"qhd", CAMCORDER_QUALITY_QHD},
+ {"2k", CAMCORDER_QUALITY_2k},
+ {"timelapseqhd", CAMCORDER_QUALITY_TIME_LAPSE_QHD},
+ {"timelapse2k", CAMCORDER_QUALITY_TIME_LAPSE_2k},
};
#if LOG_NDEBUG
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index 6dbc9b8..ea0547c 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -57,6 +57,7 @@
AMEDIAFORMAT_KEY_COLOR_STANDARD,
AMEDIAFORMAT_KEY_COLOR_TRANSFER,
AMEDIAFORMAT_KEY_COMPLEXITY,
+ AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED,
AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE,
AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK,
AMEDIAFORMAT_KEY_CRYPTO_MODE,
@@ -95,6 +96,7 @@
static const char *AMediaFormatKeyGroupInt64[] = {
AMEDIAFORMAT_KEY_DURATION,
+ AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER,
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER,
AMEDIAFORMAT_KEY_TIME_US,
};
@@ -126,6 +128,7 @@
static const char *AMediaFormatKeyGroupFloatInt32[] = {
AMEDIAFORMAT_KEY_FRAME_RATE,
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL,
+ AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER,
AMEDIAFORMAT_KEY_OPERATING_RATE,
};
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index c24e046..0301b21 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -209,6 +209,14 @@
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_1_0),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_0),
MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_MAT_2_1),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_LC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_HE_V1),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_AAC_LATM_HE_V2),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_CELT),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_APTX_ADAPTIVE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LHDC),
+ MAKE_STRING_FROM_ENUM(AUDIO_FORMAT_LHDC_LL),
TERMINATOR
};
@@ -220,6 +228,9 @@
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT0POINT2),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_2POINT1POINT2),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_TRI),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_TRI_BACK),
+ MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT1),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT0POINT2),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_3POINT1POINT2),
MAKE_STRING_FROM_ENUM(AUDIO_CHANNEL_OUT_QUAD),
@@ -380,6 +391,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_BYPASS_MUTE),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_LOW_LATENCY),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_DEEP_BUFFER),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_CAPTURE),
TERMINATOR
};
@@ -403,14 +415,6 @@
OutputDeviceConverter::fromString(literalDevice, device);
}
-bool deviceToString(audio_devices_t device, std::string& literalDevice) {
- if (device & AUDIO_DEVICE_BIT_IN) {
- return InputDeviceConverter::toString(device, literalDevice);
- } else {
- return OutputDeviceConverter::toString(device, literalDevice);
- }
-}
-
SampleRateTraits::Collection samplingRatesFromString(
const std::string &samplingRates, const char *del)
{
diff --git a/media/libmedia/aidl/android/media/IMediaUpdateService.aidl b/media/libmedia/aidl/android/media/IMediaUpdateService.aidl
deleted file mode 100644
index 4777969..0000000
--- a/media/libmedia/aidl/android/media/IMediaUpdateService.aidl
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * Service to reload media component plugins when update package is installed/uninstalled.
- * @hide
- */
-interface IMediaUpdateService {
- void loadPlugins(@utf8InCpp String apkPath);
-}
diff --git a/media/libmedia/include/media/BufferingSettings.h b/media/libmedia/include/media/BufferingSettings.h
index d2a3e40..d97cc00 100644
--- a/media/libmedia/include/media/BufferingSettings.h
+++ b/media/libmedia/include/media/BufferingSettings.h
@@ -17,11 +17,11 @@
#ifndef ANDROID_BUFFERING_SETTINGS_H
#define ANDROID_BUFFERING_SETTINGS_H
-#include <binder/Parcelable.h>
+#include <utils/String8.h>
namespace android {
-struct BufferingSettings : public Parcelable {
+struct BufferingSettings {
static const int kNoMark = -1;
int mInitialMarkMs;
@@ -32,9 +32,6 @@
BufferingSettings();
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
String8 toString() const;
};
diff --git a/media/libmedia/include/media/CharacterEncodingDetector.h b/media/libmedia/include/media/CharacterEncodingDetector.h
index deaa377..62564b1 100644
--- a/media/libmedia/include/media/CharacterEncodingDetector.h
+++ b/media/libmedia/include/media/CharacterEncodingDetector.h
@@ -54,6 +54,7 @@
StringArray mValues;
UConverter* mUtf8Conv;
+ const char* mLocaleEnc;
};
diff --git a/media/libmedia/include/media/DrmHal.h b/media/libmedia/include/media/DrmHal.h
index de0f3c7..a630bfd 100644
--- a/media/libmedia/include/media/DrmHal.h
+++ b/media/libmedia/include/media/DrmHal.h
@@ -38,6 +38,7 @@
using drm::V1_0::IDrmPlugin;
using drm::V1_0::IDrmPluginListener;
using drm::V1_0::KeyStatus;
+using drm::V1_1::SecurityLevel;
using drm::V1_2::OfflineLicenseState;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
@@ -62,7 +63,10 @@
virtual status_t initCheck() const;
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType);
+ virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16],
+ const String8& mimeType,
+ DrmPlugin::SecurityLevel level,
+ bool *isSupported);
virtual status_t createPlugin(const uint8_t uuid[16],
const String8 &appPackageName);
@@ -223,6 +227,11 @@
status_t getPropertyStringInternal(String8 const &name, String8 &value) const;
status_t getPropertyByteArrayInternal(String8 const &name,
Vector<uint8_t> &value) const;
+ status_t matchMimeTypeAndSecurityLevel(const sp<IDrmFactory> &factory,
+ const uint8_t uuid[16],
+ const String8 &mimeType,
+ DrmPlugin::SecurityLevel level,
+ bool *isSupported);
DISALLOW_EVIL_CONSTRUCTORS(DrmHal);
};
diff --git a/media/libmedia/include/media/IDrm.h b/media/libmedia/include/media/IDrm.h
index 49166c6..fbe80c6 100644
--- a/media/libmedia/include/media/IDrm.h
+++ b/media/libmedia/include/media/IDrm.h
@@ -34,7 +34,10 @@
virtual status_t initCheck() const = 0;
- virtual bool isCryptoSchemeSupported(const uint8_t uuid[16], const String8 &mimeType) = 0;
+ virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16],
+ const String8 &mimeType,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool *result) = 0;
virtual status_t createPlugin(const uint8_t uuid[16],
const String8 &appPackageName) = 0;
diff --git a/media/libmedia/include/media/MediaProfiles.h b/media/libmedia/include/media/MediaProfiles.h
index 0feb4f3..3e8e7c8 100644
--- a/media/libmedia/include/media/MediaProfiles.h
+++ b/media/libmedia/include/media/MediaProfiles.h
@@ -34,7 +34,11 @@
CAMCORDER_QUALITY_1080P = 6,
CAMCORDER_QUALITY_QVGA = 7,
CAMCORDER_QUALITY_2160P = 8,
- CAMCORDER_QUALITY_LIST_END = 8,
+ CAMCORDER_QUALITY_VGA = 9,
+ CAMCORDER_QUALITY_4KDCI = 10,
+ CAMCORDER_QUALITY_QHD = 11,
+ CAMCORDER_QUALITY_2k = 12,
+ CAMCORDER_QUALITY_LIST_END = 12,
CAMCORDER_QUALITY_TIME_LAPSE_LIST_START = 1000,
CAMCORDER_QUALITY_TIME_LAPSE_LOW = 1000,
@@ -46,7 +50,11 @@
CAMCORDER_QUALITY_TIME_LAPSE_1080P = 1006,
CAMCORDER_QUALITY_TIME_LAPSE_QVGA = 1007,
CAMCORDER_QUALITY_TIME_LAPSE_2160P = 1008,
- CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1008,
+ CAMCORDER_QUALITY_TIME_LAPSE_VGA = 1009,
+ CAMCORDER_QUALITY_TIME_LAPSE_4KDCI = 1010,
+ CAMCORDER_QUALITY_TIME_LAPSE_QHD = 1011,
+ CAMCORDER_QUALITY_TIME_LAPSE_2k = 1012,
+ CAMCORDER_QUALITY_TIME_LAPSE_LIST_END = 1012,
CAMCORDER_QUALITY_HIGH_SPEED_LIST_START = 2000,
CAMCORDER_QUALITY_HIGH_SPEED_LOW = 2000,
@@ -55,7 +63,10 @@
CAMCORDER_QUALITY_HIGH_SPEED_720P = 2003,
CAMCORDER_QUALITY_HIGH_SPEED_1080P = 2004,
CAMCORDER_QUALITY_HIGH_SPEED_2160P = 2005,
- CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
+ CAMCORDER_QUALITY_HIGH_SPEED_CIF = 2006,
+ CAMCORDER_QUALITY_HIGH_SPEED_VGA = 2007,
+ CAMCORDER_QUALITY_HIGH_SPEED_4KDCI = 2008,
+ CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2008,
};
enum video_decoder {
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
index 418e09c..3acfe98 100644
--- a/media/libmedia/include/media/TypeConverter.h
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -233,8 +233,6 @@
bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);
-bool deviceToString(audio_devices_t device, std::string& literalDevice);
-
SampleRateTraits::Collection samplingRatesFromString(
const std::string &samplingRates, const char *del = AudioParameter::valueListSeparator);
@@ -255,47 +253,53 @@
OutputChannelTraits::Collection outputChannelMasksFromString(
const std::string &outChannels, const char *del = AudioParameter::valueListSeparator);
-static inline std::string toString(audio_usage_t usage)
+// counting enumerations
+template <typename T, std::enable_if_t<std::is_same<T, audio_content_type_t>::value
+ || std::is_same<T, audio_mode_t>::value
+ || std::is_same<T, audio_source_t>::value
+ || std::is_same<T, audio_stream_type_t>::value
+ || std::is_same<T, audio_usage_t>::value
+ , int> = 0>
+static inline std::string toString(const T& value)
{
- std::string usageLiteral;
- if (!android::UsageTypeConverter::toString(usage, usageLiteral)) {
- ALOGV("failed to convert usage: %d", usage);
- return "AUDIO_USAGE_UNKNOWN";
- }
- return usageLiteral;
+ std::string result;
+ return TypeConverter<DefaultTraits<T>>::toString(value, result)
+ ? result : std::to_string(static_cast<int>(value));
+
}
-static inline std::string toString(audio_content_type_t content)
+// flag enumerations
+template <typename T, std::enable_if_t<std::is_same<T, audio_gain_mode_t>::value
+ || std::is_same<T, audio_input_flags_t>::value
+ || std::is_same<T, audio_output_flags_t>::value
+ , int> = 0>
+static inline std::string toString(const T& value)
{
- std::string contentLiteral;
- if (!android::AudioContentTypeConverter::toString(content, contentLiteral)) {
- ALOGV("failed to convert content type: %d", content);
- return "AUDIO_CONTENT_TYPE_UNKNOWN";
- }
- return contentLiteral;
+ std::string result;
+ TypeConverter<DefaultTraits<T>>::maskToString(value, result);
+ return result;
}
-static inline std::string toString(audio_stream_type_t stream)
+static inline std::string toString(const audio_devices_t& devices)
{
- std::string streamLiteral;
- if (!android::StreamTypeConverter::toString(stream, streamLiteral)) {
- ALOGV("failed to convert stream: %d", stream);
- return "AUDIO_STREAM_DEFAULT";
+ std::string result;
+ if ((devices & AUDIO_DEVICE_BIT_IN) != 0) {
+ InputDeviceConverter::maskToString(devices, result);
+ } else {
+ OutputDeviceConverter::maskToString(devices, result);
}
- return streamLiteral;
+ return result;
}
-static inline std::string toString(audio_source_t source)
+// TODO: Remove when FormatTraits uses DefaultTraits.
+static inline std::string toString(const audio_format_t& format)
{
- std::string sourceLiteral;
- if (!android::SourceTypeConverter::toString(source, sourceLiteral)) {
- ALOGV("failed to convert source: %d", source);
- return "AUDIO_SOURCE_DEFAULT";
- }
- return sourceLiteral;
+ std::string result;
+ return TypeConverter<VectorTraits<audio_format_t>>::toString(format, result)
+ ? result : std::to_string(static_cast<int>(format));
}
-static inline std::string toString(const audio_attributes_t &attributes)
+static inline std::string toString(const audio_attributes_t& attributes)
{
std::ostringstream result;
result << "{ Content type: " << toString(attributes.content_type)
@@ -308,16 +312,6 @@
return result.str();
}
-static inline std::string toString(audio_mode_t mode)
-{
- std::string modeLiteral;
- if (!android::AudioModeConverter::toString(mode, modeLiteral)) {
- ALOGV("failed to convert mode: %d", mode);
- return "AUDIO_MODE_INVALID";
- }
- return modeLiteral;
-}
-
}; // namespace android
#endif /*ANDROID_TYPE_CONVERTER_H_*/
diff --git a/media/libmedia/include/media/omx/1.0/WOmxNode.h b/media/libmedia/include/media/omx/1.0/WOmxNode.h
index eebc8c6..1db4248 100644
--- a/media/libmedia/include/media/omx/1.0/WOmxNode.h
+++ b/media/libmedia/include/media/omx/1.0/WOmxNode.h
@@ -59,7 +59,7 @@
* - TW = Treble Wrapper --- It wraps a legacy object inside a Treble object.
*/
-struct LWOmxNode : public H2BConverter<IOmxNode, IOMXNode, BnOMXNode> {
+struct LWOmxNode : public H2BConverter<IOmxNode, BnOMXNode> {
LWOmxNode(sp<IOmxNode> const& base) : CBase(base) {}
status_t freeNode() override;
status_t sendCommand(
diff --git a/media/libmedia/omx/1.0/WGraphicBufferSource.cpp b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
index 31d1df9..1ed1d07 100644
--- a/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libmedia/omx/1.0/WGraphicBufferSource.cpp
@@ -32,7 +32,7 @@
BnStatus LWGraphicBufferSource::configure(
const sp<IOMXNode>& omxNode, int32_t dataSpace) {
- sp<IOmxNode> hOmxNode = omxNode->getHalInterface();
+ sp<IOmxNode> hOmxNode = omxNode->getHalInterface<IOmxNode>();
return toBinderStatus(mBase->configure(
hOmxNode == nullptr ? new TWOmxNode(omxNode) : hOmxNode,
toHardwareDataspace(dataSpace)));
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index e188e54..15ea578 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -1,6 +1,4 @@
-// TODO: change it back to cc_library_shared when there is a way to
-// expose media metrics as stable API.
-cc_library {
+cc_library_shared {
name: "libmediametrics",
srcs: [
@@ -32,12 +30,13 @@
cfi: true,
},
- // enumerate the stable interface
-// this would mean nobody can use the C++ interface. have to rework some things.
-// stubs: {
-// symbol_file: "libmediametrics.map.txt",
-// versions: [
-// "1" ,
-// ]
-// },
+ // enumerate stable entry points, for apex use
+ stubs: {
+ symbol_file: "libmediametrics.map.txt",
+ versions: [
+ "1" ,
+ ]
+ },
}
+
+
diff --git a/media/libmediametrics/IMediaAnalyticsService.cpp b/media/libmediametrics/IMediaAnalyticsService.cpp
index 28a7746..9114927 100644
--- a/media/libmediametrics/IMediaAnalyticsService.cpp
+++ b/media/libmediametrics/IMediaAnalyticsService.cpp
@@ -142,7 +142,7 @@
CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
bool forcenew;
- MediaAnalyticsItem *item = new MediaAnalyticsItem;
+ MediaAnalyticsItem *item = MediaAnalyticsItem::create();
data.readBool(&forcenew);
item->readFromParcel(data);
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index 448e2d9..02c23b1 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -52,6 +52,17 @@
const char * const MediaAnalyticsItem::EnabledPropertyPersist = "persist.media.metrics.enabled";
const int MediaAnalyticsItem::EnabledProperty_default = 1;
+// Factory methods; callers don't need to know the size of the allocated object.
+MediaAnalyticsItem *MediaAnalyticsItem::create()
+{
+ return MediaAnalyticsItem::create(kKeyNone);
+}
+
+MediaAnalyticsItem *MediaAnalyticsItem::create(MediaAnalyticsItem::Key key)
+{
+ MediaAnalyticsItem *item = new MediaAnalyticsItem(key);
+ return item;
+}
// access functions for the class
MediaAnalyticsItem::MediaAnalyticsItem()
@@ -642,6 +653,19 @@
//
int32_t MediaAnalyticsItem::readFromParcel(const Parcel& data) {
+ int32_t version = data.readInt32();
+
+ switch(version) {
+ case 0:
+ return readFromParcel0(data);
+ break;
+ default:
+ ALOGE("Unsupported MediaAnalyticsItem Parcel version: %d", version);
+ return -1;
+ }
+}
+
+int32_t MediaAnalyticsItem::readFromParcel0(const Parcel& data) {
// into 'this' object
// .. we make a copy of the string to put away.
mKey = data.readCString();
@@ -691,8 +715,23 @@
}
int32_t MediaAnalyticsItem::writeToParcel(Parcel *data) {
+
if (data == NULL) return -1;
+ int32_t version = 0;
+ data->writeInt32(version);
+
+ switch(version) {
+ case 0:
+ return writeToParcel0(data);
+ break;
+ default:
+ ALOGE("Unsupported MediaAnalyticsItem Parcel version: %d", version);
+ return -1;
+ }
+}
+
+int32_t MediaAnalyticsItem::writeToParcel0(Parcel *data) {
data->writeCString(mKey.c_str());
data->writeInt32(mPid);
@@ -737,7 +776,6 @@
return 0;
}
-
const char *MediaAnalyticsItem::toCString() {
return toCString(PROTO_LAST);
}
@@ -876,8 +914,6 @@
}
return true;
} else {
- std::string p = this->toString();
- ALOGW("Unable to record: %s [forcenew=%d]", p.c_str(), forcenew);
return false;
}
}
@@ -1035,5 +1071,170 @@
return true;
}
+// a byte array; contents are
+// overall length (uint32) including the length field itself
+// encoding version (uint32)
+// count of properties (uint32)
+// N copies of:
+// property name as length(int16), bytes
+// the bytes WILL include the null terminator of the name
+// type (uint8 -- 1 byte)
+// size of value field (int16 -- 2 bytes)
+// value (size based on type)
+// int32, int64, double -- little endian 4/8/8 bytes respectively
+// cstring -- N bytes of value [WITH terminator]
+
+enum { kInt32 = 0, kInt64, kDouble, kRate, kCString};
+
+bool MediaAnalyticsItem::dumpAttributes(char **pbuffer, size_t *plength) {
+
+ char *build = NULL;
+
+ if (pbuffer == NULL || plength == NULL)
+ return false;
+
+ // consistency for the caller, who owns whatever comes back in this pointer.
+ *pbuffer = NULL;
+
+ // first, let's calculate sizes
+ int32_t goal = 0;
+ int32_t version = 0;
+
+ goal += sizeof(uint32_t); // overall length, including the length field
+ goal += sizeof(uint32_t); // encoding version
+ goal += sizeof(uint32_t); // # properties
+
+ int32_t count = mPropCount;
+ for (int i = 0 ; i < count; i++ ) {
+ Prop *prop = &mProps[i];
+ goal += sizeof(uint16_t); // name length
+ goal += strlen(prop->mName) + 1; // string + null
+ goal += sizeof(uint8_t); // type
+ goal += sizeof(uint16_t); // size of value
+ switch (prop->mType) {
+ case MediaAnalyticsItem::kTypeInt32:
+ goal += sizeof(uint32_t);
+ break;
+ case MediaAnalyticsItem::kTypeInt64:
+ goal += sizeof(uint64_t);
+ break;
+ case MediaAnalyticsItem::kTypeDouble:
+ goal += sizeof(double);
+ break;
+ case MediaAnalyticsItem::kTypeRate:
+ goal += 2 * sizeof(uint64_t);
+ break;
+ case MediaAnalyticsItem::kTypeCString:
+ // length + actual string + null
+ goal += strlen(prop->u.CStringValue) + 1;
+ break;
+ default:
+ ALOGE("found bad Prop type: %d, idx %d, name %s",
+ prop->mType, i, prop->mName);
+ return false;
+ }
+ }
+
+ // now that we have a size... let's allocate and fill
+ build = (char *)malloc(goal);
+ if (build == NULL)
+ return false;
+
+ memset(build, 0, goal);
+
+ char *filling = build;
+
+#define _INSERT(val, size) \
+ { memcpy(filling, &(val), (size)); filling += (size);}
+#define _INSERTSTRING(val, size) \
+ { memcpy(filling, (val), (size)); filling += (size);}
+
+ _INSERT(goal, sizeof(int32_t));
+ _INSERT(version, sizeof(int32_t));
+ _INSERT(count, sizeof(int32_t));
+
+ for (int i = 0 ; i < count; i++ ) {
+ Prop *prop = &mProps[i];
+ int16_t attrNameLen = strlen(prop->mName) + 1;
+ _INSERT(attrNameLen, sizeof(int16_t));
+ _INSERTSTRING(prop->mName, attrNameLen); // termination included
+ int8_t elemtype;
+ int16_t elemsize;
+ switch (prop->mType) {
+ case MediaAnalyticsItem::kTypeInt32:
+ {
+ elemtype = kInt32;
+ _INSERT(elemtype, sizeof(int8_t));
+ elemsize = sizeof(int32_t);
+ _INSERT(elemsize, sizeof(int16_t));
+
+ _INSERT(prop->u.int32Value, sizeof(int32_t));
+ break;
+ }
+ case MediaAnalyticsItem::kTypeInt64:
+ {
+ elemtype = kInt64;
+ _INSERT(elemtype, sizeof(int8_t));
+ elemsize = sizeof(int64_t);
+ _INSERT(elemsize, sizeof(int16_t));
+
+ _INSERT(prop->u.int64Value, sizeof(int64_t));
+ break;
+ }
+ case MediaAnalyticsItem::kTypeDouble:
+ {
+ elemtype = kDouble;
+ _INSERT(elemtype, sizeof(int8_t));
+ elemsize = sizeof(double);
+ _INSERT(elemsize, sizeof(int16_t));
+
+ _INSERT(prop->u.doubleValue, sizeof(double));
+ break;
+ }
+ case MediaAnalyticsItem::kTypeRate:
+ {
+ elemtype = kRate;
+ _INSERT(elemtype, sizeof(int8_t));
+ elemsize = 2 * sizeof(uint64_t);
+ _INSERT(elemsize, sizeof(int16_t));
+
+ _INSERT(prop->u.rate.count, sizeof(uint64_t));
+ _INSERT(prop->u.rate.duration, sizeof(uint64_t));
+ break;
+ }
+ case MediaAnalyticsItem::kTypeCString:
+ {
+ elemtype = kCString;
+ _INSERT(elemtype, sizeof(int8_t));
+ elemsize = strlen(prop->u.CStringValue) + 1;
+ _INSERT(elemsize, sizeof(int16_t));
+
+ _INSERTSTRING(prop->u.CStringValue, elemsize);
+ break;
+ }
+ default:
+ // error if can't encode; warning if can't decode
+ ALOGE("found bad Prop type: %d, idx %d, name %s",
+ prop->mType, i, prop->mName);
+ goto badness;
+ }
+ }
+
+ if (build + goal != filling) {
+ ALOGE("problems populating; wrote=%d planned=%d",
+ (int)(filling-build), goal);
+ goto badness;
+ }
+
+ *pbuffer = build;
+ *plength = goal;
+
+ return true;
+
+ badness:
+ free(build);
+ return false;
+}
+
} // namespace android
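The byte layout documented above is simple enough to walk without any framework support.
A minimal, hypothetical consumer of the buffer returned by dumpAttributes() might look
like the sketch below; it assumes the reader runs with the same endianness as the writer
(the encoder memcpy's host-order values), and the function name is illustrative.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Walk a buffer produced by MediaAnalyticsItem::dumpAttributes() and print
    // each property. Layout: total length, version, count (uint32 each), then per
    // property: name length (int16, incl. NUL), name, type (int8), value size
    // (int16), value bytes.
    static void walkAttributes(const char *buf) {
        const char *p = buf;
        uint32_t totalLen, version, count;
        memcpy(&totalLen, p, sizeof(totalLen)); p += sizeof(totalLen);
        memcpy(&version,  p, sizeof(version));  p += sizeof(version);
        memcpy(&count,    p, sizeof(count));    p += sizeof(count);
        printf("length=%u version=%u properties=%u\n", totalLen, version, count);
        for (uint32_t i = 0; i < count; i++) {
            int16_t nameLen;
            memcpy(&nameLen, p, sizeof(nameLen)); p += sizeof(nameLen);
            const char *name = p;                 p += nameLen;   // NUL included
            int8_t type;   memcpy(&type, p, sizeof(type)); p += sizeof(type);
            int16_t size;  memcpy(&size, p, sizeof(size)); p += sizeof(size);
            switch (type) {   // same order as the kInt32..kCString enum above
            case 0: { int32_t v; memcpy(&v, p, sizeof(v));
                      printf("%s=%d\n", name, v); break; }
            case 1: { int64_t v; memcpy(&v, p, sizeof(v));
                      printf("%s=%lld\n", name, (long long)v); break; }
            case 2: { double v; memcpy(&v, p, sizeof(v));
                      printf("%s=%g\n", name, v); break; }
            case 3: { uint64_t c, d; memcpy(&c, p, 8); memcpy(&d, p + 8, 8);
                      printf("%s=%llu/%llu\n", name,
                             (unsigned long long)c, (unsigned long long)d); break; }
            case 4:   printf("%s=%s\n", name, p); break;   // NUL-terminated string
            }
            p += size;   // 'size' covers the value bytes only
        }
    }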
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
index 9b08aa7..6109190 100644
--- a/media/libmediametrics/MediaMetrics.cpp
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -34,7 +34,7 @@
// manage the overall record
mediametrics_handle_t mediametrics_create(mediametricskey_t key) {
- android::MediaAnalyticsItem *item = new android::MediaAnalyticsItem(key);
+ android::MediaAnalyticsItem *item = android::MediaAnalyticsItem::create(key);
return (mediametrics_handle_t) item;
}
@@ -187,18 +187,9 @@
return android::MediaAnalyticsItem::isEnabled();
}
-#if 0
-// do not expose this as is.
-// need to revisit (or redefine) how the android::Parcel parameter is handled
-// so that it meets the stable-API criteria for updateable components.
-//
-int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel) {
+bool mediametrics_getAttributes(mediametrics_handle_t handle, char **buffer, size_t *length) {
android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
- if (item == NULL) {
- return -1;
- }
- return item->writeToParcel(parcel);
+ if (item == NULL) return false;
+ return item->dumpAttributes(buffer, length);
+
}
-#endif
-
-
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index b99cd91..4a36f6a 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -17,9 +17,10 @@
#ifndef ANDROID_MEDIA_MEDIAANALYTICSITEM_H
#define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
-#include <cutils/properties.h>
#include <string>
#include <sys/types.h>
+
+#include <cutils/properties.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
@@ -81,12 +82,19 @@
PROTO_LAST = PROTO_V1,
};
+ private:
+ // use the ::create() method instead
+ MediaAnalyticsItem();
+ MediaAnalyticsItem(Key);
+ MediaAnalyticsItem(const MediaAnalyticsItem&);
+ MediaAnalyticsItem &operator=(const MediaAnalyticsItem&);
public:
+ static MediaAnalyticsItem* create(Key key);
+ static MediaAnalyticsItem* create();
+
// access functions for the class
- MediaAnalyticsItem();
- MediaAnalyticsItem(Key);
~MediaAnalyticsItem();
// SessionID ties multiple submissions for same key together
@@ -175,6 +183,9 @@
int32_t writeToParcel(Parcel *);
int32_t readFromParcel(const Parcel&);
+ // supports the stable interface
+ bool dumpAttributes(char **pbuffer, size_t *plength);
+
std::string toString();
std::string toString(int version);
const char *toCString();
@@ -183,6 +194,11 @@
// are we collecting analytics data
static bool isEnabled();
+ private:
+ // handle Parcel version 0
+ int32_t writeToParcel0(Parcel *);
+ int32_t readFromParcel0(const Parcel&);
+
protected:
// merge fields from arg into this
diff --git a/media/libmediametrics/include/MediaMetrics.h b/media/libmediametrics/include/MediaMetrics.h
index 4d2f352..a4e1ed2 100644
--- a/media/libmediametrics/include/MediaMetrics.h
+++ b/media/libmediametrics/include/MediaMetrics.h
@@ -85,13 +85,9 @@
void mediametrics_setUid(mediametrics_handle_t handle, uid_t uid);
bool mediametrics_isEnabled();
-#if 0
-// do not expose this as is.
-// need to revisit (or redefine) how the android::Parcel parameter is handled
-// so that it meets the stable-API criteria for updateable components.
-//
-int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel);
-#endif
+// Serialized copy of the attributes/values, mostly for upstream getMetrics() calls.
+// The caller owns the buffer allocated by this call and is responsible for releasing it.
+bool mediametrics_getAttributes(mediametrics_handle_t handle, char **buffer, size_t *length);
__END_DECLS
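A sketch of how a client of the stable C interface is expected to use the new call; the key and attribute names below are made up, and the buffer is released with free() on the assumption that it is heap-allocated by the library (the implementation above builds it with malloc):

    #include <cstdlib>
    #include <media/MediaMetrics.h>

    void getAttributesSketch() {
        mediametrics_handle_t h = mediametrics_create("a.hypothetical.key");
        mediametrics_setInt32(h, "a.hypothetical.attr", 7);
        char *buffer = nullptr;
        size_t length = 0;
        if (mediametrics_getAttributes(h, &buffer, &length)) {
            // ... pass the flat record up through getMetrics() ...
            free(buffer);   // caller owns the buffer (see header comment above)
        }
        mediametrics_delete(h);
    }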
diff --git a/media/libmediametrics/libmediametrics.map.txt b/media/libmediametrics/libmediametrics.map.txt
new file mode 100644
index 0000000..c46281a
--- /dev/null
+++ b/media/libmediametrics/libmediametrics.map.txt
@@ -0,0 +1,29 @@
+LIBMEDIAMETRICS_1 {
+ global:
+ mediametrics_addDouble; # apex
+ mediametrics_addInt32; # apex
+ mediametrics_addInt64; # apex
+ mediametrics_addRate; # apex
+ mediametrics_count; # apex
+ mediametrics_create; # apex
+ mediametrics_delete; # apex
+ mediametrics_freeCString; # apex
+ mediametrics_getAttributes; # apex
+ mediametrics_getCString; # apex
+ mediametrics_getDouble; # apex
+ mediametrics_getInt32; # apex
+ mediametrics_getInt64; # apex
+ mediametrics_getKey; # apex
+ mediametrics_getRate; # apex
+ mediametrics_isEnabled; # apex
+ mediametrics_readable; # apex
+ mediametrics_selfRecord; # apex
+ mediametrics_setCString; # apex
+ mediametrics_setDouble; # apex
+ mediametrics_setInt32; # apex
+ mediametrics_setInt64; # apex
+ mediametrics_setRate; # apex
+ mediametrics_setUid; # apex
+ local:
+ *;
+};
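The new version script makes the exported surface of libmediametrics explicit: only the listed mediametrics_* C entry points stay global, the "local: *;" clause hides everything else, and the "# apex" annotations mark the symbols intended to be usable across the APEX boundary by the updatable media code introduced in this change.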
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index b3f7404..08519cd 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -21,7 +21,6 @@
"libgui",
"liblog",
"libmedia_omx",
- "libstagefright_foundation",
"libui",
"libutils",
@@ -55,6 +54,7 @@
"libmediaplayer2-protos",
"libmedia_player2_util",
"libprotobuf-cpp-lite",
+ "libstagefright_foundation_without_imemory",
"libstagefright_nuplayer2",
"libstagefright_player2",
"libstagefright_rtsp",
@@ -123,9 +123,6 @@
"signed-integer-overflow",
],
cfi: true,
- diag: {
- cfi: true,
- },
},
}
diff --git a/media/libmediaplayer2/JMedia2HTTPConnection.cpp b/media/libmediaplayer2/JMedia2HTTPConnection.cpp
index d264a7f..e1baa10 100644
--- a/media/libmediaplayer2/JMedia2HTTPConnection.cpp
+++ b/media/libmediaplayer2/JMedia2HTTPConnection.cpp
@@ -21,11 +21,10 @@
#include <mediaplayer2/JavaVMHelper.h>
#include <mediaplayer2/JMedia2HTTPConnection.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <nativehelper/ScopedLocalRef.h>
+#include <nativehelper/scoped_local_ref.h>
#include "log/log.h"
#include "jni.h"
-#include <nativehelper/JNIHelp.h>
namespace android {
diff --git a/media/libmediaplayer2/JMedia2HTTPService.cpp b/media/libmediaplayer2/JMedia2HTTPService.cpp
index 264c15d..20e3573 100644
--- a/media/libmediaplayer2/JMedia2HTTPService.cpp
+++ b/media/libmediaplayer2/JMedia2HTTPService.cpp
@@ -25,8 +25,7 @@
#include <mediaplayer2/JMedia2HTTPConnection.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <nativehelper/JNIHelp.h>
-#include <nativehelper/ScopedLocalRef.h>
+#include <nativehelper/scoped_local_ref.h>
namespace android {
diff --git a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
index 98a3e75..4de92ad 100644
--- a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
+++ b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
@@ -22,7 +22,6 @@
#include <cutils/properties.h> // for property_get
#include <utils/Log.h>
-#include <media/AudioPolicyHelper.h>
#include <media/stagefright/foundation/ADebug.h>
namespace {
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
index 0c8d016..7804a62 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
@@ -214,6 +214,8 @@
virtual status_t setParameter(int key, const Parcel &request) = 0;
virtual status_t getParameter(int key, Parcel *reply) = 0;
+ virtual status_t getMetrics(char **buffer, size_t *length) = 0;
+
// Invoke a generic method on the player by using opaque parcels
// for the request and reply.
//
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index 78865c4..2993ab1 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -102,6 +102,7 @@
status_t setAudioAttributes(const jobject attributes);
jobject getAudioAttributes();
status_t getParameter(int key, Parcel* reply);
+ status_t getMetrics(char **buffer, size_t *length);
// Modular DRM
status_t prepareDrm(int64_t srcId,
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index f75380c..ae7ac59 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -21,7 +21,6 @@
#include <android/binder_ibinder.h>
#include <media/AudioSystem.h>
#include <media/DataSourceDesc.h>
-#include <media/MediaAnalyticsItem.h>
#include <media/MemoryLeakTrackUtil.h>
#include <media/NdkWrapper.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -979,6 +978,22 @@
return status;
}
+// for mediametrics
+status_t MediaPlayer2::getMetrics(char **buffer, size_t *length) {
+ ALOGD("MediaPlayer2::getMetrics()");
+ Mutex::Autolock _l(mLock);
+ if (mPlayer == NULL) {
+ ALOGV("getMetrics: no active player");
+ return INVALID_OPERATION;
+ }
+
+ status_t status = mPlayer->getMetrics(buffer, length);
+ if (status != OK) {
+ ALOGD("getMetrics returns %d", status);
+ }
+ return status;
+}
+
void MediaPlayer2::notify(int64_t srcId, int msg, int ext1, int ext2, const PlayerMessage *obj) {
ALOGV("message received srcId=%lld, msg=%d, ext1=%d, ext2=%d",
(long long)srcId, msg, ext1, ext2);
@@ -1109,8 +1124,10 @@
// completed) so the state change to "prepared" might not have happened yet (e.g., buffering).
// Still, we can allow prepareDrm for the use case of being called in OnDrmInfoListener.
if (!(mCurrentState & (MEDIA_PLAYER2_PREPARING | MEDIA_PLAYER2_PREPARED))) {
- ALOGE("prepareDrm is called in the wrong state (%d).", mCurrentState);
- return INVALID_OPERATION;
+ ALOGW("prepareDrm(%lld) called in non-prepare state(%d)", (long long)srcId, mCurrentState);
+ if (srcId == mSrcId) {
+ return INVALID_OPERATION;
+ }
}
if (drmSessionId.isEmpty()) {
diff --git a/media/libmediaplayer2/nuplayer2/Android.bp b/media/libmediaplayer2/nuplayer2/Android.bp
index 71cd50f..0f69b2e 100644
--- a/media/libmediaplayer2/nuplayer2/Android.bp
+++ b/media/libmediaplayer2/nuplayer2/Android.bp
@@ -51,6 +51,7 @@
"libui",
"libgui",
"libmedia",
+ "libmediametrics",
"libmediandk",
"libmediandk_utils",
"libpowermanager",
diff --git a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp b/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
index bbd22bc..89703de 100644
--- a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
+++ b/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
@@ -22,7 +22,6 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/Utils.h>
-#include <nativehelper/JNIHelp.h>
#include <utils/Log.h>
#include "log/log.h"
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index 5da6e24..d608d4a 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -1289,6 +1289,7 @@
} else if (what == DecoderBase::kWhatShutdownCompleted) {
ALOGV("%s shutdown completed", audio ? "audio" : "video");
if (audio) {
+ Mutex::Autolock autoLock(mDecoderLock);
mAudioDecoder.clear();
mAudioDecoderError = false;
++mAudioDecoderGeneration;
@@ -1296,6 +1297,7 @@
CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
mFlushingAudio = SHUT_DOWN;
} else {
+ Mutex::Autolock autoLock(mDecoderLock);
mVideoDecoder.clear();
mVideoDecoderError = false;
++mVideoDecoderGeneration;
@@ -1967,6 +1969,7 @@
int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
if (mAudioDecoder != NULL) {
mAudioDecoder->pause();
+ Mutex::Autolock autoLock(mDecoderLock);
mAudioDecoder.clear();
mAudioDecoderError = false;
++mAudioDecoderGeneration;
@@ -1988,11 +1991,21 @@
closeAudioSink();
mRenderer->flush(true /* audio */, false /* notifyComplete */);
if (mVideoDecoder != NULL) {
- mRenderer->flush(false /* audio */, false /* notifyComplete */);
+ mDeferredActions.push_back(
+ new FlushDecoderAction(FLUSH_CMD_NONE /* audio */,
+ FLUSH_CMD_FLUSH /* video */));
+ mDeferredActions.push_back(
+ new SeekAction(currentPositionUs,
+ MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
+ // After a flush without shutdown, decoder is paused.
+ // Don't resume it until source seek is done, otherwise it could
+ // start pulling stale data too soon.
+ mDeferredActions.push_back(new ResumeDecoderAction(false));
+ processDeferredActions();
+ } else {
+ performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
}
- performSeek(currentPositionUs, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */);
-
if (forceNonOffload) {
mRenderer->signalDisableOffloadAudio();
mOffloadAudio = false;
@@ -2085,6 +2098,8 @@
}
}
+ Mutex::Autolock autoLock(mDecoderLock);
+
if (audio) {
sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
++mAudioDecoderGeneration;
@@ -2395,6 +2410,8 @@
CHECK(mTrackStats != NULL);
mTrackStats->clear();
+
+ Mutex::Autolock autoLock(mDecoderLock);
if (mVideoDecoder != NULL) {
mTrackStats->push_back(mVideoDecoder->getStats());
}
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.h b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
index 798c725..b8fb988 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
@@ -197,6 +197,7 @@
sp<DecoderBase> mVideoDecoder;
bool mOffloadAudio;
sp<DecoderBase> mAudioDecoder;
+ Mutex mDecoderLock; // guard |mAudioDecoder| and |mVideoDecoder|.
sp<CCDecoder> mCCDecoder;
sp<Renderer> mRenderer;
sp<ALooper> mRendererLooper;
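mDecoderLock is taken both where the decoder sp<>s are cleared or re-created and where they are read (for example when gathering track stats), so a reader can no longer race with a decoder being torn down on another thread. A condensed, self-contained illustration of the pattern — not part of the patch; the struct and member names are stand-ins:

    #include <media/stagefright/foundation/AMessage.h>
    #include <utils/Mutex.h>
    #include <utils/Vector.h>

    struct DecoderHolderSketch {
        android::Mutex mDecoderLock;  // guards the two sp<> members below
        android::sp<android::AMessage> mAudioStats, mVideoStats;  // stand-ins for the decoders

        void onShutdownCompleted(bool audio) {
            android::Mutex::Autolock autoLock(mDecoderLock);
            (audio ? mAudioStats : mVideoStats).clear();  // same pattern as the clear() calls above
        }

        void collectStats(android::Vector<android::sp<android::AMessage>> *out) {
            android::Mutex::Autolock autoLock(mDecoderLock);
            if (mVideoStats != NULL) out->push_back(mVideoStats);
            if (mAudioStats != NULL) out->push_back(mAudioStats);
        }
    };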
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
index a5bd62d..66bfae5 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
@@ -107,7 +107,12 @@
mStats->setInt64("frames-total", mNumFramesTotal);
mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
- return mStats;
+ mStats->setFloat("frame-rate-total", mFrameRateTotal);
+
+ // We hold the mutex at this point.
+ // Return our own copy so we aren't affected by any later changes.
+ sp<AMessage> copiedStats = mStats->dup();
+ return copiedStats;
}
status_t NuPlayer2::Decoder::setVideoSurface(const sp<ANativeWindowWrapper> &nww) {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
index 56e9471..1876496 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
@@ -81,7 +81,7 @@
};
// key for media statistics
-static const char *kKeyPlayer = "nuplayer";
+static const char *kKeyPlayer = "nuplayer2";
// attrs for media statistics
// NB: these are matched with public Java API constants defined
// in frameworks/base/media/java/android/media/MediaPlayer2.java
@@ -92,6 +92,7 @@
static const char *kPlayerHeight = "android.media.mediaplayer.height";
static const char *kPlayerFrames = "android.media.mediaplayer.frames";
static const char *kPlayerFramesDropped = "android.media.mediaplayer.dropped";
+static const char *kPlayerFrameRate = "android.media.mediaplayer.fps";
static const char *kPlayerAMime = "android.media.mediaplayer.audio.mime";
static const char *kPlayerACodec = "android.media.mediaplayer.audio.codec";
static const char *kPlayerDuration = "android.media.mediaplayer.durationMs";
@@ -107,6 +108,8 @@
static const char *kPlayerRebufferingCount = "android.media.mediaplayer.rebuffers";
static const char *kPlayerRebufferingAtExit = "android.media.mediaplayer.rebufferExit";
+static const char *kPlayerVersion = "android.media.mediaplayer.version";
+
NuPlayer2Driver::NuPlayer2Driver(pid_t pid, uid_t uid, const sp<JObjectHolder> &context)
: mState(STATE_IDLE),
@@ -125,7 +128,8 @@
mMediaClock(new MediaClock),
mPlayer(new NuPlayer2(pid, uid, mMediaClock, context)),
mPlayerFlags(0),
- mAnalyticsItem(NULL),
+ mMetricsHandle(0),
+ mPlayerVersion(0),
mClientUid(uid),
mAtEOS(false),
mLooping(false),
@@ -136,9 +140,13 @@
mMediaClock->init();
- // set up an analytics record
- mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
- mAnalyticsItem->setUid(mClientUid);
+ // XXX: what version are we?
+ // Ideally, this should track the APK version info for the APEX packaging
+
+ // set up media metrics record
+ mMetricsHandle = mediametrics_create(kKeyPlayer);
+ mediametrics_setUid(mMetricsHandle, mClientUid);
+ mediametrics_setInt64(mMetricsHandle, kPlayerVersion, mPlayerVersion);
mNuPlayer2Looper->start(
false, /* runOnCallingThread */
@@ -159,10 +167,7 @@
updateMetrics("destructor");
logMetrics("destructor");
- if (mAnalyticsItem != NULL) {
- delete mAnalyticsItem;
- mAnalyticsItem = NULL;
- }
+ mediametrics_delete(mMetricsHandle);
}
status_t NuPlayer2Driver::initCheck() {
@@ -453,15 +458,15 @@
if (mime.startsWith("video/")) {
int32_t width, height;
- mAnalyticsItem->setCString(kPlayerVMime, mime.c_str());
+ mediametrics_setCString(mMetricsHandle, kPlayerVMime, mime.c_str());
if (!name.empty()) {
- mAnalyticsItem->setCString(kPlayerVCodec, name.c_str());
+ mediametrics_setCString(mMetricsHandle, kPlayerVCodec, name.c_str());
}
if (stats->findInt32("width", &width)
&& stats->findInt32("height", &height)) {
- mAnalyticsItem->setInt32(kPlayerWidth, width);
- mAnalyticsItem->setInt32(kPlayerHeight, height);
+ mediametrics_setInt32(mMetricsHandle, kPlayerWidth, width);
+ mediametrics_setInt32(mMetricsHandle, kPlayerHeight, height);
}
int64_t numFramesTotal = 0;
@@ -469,14 +474,18 @@
stats->findInt64("frames-total", &numFramesTotal);
stats->findInt64("frames-dropped-output", &numFramesDropped);
- mAnalyticsItem->setInt64(kPlayerFrames, numFramesTotal);
- mAnalyticsItem->setInt64(kPlayerFramesDropped, numFramesDropped);
+ mediametrics_setInt64(mMetricsHandle, kPlayerFrames, numFramesTotal);
+ mediametrics_setInt64(mMetricsHandle, kPlayerFramesDropped, numFramesDropped);
+ float frameRate = 0;
+ if (stats->findFloat("frame-rate-output", &frameRate)) {
+ mediametrics_setInt64(mMetricsHandle, kPlayerFrameRate, frameRate);
+ }
} else if (mime.startsWith("audio/")) {
- mAnalyticsItem->setCString(kPlayerAMime, mime.c_str());
+ mediametrics_setCString(mMetricsHandle, kPlayerAMime, mime.c_str());
if (!name.empty()) {
- mAnalyticsItem->setCString(kPlayerACodec, name.c_str());
+ mediametrics_setCString(mMetricsHandle, kPlayerACodec, name.c_str());
}
}
}
@@ -487,17 +496,17 @@
// getDuration() uses mLock for mutex -- careful where we use it.
int64_t duration_ms = -1;
getDuration(&duration_ms);
- mAnalyticsItem->setInt64(kPlayerDuration, duration_ms);
+ mediametrics_setInt64(mMetricsHandle, kPlayerDuration, duration_ms);
- mAnalyticsItem->setInt64(kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
+ mediametrics_setInt64(mMetricsHandle, kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
if (mRebufferingEvents != 0) {
- mAnalyticsItem->setInt64(kPlayerRebuffering, (mRebufferingTimeUs+500)/1000 );
- mAnalyticsItem->setInt32(kPlayerRebufferingCount, mRebufferingEvents);
- mAnalyticsItem->setInt32(kPlayerRebufferingAtExit, mRebufferingAtExit);
+ mediametrics_setInt64(mMetricsHandle, kPlayerRebuffering, (mRebufferingTimeUs+500)/1000 );
+ mediametrics_setInt32(mMetricsHandle, kPlayerRebufferingCount, mRebufferingEvents);
+ mediametrics_setInt32(mMetricsHandle, kPlayerRebufferingAtExit, mRebufferingAtExit);
}
- mAnalyticsItem->setCString(kPlayerDataSourceType, mPlayer->getDataSourceType());
+ mediametrics_setCString(mMetricsHandle, kPlayerDataSourceType, mPlayer->getDataSourceType());
}
@@ -507,7 +516,7 @@
}
ALOGV("logMetrics(%p) from %s at state %d", this, where, mState);
- if (mAnalyticsItem == NULL || mAnalyticsItem->isEnabled() == false) {
+ if (mMetricsHandle == 0 || mediametrics_isEnabled() == false) {
return;
}
@@ -516,16 +525,13 @@
// and that always injects 3 fields (duration, playing time, and
// datasource) into the record.
// So the canonical "empty" record has 3 elements in it.
- if (mAnalyticsItem->count() > 3) {
-
- mAnalyticsItem->selfrecord();
-
+ if (mediametrics_count(mMetricsHandle) > 3) {
+ mediametrics_selfRecord(mMetricsHandle);
// re-init in case we prepare() and start() again.
- delete mAnalyticsItem ;
- mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
- if (mAnalyticsItem) {
- mAnalyticsItem->setUid(mClientUid);
- }
+ mediametrics_delete(mMetricsHandle);
+ mMetricsHandle = mediametrics_create(kKeyPlayer);
+ mediametrics_setUid(mMetricsHandle, mClientUid);
+ mediametrics_setInt64(mMetricsHandle, kPlayerVersion, mPlayerVersion);
} else {
ALOGV("did not have anything to record");
}
@@ -649,19 +655,18 @@
return INVALID_OPERATION;
}
-status_t NuPlayer2Driver::getParameter(int key, Parcel *reply) {
-
- if (key == FOURCC('m','t','r','X')) {
- // mtrX -- a play on 'metrics' (not matrix)
- // gather current info all together, parcel it, and send it back
- updateMetrics("api");
- mAnalyticsItem->writeToParcel(reply);
- return OK;
- }
-
+status_t NuPlayer2Driver::getParameter(int key __unused, Parcel *reply __unused) {
return INVALID_OPERATION;
}
+status_t NuPlayer2Driver::getMetrics(char **buffer, size_t *length) {
+ updateMetrics("api");
+ if (mediametrics_getAttributes(mMetricsHandle, buffer, length))
+ return OK;
+ else
+ return FAILED_TRANSACTION;
+}
+
void NuPlayer2Driver::notifyResetComplete(int64_t /* srcId */) {
ALOGD("notifyResetComplete(%p)", this);
Mutex::Autolock autoLock(mLock);
@@ -867,11 +872,11 @@
// ext1 is our primary 'error type' value. Only add ext2 when non-zero.
// [test against msg is due to fall through from previous switch value]
if (msg == MEDIA2_ERROR) {
- mAnalyticsItem->setInt32(kPlayerError, ext1);
+ mediametrics_setInt32(mMetricsHandle, kPlayerError, ext1);
if (ext2 != 0) {
- mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
+ mediametrics_setInt32(mMetricsHandle, kPlayerErrorCode, ext2);
}
- mAnalyticsItem->setCString(kPlayerErrorState, stateString(mState).c_str());
+ mediametrics_setCString(mMetricsHandle, kPlayerErrorState, stateString(mState).c_str());
}
mAtEOS = true;
break;
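The driver-side conversion reduces to a simple handle lifecycle against the C API: create a record keyed "nuplayer2", set attributes as they become known, self-record only when there is something beyond the canonical empty record, then delete (and re-create for the next prepare()/start() cycle). A compressed, illustrative version — not part of the patch; the int64 attribute name is made up:

    #include <sys/types.h>
    #include <media/MediaMetrics.h>

    void metricsLifecycleSketch(uid_t clientUid) {
        mediametrics_handle_t h = mediametrics_create("nuplayer2");
        mediametrics_setUid(h, clientUid);
        mediametrics_setInt64(h, "android.media.mediaplayer.example", 1);
        // Mirrors the driver's check above: the "empty" record always carries 3 fields.
        if (mediametrics_isEnabled() && mediametrics_count(h) > 3) {
            mediametrics_selfRecord(h);
        }
        mediametrics_delete(h);  // always release the handle
    }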
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
index 0ec3a4b..c97e247 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
@@ -16,7 +16,7 @@
#include <mediaplayer2/MediaPlayer2Interface.h>
-#include <media/MediaAnalyticsItem.h>
+#include <media/MediaMetrics.h>
#include <media/stagefright/foundation/ABase.h>
#include <mediaplayer2/JObjectHolder.h>
@@ -61,6 +61,7 @@
virtual void setAudioSink(const sp<AudioSink> &audioSink) override;
virtual status_t setParameter(int key, const Parcel &request) override;
virtual status_t getParameter(int key, Parcel *reply) override;
+ virtual status_t getMetrics(char **buf, size_t *length) override;
virtual status_t dump(int fd, const Vector<String16> &args) const override;
@@ -132,7 +133,8 @@
sp<AudioSink> mAudioSink;
uint32_t mPlayerFlags;
- MediaAnalyticsItem *mAnalyticsItem;
+ mediametrics_handle_t mMetricsHandle;
+ int64_t mPlayerVersion;
uid_t mClientUid;
bool mAtEOS;
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
index 3be7e36..a8c9932 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -1148,8 +1148,7 @@
ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
return 0;
}
- // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
- return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
+ return (int64_t)(numFrames * 1000000LL / sampleRate);
}
// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 55867a5..46a1c24 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -12,6 +12,7 @@
shared_libs: [
"android.hardware.media.omx@1.0",
+ "libbase",
"libaudioclient",
"libbinder",
"libcamera_client",
@@ -63,8 +64,6 @@
name: "libmediaplayerservice",
- compile_multilib: "32",
-
sanitize: {
cfi: true,
},
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 9bcfc83..da95817 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -45,7 +45,6 @@
#include <utils/Timers.h>
#include <utils/Vector.h>
-#include <media/AudioPolicyHelper.h>
#include <media/IMediaHTTPService.h>
#include <media/IRemoteDisplay.h>
#include <media/IRemoteDisplayClient.h>
@@ -1627,7 +1626,7 @@
mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
if (mAttributes != NULL) {
memcpy(mAttributes, attr, sizeof(audio_attributes_t));
- mStreamType = audio_attributes_to_stream_type(attr);
+ mStreamType = AudioSystem::attributesToStreamType(*attr);
}
} else {
mAttributes = NULL;
@@ -1816,7 +1815,7 @@
mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
}
memcpy(mAttributes, attributes, sizeof(audio_attributes_t));
- mStreamType = audio_attributes_to_stream_type(attributes);
+ mStreamType = AudioSystem::attributesToStreamType(*attributes);
}
}
@@ -1906,10 +1905,16 @@
if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
return NO_INIT;
}
+ if (afSampleRate == 0) {
+ return NO_INIT;
+ }
const size_t framesPerBuffer =
(unsigned long long)sampleRate * afFrameCount / afSampleRate;
if (bufferCount == 0) {
+ if (framesPerBuffer == 0) {
+ return NO_INIT;
+ }
// use suggestedFrameCount
bufferCount = (suggestedFrameCount + framesPerBuffer - 1) / framesPerBuffer;
}
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index f2a3038..d111313 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -24,6 +24,7 @@
#include <algorithm>
+#include <android-base/properties.h>
#include <android/hardware/ICamera.h>
#include <binder/IPCThreadState.h>
@@ -202,7 +203,7 @@
}
mAnalyticsDirty = false;
if (reinitialize) {
- mAnalyticsItem = new MediaAnalyticsItem(kKeyRecorder);
+ mAnalyticsItem = MediaAnalyticsItem::create(kKeyRecorder);
}
}
@@ -1761,13 +1762,26 @@
}
}
+ // Enable temporal layering if the expected (max) playback frame rate exceeds the minimum
+ // display refresh rate on a typical device by more than ~11% (refresh rate / 0.9). Add layers
+ // until the base layer falls under this limit. Allow device manufacturers to override it.
+
+ // TODO: make this configurable by the application
+ std::string maxBaseLayerFpsProperty =
+ ::android::base::GetProperty("ro.media.recorder-max-base-layer-fps", "");
+ float maxBaseLayerFps = (float)::atof(maxBaseLayerFpsProperty.c_str());
+ // TRICKY: compare with !(>=) so that a NaN (unparseable) property value falls back to the default
+ if (!(maxBaseLayerFps >= kMinTypicalDisplayRefreshingRate / 0.9)) {
+ maxBaseLayerFps = kMinTypicalDisplayRefreshingRate / 0.9;
+ }
+
for (uint32_t tryLayers = 1; tryLayers <= kMaxNumVideoTemporalLayers; ++tryLayers) {
if (tryLayers > tsLayers) {
tsLayers = tryLayers;
}
// keep going until the base layer fps falls below the typical display refresh rate
float baseLayerFps = maxPlaybackFps / (1 << (tryLayers - 1));
- if (baseLayerFps < kMinTypicalDisplayRefreshingRate / 0.9) {
+ if (baseLayerFps < maxBaseLayerFps) {
break;
}
}
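As a worked example of the limit above: with a typical minimum display refresh rate of 60 Hz (the actual value of kMinTypicalDisplayRefreshingRate is defined elsewhere in this file) and no override property, maxBaseLayerFps becomes 60 / 0.9 ≈ 66.7. Recording for an intended 240 fps playback, the loop tries base-layer rates of 240, 120 and 60 fps for 1, 2 and 3 layers; 60 is the first value below 66.7, so the recorder settles on three temporal layers.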
diff --git a/media/libmediaplayerservice/include/MediaPlayerInterface.h b/media/libmediaplayerservice/include/MediaPlayerInterface.h
index 3119950..0ad4d04 100644
--- a/media/libmediaplayerservice/include/MediaPlayerInterface.h
+++ b/media/libmediaplayerservice/include/MediaPlayerInterface.h
@@ -151,13 +151,13 @@
virtual media::VolumeShaper::Status applyVolumeShaper(
const sp<media::VolumeShaper::Configuration>& configuration,
- const sp<media::VolumeShaper::Operation>& operation);
- virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id);
+ const sp<media::VolumeShaper::Operation>& operation) = 0;
+ virtual sp<media::VolumeShaper::State> getVolumeShaperState(int id) = 0;
// AudioRouting
- virtual status_t setOutputDevice(audio_port_handle_t deviceId);
- virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
- virtual status_t enableAudioDeviceCallback(bool enabled);
+ virtual status_t setOutputDevice(audio_port_handle_t deviceId) = 0;
+ virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId) = 0;
+ virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
};
MediaPlayerBase() {}
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 1e85804..5a58aa0 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -94,6 +94,7 @@
mDisconnected = false;
mUri.clear();
mUriHeaders.clear();
+ mSources.clear();
if (mFd >= 0) {
close(mFd);
mFd = -1;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 5cf6bbd..3388097 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1829,11 +1829,21 @@
closeAudioSink();
mRenderer->flush(true /* audio */, false /* notifyComplete */);
if (mVideoDecoder != NULL) {
- mRenderer->flush(false /* audio */, false /* notifyComplete */);
+ mDeferredActions.push_back(
+ new FlushDecoderAction(FLUSH_CMD_NONE /* audio */,
+ FLUSH_CMD_FLUSH /* video */));
+ mDeferredActions.push_back(
+ new SeekAction(currentPositionUs,
+ MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
+ // After a flush without shutdown, decoder is paused.
+ // Don't resume it until source seek is done, otherwise it could
+ // start pulling stale data too soon.
+ mDeferredActions.push_back(new ResumeDecoderAction(false));
+ processDeferredActions();
+ } else {
+ performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
}
- performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
-
if (forceNonOffload) {
mRenderer->signalDisableOffloadAudio();
mOffloadAudio = false;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index df1ffde..2f0da2d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -107,11 +107,16 @@
}
sp<AMessage> NuPlayer::Decoder::getStats() const {
+
mStats->setInt64("frames-total", mNumFramesTotal);
mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
mStats->setFloat("frame-rate-total", mFrameRateTotal);
- return mStats;
+
+ // We hold the mutex at this point.
+ // Return our own copy so we aren't affected by any later changes.
+ sp<AMessage> copiedStats = mStats->dup();
+ return copiedStats;
}
status_t NuPlayer::Decoder::setVideoSurface(const sp<Surface> &surface) {
@@ -678,7 +683,7 @@
msg->setSize("buffer-ix", index);
sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
- ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
+ ALOGV("[%s] resubmitting CSD", mComponentName.c_str());
msg->setBuffer("buffer", buffer);
mCSDsToSubmit.removeAt(0);
if (!onInputBufferFetched(msg)) {
@@ -749,7 +754,7 @@
reply->setSize("size", size);
if (eos) {
- ALOGI("[%s] saw output EOS", mIsAudio ? "audio" : "video");
+ ALOGV("[%s] saw output EOS", mIsAudio ? "audio" : "video");
buffer->meta()->setInt32("eos", true);
reply->setInt32("eos", true);
@@ -1029,7 +1034,7 @@
int64_t resumeAtMediaTimeUs;
if (extra->findInt64(
"resume-at-mediaTimeUs", &resumeAtMediaTimeUs)) {
- ALOGI("[%s] suppressing rendering until %lld us",
+ ALOGV("[%s] suppressing rendering until %lld us",
mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index ba3ebaa..2b813e7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -97,7 +97,7 @@
mMediaClock->init();
// set up an analytics record
- mAnalyticsItem = new MediaAnalyticsItem(kKeyPlayer);
+ mAnalyticsItem = MediaAnalyticsItem::create(kKeyPlayer);
mLooper->start(
false, /* runOnCallingThread */
@@ -329,7 +329,7 @@
}
status_t NuPlayerDriver::start() {
- ALOGD("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
+ ALOGV("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
Mutex::Autolock autoLock(mLock);
return start_l();
}
@@ -471,7 +471,7 @@
}
status_t NuPlayerDriver::seekTo(int msec, MediaPlayerSeekMode mode) {
- ALOGD("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
+ ALOGV("seekTo(%p) (%d ms, %d) at state %d", this, msec, mode, mState);
Mutex::Autolock autoLock(mLock);
int64_t seekTimeUs = msec * 1000LL;
@@ -635,7 +635,7 @@
// re-init in case we prepare() and start() again.
delete mAnalyticsItem ;
- mAnalyticsItem = new MediaAnalyticsItem("nuplayer");
+ mAnalyticsItem = MediaAnalyticsItem::create("nuplayer");
if (mAnalyticsItem) {
mAnalyticsItem->setUid(mClientUid);
}
@@ -778,7 +778,7 @@
status_t NuPlayerDriver::getParameter(int key, Parcel *reply) {
- if (key == FOURCC('m','t','r','X')) {
+ if (key == FOURCC('m','t','r','X') && mAnalyticsItem != NULL) {
// mtrX -- a play on 'metrics' (not matrix)
// gather current info all together, parcel it, and send it back
updateMetrics("api");
@@ -965,7 +965,7 @@
void NuPlayerDriver::notifyListener_l(
int msg, int ext1, int ext2, const Parcel *in) {
- ALOGD("notifyListener_l(%p), (%d, %d, %d, %d), loop setting(%d, %d)",
+ ALOGV("notifyListener_l(%p), (%d, %d, %d, %d), loop setting(%d, %d)",
this, msg, ext1, ext2, (in == NULL ? -1 : (int)in->dataSize()), mAutoLoop, mLooping);
switch (msg) {
case MEDIA_PLAYBACK_COMPLETE:
@@ -1006,7 +1006,7 @@
// when we have an error, add it to the analytics for this playback.
// ext1 is our primary 'error type' value. Only add ext2 when non-zero.
// [test against msg is due to fall through from previous switch value]
- if (msg == MEDIA_ERROR) {
+ if (msg == MEDIA_ERROR && mAnalyticsItem != NULL) {
mAnalyticsItem->setInt32(kPlayerError, ext1);
if (ext2 != 0) {
mAnalyticsItem->setInt32(kPlayerErrorCode, ext2);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index 8d876da..2d0c9e0 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -159,8 +159,12 @@
if (drm != NULL) {
for (size_t i = 0; i < psshDRMs.size(); i++) {
DrmUUID uuid = psshDRMs[i];
- if (drm->isCryptoSchemeSupported(uuid.ptr(), String8()))
+ bool isSupported = false;
+ status = drm->isCryptoSchemeSupported(uuid.ptr(), String8(),
+ DrmPlugin::kSecurityLevelUnknown, &isSupported);
+ if (status == OK && isSupported) {
supportedDRMs.add(uuid);
+ }
}
drm.clear();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index c990b2a..65d6d61 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1161,8 +1161,8 @@
ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
return 0;
}
- // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
- return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
+
+ return (int64_t)(numFrames * 1000000LL / sampleRate);
}
// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
diff --git a/media/libmediaplayerservice/tests/Android.bp b/media/libmediaplayerservice/tests/Android.bp
index 4749a8b..f8c89e5 100644
--- a/media/libmediaplayerservice/tests/Android.bp
+++ b/media/libmediaplayerservice/tests/Android.bp
@@ -14,8 +14,6 @@
"android.hardware.drm@1.2",
],
- compile_multilib: "32",
-
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libnblog/ReportPerformance.cpp b/media/libnblog/ReportPerformance.cpp
index f632e40..b050b83 100644
--- a/media/libnblog/ReportPerformance.cpp
+++ b/media/libnblog/ReportPerformance.cpp
@@ -168,7 +168,7 @@
return false;
}
- std::unique_ptr<MediaAnalyticsItem> item(new MediaAnalyticsItem("audiothread"));
+ std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("audiothread"));
const Histogram &workHist = data.workHist;
if (workHist.totalCount() > 0) {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index a1a2660..1dee4f7 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -37,6 +37,7 @@
#include <media/stagefright/BufferProducerWrapper.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/PersistentSurface.h>
@@ -171,11 +172,7 @@
}
struct CodecObserver : public BnOMXObserver {
- CodecObserver() {}
-
- void setNotificationMessage(const sp<AMessage> &msg) {
- mNotify = msg;
- }
+ explicit CodecObserver(const sp<AMessage> &msg) : mNotify(msg) {}
// from IOMXObserver
virtual void onMessages(const std::list<omx_message> &messages) {
@@ -251,7 +248,7 @@
virtual ~CodecObserver() {}
private:
- sp<AMessage> mNotify;
+ const sp<AMessage> mNotify;
DISALLOW_EVIL_CONSTRUCTORS(CodecObserver);
};
@@ -1248,6 +1245,7 @@
info.mRenderInfo = NULL;
info.mGraphicBuffer = graphicBuffer;
info.mNewGraphicBuffer = false;
+ info.mDequeuedAt = mDequeueCounter;
// TODO: We shouldn't need to create MediaCodecBuffer. In metadata mode
// OMX doesn't use the shared memory buffer, but some code still
@@ -1820,20 +1818,19 @@
}
if (!msg->findInt64(
- "repeat-previous-frame-after",
- &mRepeatFrameDelayUs)) {
+ KEY_REPEAT_PREVIOUS_FRAME_AFTER, &mRepeatFrameDelayUs)) {
mRepeatFrameDelayUs = -1LL;
}
// only allow 32-bit value, since we pass it as U32 to OMX.
- if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
+ if (!msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &mMaxPtsGapUs)) {
mMaxPtsGapUs = 0LL;
} else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < INT32_MIN) {
ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
mMaxPtsGapUs = 0LL;
}
- if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
+ if (!msg->findFloat(KEY_MAX_FPS_TO_ENCODER, &mMaxFps)) {
mMaxFps = -1;
}
@@ -1847,8 +1844,8 @@
}
if (!msg->findInt32(
- "create-input-buffers-suspended",
- (int32_t*)&mCreateInputBuffersSuspended)) {
+ KEY_CREATE_INPUT_SURFACE_SUSPENDED,
+ (int32_t*)&mCreateInputBuffersSuspended)) {
mCreateInputBuffersSuspended = false;
}
}
@@ -2092,7 +2089,8 @@
if (usingSwRenderer) {
outputFormat->setInt32("using-sw-renderer", 1);
}
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) ||
+ !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)) {
int32_t numChannels, sampleRate;
if (!msg->findInt32("channel-count", &numChannels)
|| !msg->findInt32("sample-rate", &sampleRate)) {
@@ -4297,24 +4295,27 @@
int maxDimension = max(width, height);
static const int limits[][5] = {
- /* MBps MB dim bitrate level */
- { 1485, 99, 28, 64, OMX_VIDEO_AVCLevel1 },
- { 1485, 99, 28, 128, OMX_VIDEO_AVCLevel1b },
- { 3000, 396, 56, 192, OMX_VIDEO_AVCLevel11 },
- { 6000, 396, 56, 384, OMX_VIDEO_AVCLevel12 },
- { 11880, 396, 56, 768, OMX_VIDEO_AVCLevel13 },
- { 11880, 396, 56, 2000, OMX_VIDEO_AVCLevel2 },
- { 19800, 792, 79, 4000, OMX_VIDEO_AVCLevel21 },
- { 20250, 1620, 113, 4000, OMX_VIDEO_AVCLevel22 },
- { 40500, 1620, 113, 10000, OMX_VIDEO_AVCLevel3 },
- { 108000, 3600, 169, 14000, OMX_VIDEO_AVCLevel31 },
- { 216000, 5120, 202, 20000, OMX_VIDEO_AVCLevel32 },
- { 245760, 8192, 256, 20000, OMX_VIDEO_AVCLevel4 },
- { 245760, 8192, 256, 50000, OMX_VIDEO_AVCLevel41 },
- { 522240, 8704, 263, 50000, OMX_VIDEO_AVCLevel42 },
- { 589824, 22080, 420, 135000, OMX_VIDEO_AVCLevel5 },
- { 983040, 36864, 543, 240000, OMX_VIDEO_AVCLevel51 },
- { 2073600, 36864, 543, 240000, OMX_VIDEO_AVCLevel52 },
+ /* MBps MB dim bitrate level */
+ { 1485, 99, 28, 64, OMX_VIDEO_AVCLevel1 },
+ { 1485, 99, 28, 128, OMX_VIDEO_AVCLevel1b },
+ { 3000, 396, 56, 192, OMX_VIDEO_AVCLevel11 },
+ { 6000, 396, 56, 384, OMX_VIDEO_AVCLevel12 },
+ { 11880, 396, 56, 768, OMX_VIDEO_AVCLevel13 },
+ { 11880, 396, 56, 2000, OMX_VIDEO_AVCLevel2 },
+ { 19800, 792, 79, 4000, OMX_VIDEO_AVCLevel21 },
+ { 20250, 1620, 113, 4000, OMX_VIDEO_AVCLevel22 },
+ { 40500, 1620, 113, 10000, OMX_VIDEO_AVCLevel3 },
+ { 108000, 3600, 169, 14000, OMX_VIDEO_AVCLevel31 },
+ { 216000, 5120, 202, 20000, OMX_VIDEO_AVCLevel32 },
+ { 245760, 8192, 256, 20000, OMX_VIDEO_AVCLevel4 },
+ { 245760, 8192, 256, 50000, OMX_VIDEO_AVCLevel41 },
+ { 522240, 8704, 263, 50000, OMX_VIDEO_AVCLevel42 },
+ { 589824, 22080, 420, 135000, OMX_VIDEO_AVCLevel5 },
+ { 983040, 36864, 543, 240000, OMX_VIDEO_AVCLevel51 },
+ { 2073600, 36864, 543, 240000, OMX_VIDEO_AVCLevel52 },
+ { 4177920, 139264, 1055, 240000, OMX_VIDEO_AVCLevel6 },
+ { 8355840, 139264, 1055, 480000, OMX_VIDEO_AVCLevel61 },
+ { 16711680, 139264, 1055, 800000, OMX_VIDEO_AVCLevel62 },
};
for (size_t i = 0; i < ARRAY_SIZE(limits); i++) {
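Reading the extended table: a 3840x2160 frame is (3840/16) x (2160/16) = 32400 macroblocks, or 972000 MB/s at 30 fps, which fits the Level 5.1 row (36864 MBs, 983040 MB/s) but exceeds Level 5 on throughput (589824 MB/s); the new Level 6/6.1/6.2 rows extend coverage to 8K-class frame sizes (139264 MBs) with bitrates (in kbps) up to 800000.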
@@ -4427,9 +4428,9 @@
h264type.nRefFrames = 2;
h264type.nBFrames = mLatency == 0 ? 1 : std::min(1U, mLatency - 1);
- // disable B-frames until MPEG4Writer can guarantee finalizing files with B-frames
- // h264type.nRefFrames = 1;
- // h264type.nBFrames = 0;
+ // disable B-frames until we have explicit settings for enabling the feature.
+ h264type.nRefFrames = 1;
+ h264type.nBFrames = 0;
h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
h264type.nAllowedPictureTypes =
@@ -6535,8 +6536,10 @@
if (mDeathNotifier != NULL) {
if (mCodec->mOMXNode != NULL) {
- auto tOmxNode = mCodec->mOMXNode->getHalInterface();
- tOmxNode->unlinkToDeath(mDeathNotifier);
+ auto tOmxNode = mCodec->mOMXNode->getHalInterface<IOmxNode>();
+ if (tOmxNode) {
+ tOmxNode->unlinkToDeath(mDeathNotifier);
+ }
}
mDeathNotifier.clear();
}
@@ -6623,7 +6626,8 @@
CHECK(mCodec->mOMXNode == NULL);
- sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec);
+ sp<AMessage> notify = new AMessage(kWhatOMXMessageList, mCodec);
+ notify->setInt32("generation", mCodec->mNodeGeneration + 1);
sp<RefBase> obj;
CHECK(msg->findObject("codecInfo", &obj));
@@ -6638,7 +6642,7 @@
AString componentName;
CHECK(msg->findString("componentName", &componentName));
- sp<CodecObserver> observer = new CodecObserver;
+ sp<CodecObserver> observer = new CodecObserver(notify);
sp<IOMX> omx;
sp<IOMXNode> omxNode;
@@ -6664,14 +6668,12 @@
}
mDeathNotifier = new DeathNotifier(notify);
- auto tOmxNode = omxNode->getHalInterface();
- if (!tOmxNode->linkToDeath(mDeathNotifier, 0)) {
+ auto tOmxNode = omxNode->getHalInterface<IOmxNode>();
+ if (tOmxNode && !tOmxNode->linkToDeath(mDeathNotifier, 0)) {
mDeathNotifier.clear();
}
- notify = new AMessage(kWhatOMXMessageList, mCodec);
- notify->setInt32("generation", ++mCodec->mNodeGeneration);
- observer->setNotificationMessage(notify);
+ ++mCodec->mNodeGeneration;
mCodec->mComponentName = componentName;
mCodec->mRenderTracker.setComponentName(componentName);
@@ -7429,7 +7431,7 @@
}
int64_t timeOffsetUs;
- if (params->findInt64("time-offset-us", &timeOffsetUs)) {
+ if (params->findInt64(PARAMETER_KEY_OFFSET_TIME, &timeOffsetUs)) {
if (mGraphicBufferSource == NULL) {
ALOGE("[%s] Invalid to set input buffer time offset without surface",
mComponentName.c_str());
@@ -7465,7 +7467,7 @@
}
int32_t dropInputFrames;
- if (params->findInt32("drop-input-frames", &dropInputFrames)) {
+ if (params->findInt32(PARAMETER_KEY_SUSPEND, &dropInputFrames)) {
if (mGraphicBufferSource == NULL) {
ALOGE("[%s] Invalid to set suspend without surface",
mComponentName.c_str());
@@ -7473,7 +7475,7 @@
}
int64_t suspendStartTimeUs = -1;
- (void) params->findInt64("drop-start-time-us", &suspendStartTimeUs);
+ (void) params->findInt64(PARAMETER_KEY_SUSPEND_TIME, &suspendStartTimeUs);
status_t err = statusFromBinderStatus(
mGraphicBufferSource->setSuspend(dropInputFrames != 0, suspendStartTimeUs));
@@ -8161,6 +8163,10 @@
OMX_CommandPortEnable, kPortIndexOutput);
}
+ // Clear the RenderQueue: the queued GraphicBuffers hold the actual
+ // buffer references, so clearing it releases them early.
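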
+ mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
+
if (err == OK) {
err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
ALOGE_IF(err != OK, "Failed to allocate output port buffers after port "
@@ -8566,7 +8572,7 @@
}
sp<IOMX> omx = client.interface();
- sp<CodecObserver> observer = new CodecObserver;
+ sp<CodecObserver> observer = new CodecObserver(new AMessage);
sp<IOMXNode> omxNode;
err = omx->allocateNode(name, observer, &omxNode);
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index f45cc58..2bd7288 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -77,6 +77,41 @@
},
}
+cc_library_static {
+ name: "libstagefright_mpeg2extractor",
+
+ srcs: [
+ "Utils.cpp",
+ "MediaSource.cpp",
+ "HevcUtils.cpp",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libmedia",
+ "libmedia_omx",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+ cfi: true,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+}
+
cc_library {
name: "libstagefright",
@@ -160,7 +195,6 @@
"libstagefright_codecbase",
"libstagefright_foundation",
"libstagefright_omx_utils",
- "libstagefright_opus_common",
"libRScpp",
"libhidlallocatorutils",
"libhidlbase",
@@ -186,6 +220,7 @@
],
header_libs:[
+ "libnativeloader-dummy-headers",
"libstagefright_xmlparser_headers",
"media_ndk_headers",
],
@@ -230,9 +265,11 @@
srcs: [
"ClearFileSource.cpp",
"DataURISource.cpp",
+ "DataSourceBase.cpp",
"HTTPBase.cpp",
"HevcUtils.cpp",
"MediaClock.cpp",
+ "MediaSource.cpp",
"NdkUtils.cpp",
"Utils.cpp",
"VideoFrameSchedulerBase.cpp",
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 42b98b1..18a6bd8 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -431,7 +431,7 @@
|| !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
if (frameTimeUs < 0) {
- int64_t thumbNailTime;
+ int64_t thumbNailTime = -1ll;
if (!trackMeta()->findInt64(kKeyThumbnailTime, &thumbNailTime)
|| thumbNailTime < 0) {
thumbNailTime = 0;
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 7df1a2d..6259b15 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -85,7 +85,7 @@
static const int kTimestampDebugCount = 10;
static const int kItemIdBase = 10000;
static const char kExifHeader[] = {'E', 'x', 'i', 'f', '\0', '\0'};
-static const int32_t kTiffHeaderOffset = htonl(sizeof(kExifHeader));
+static const uint8_t kExifApp1Marker[] = {'E', 'x', 'i', 'f', 0xff, 0xe1};
static const uint8_t kMandatoryHevcNalUnitTypes[3] = {
kHevcNalUnitTypeVps,
@@ -125,7 +125,7 @@
bool isAudio() const { return mIsAudio; }
bool isMPEG4() const { return mIsMPEG4; }
bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic; }
- bool isExifData(const MediaBufferBase *buffer) const;
+ bool isExifData(MediaBufferBase *buffer, uint32_t *tiffHdrOffset) const;
void addChunkOffset(off64_t offset);
void addItemOffsetAndSize(off64_t offset, size_t size, bool isExif);
void flushItemRefs();
@@ -364,7 +364,7 @@
Vector<uint16_t> mProperties;
ItemRefs mDimgRefs;
- ItemRefs mCdscRefs;
+ Vector<uint16_t> mExifList;
uint16_t mImageItemId;
int32_t mIsPrimary;
int32_t mWidth, mHeight;
@@ -1368,14 +1368,16 @@
}
off64_t MPEG4Writer::addSample_l(
- MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten) {
+ MediaBuffer *buffer, bool usePrefix,
+ uint32_t tiffHdrOffset, size_t *bytesWritten) {
off64_t old_offset = mOffset;
if (usePrefix) {
addMultipleLengthPrefixedSamples_l(buffer);
} else {
- if (isExif) {
- ::write(mFd, &kTiffHeaderOffset, 4); // exif_tiff_header_offset field
+ if (tiffHdrOffset > 0) {
+ tiffHdrOffset = htonl(tiffHdrOffset);
+ ::write(mFd, &tiffHdrOffset, 4); // exif_tiff_header_offset field
mOffset += 4;
}
@@ -1803,7 +1805,6 @@
mStartTimestampUs(-1),
mRotation(0),
mDimgRefs("dimg"),
- mCdscRefs("cdsc"),
mImageItemId(0),
mIsPrimary(0),
mWidth(0),
@@ -1984,11 +1985,34 @@
return OK;
}
-bool MPEG4Writer::Track::isExifData(const MediaBufferBase *buffer) const {
- return mIsHeic
- && (buffer->range_length() > sizeof(kExifHeader))
- && !memcmp((uint8_t *)buffer->data() + buffer->range_offset(),
- kExifHeader, sizeof(kExifHeader));
+bool MPEG4Writer::Track::isExifData(
+ MediaBufferBase *buffer, uint32_t *tiffHdrOffset) const {
+ if (!mIsHeic) {
+ return false;
+ }
+
+ // Exif block starting with 'Exif\0\0'
+ size_t length = buffer->range_length();
+ uint8_t *data = (uint8_t *)buffer->data() + buffer->range_offset();
+ if ((length > sizeof(kExifHeader))
+ && !memcmp(data, kExifHeader, sizeof(kExifHeader))) {
+ *tiffHdrOffset = sizeof(kExifHeader);
+ return true;
+ }
+
+ // Exif block starting with fourcc 'Exif' followed by APP1 marker
+ if ((length > sizeof(kExifApp1Marker) + 2 + sizeof(kExifHeader))
+ && !memcmp(data, kExifApp1Marker, sizeof(kExifApp1Marker))
+ && !memcmp(data + sizeof(kExifApp1Marker) + 2, kExifHeader, sizeof(kExifHeader))) {
+ // skip 'Exif' fourcc
+ buffer->set_range(4, buffer->range_length() - 4);
+
+ // 2-byte APP1 + 2-byte size followed by kExifHeader
+ *tiffHdrOffset = 2 + 2 + sizeof(kExifHeader);
+ return true;
+ }
+
+ return false;
}
void MPEG4Writer::Track::addChunkOffset(off64_t offset) {
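Concretely, for the second form recognized above, the muxer data is laid out as the 4-byte 'Exif' fourcc plus the 2-byte APP1 marker (together kExifApp1Marker), a 2-byte APP1 segment size, the 6-byte 'Exif\0\0' header, and then the TIFF header. After set_range() drops the leading 4-byte fourcc, 2 (marker) + 2 (size) + 6 (header) = 10 bytes precede the TIFF data, which is the tiffHdrOffset value that addSample_l() writes into the exif_tiff_header_offset field; for the first (bare 'Exif\0\0') form the offset is simply sizeof(kExifHeader) = 6.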
@@ -2014,7 +2038,7 @@
}
if (isExif) {
- mCdscRefs.value.push_back(mOwner->addItem_l({
+ mExifList.push_back(mOwner->addItem_l({
.itemType = "Exif",
.isPrimary = false,
.isHidden = false,
@@ -2117,7 +2141,16 @@
if (mImageItemId > 0) {
mOwner->addRefs_l(mImageItemId, mDimgRefs);
- mOwner->addRefs_l(mImageItemId, mCdscRefs);
+
+ if (!mExifList.empty()) {
+ // The "cdsc" ref is from the metadata/exif item to the image item.
+ // So the refs all contain the image item.
+ ItemRefs cdscRefs("cdsc");
+ cdscRefs.value.push_back(mImageItemId);
+ for (uint16_t exifItem : mExifList) {
+ mOwner->addRefs_l(exifItem, cdscRefs);
+ }
+ }
}
}
@@ -2269,14 +2302,16 @@
while (!chunk->mSamples.empty()) {
List<MediaBuffer *>::iterator it = chunk->mSamples.begin();
- int32_t isExif;
- if (!(*it)->meta_data().findInt32(kKeyIsExif, &isExif)) {
- isExif = 0;
+ uint32_t tiffHdrOffset;
+ if (!(*it)->meta_data().findInt32(
+ kKeyExifTiffOffset, (int32_t*)&tiffHdrOffset)) {
+ tiffHdrOffset = 0;
}
+ bool isExif = (tiffHdrOffset > 0);
bool usePrefix = chunk->mTrack->usePrefix() && !isExif;
size_t bytesWritten;
- off64_t offset = addSample_l(*it, usePrefix, isExif, &bytesWritten);
+ off64_t offset = addSample_l(*it, usePrefix, tiffHdrOffset, &bytesWritten);
if (chunk->mTrack->isHeic()) {
chunk->mTrack->addItemOffsetAndSize(offset, bytesWritten, isExif);
@@ -3002,10 +3037,11 @@
}
bool isExif = false;
+ uint32_t tiffHdrOffset = 0;
int32_t isMuxerData;
if (buffer->meta_data().findInt32(kKeyIsMuxerData, &isMuxerData) && isMuxerData) {
// We only support one type of muxer data, which is Exif data block.
- isExif = isExifData(buffer);
+ isExif = isExifData(buffer, &tiffHdrOffset);
if (!isExif) {
ALOGW("Ignoring bad Exif data block");
buffer->release();
@@ -3027,7 +3063,7 @@
buffer = NULL;
if (isExif) {
- copy->meta_data().setInt32(kKeyIsExif, 1);
+ copy->meta_data().setInt32(kKeyExifTiffOffset, tiffHdrOffset);
}
bool usePrefix = this->usePrefix() && !isExif;
@@ -3300,7 +3336,8 @@
}
if (!hasMultipleTracks) {
size_t bytesWritten;
- off64_t offset = mOwner->addSample_l(copy, usePrefix, isExif, &bytesWritten);
+ off64_t offset = mOwner->addSample_l(
+ copy, usePrefix, tiffHdrOffset, &bytesWritten);
if (mIsHeic) {
addItemOffsetAndSize(offset, bytesWritten, isExif);
@@ -3558,7 +3595,7 @@
}
int64_t MPEG4Writer::Track::getDurationUs() const {
- return mTrackDurationUs + getStartTimeOffsetTimeUs();
+ return mTrackDurationUs + getStartTimeOffsetTimeUs() + mOwner->getStartTimeOffsetBFramesUs();
}
int64_t MPEG4Writer::Track::getEstimatedTrackSizeBytes() const {
@@ -4022,7 +4059,7 @@
// Prepone video playback.
if (mMinCttsOffsetTicks != mMaxCttsOffsetTicks) {
int32_t mvhdTimeScale = mOwner->getTimeScale();
- uint32_t tkhdDuration = (mTrackDurationUs * mvhdTimeScale + 5E5) / 1E6;
+ uint32_t tkhdDuration = (getDurationUs() * mvhdTimeScale + 5E5) / 1E6;
int64_t mediaTime = ((kMaxCttsOffsetTimeUs - getMinCttsOffsetTimeUs())
* mTimeScale + 5E5) / 1E6;
if (tkhdDuration > 0 && mediaTime > 0) {
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c7da7c7..9c58e05 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -550,7 +550,7 @@
void MediaCodec::initAnalyticsItem() {
if (mAnalyticsItem == NULL) {
- mAnalyticsItem = new MediaAnalyticsItem(kCodecKeyName);
+ mAnalyticsItem = MediaAnalyticsItem::create(kCodecKeyName);
}
mLatencyHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 93478e9..3d58d4b 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -77,7 +77,8 @@
return profilingNeeded;
}
-OmxInfoBuilder sOmxInfoBuilder;
+OmxInfoBuilder sOmxInfoBuilder{true /* allowSurfaceEncoders */};
+OmxInfoBuilder sOmxNoSurfaceEncoderInfoBuilder{false /* allowSurfaceEncoders */};
Mutex sCodec2InfoBuilderMutex;
std::unique_ptr<MediaCodecListBuilderBase> sCodec2InfoBuilder;
@@ -98,7 +99,11 @@
sp<PersistentSurface> surfaceTest =
StagefrightPluginLoader::GetCCodecInstance()->createInputSurface();
if (surfaceTest == nullptr) {
+ ALOGD("Allowing all OMX codecs");
builders.push_back(&sOmxInfoBuilder);
+ } else {
+ ALOGD("Allowing only non-surface-encoder OMX codecs");
+ builders.push_back(&sOmxNoSurfaceEncoderInfoBuilder);
}
builders.push_back(GetCodec2InfoBuilder());
return builders;
@@ -219,6 +224,21 @@
return info1 == nullptr
|| (info2 != nullptr && info1->getRank() < info2->getRank());
});
+
+ // remove duplicate entries
+ bool dedupe = property_get_bool("debug.stagefright.dedupe-codecs", true);
+ if (dedupe) {
+ std::set<std::string> codecsSeen;
+ for (auto it = mCodecInfos.begin(); it != mCodecInfos.end(); ) {
+ std::string codecName = (*it)->getCodecName();
+ if (codecsSeen.count(codecName) == 0) {
+ codecsSeen.emplace(codecName);
+ it++;
+ } else {
+ it = mCodecInfos.erase(it);
+ }
+ }
+ }
}
MediaCodecList::~MediaCodecList() {
@@ -268,10 +288,17 @@
}
ssize_t MediaCodecList::findCodecByName(const char *name) const {
+ Vector<AString> aliases;
for (size_t i = 0; i < mCodecInfos.size(); ++i) {
if (strcmp(mCodecInfos[i]->getCodecName(), name) == 0) {
return i;
}
+ mCodecInfos[i]->getAliases(&aliases);
+ for (const AString &alias : aliases) {
+ if (alias == name) {
+ return i;
+ }
+ }
}
return -ENOENT;
diff --git a/media/libstagefright/MediaCodecListWriter.cpp b/media/libstagefright/MediaCodecListWriter.cpp
index b32e470..c4fb199 100644
--- a/media/libstagefright/MediaCodecListWriter.cpp
+++ b/media/libstagefright/MediaCodecListWriter.cpp
@@ -37,6 +37,16 @@
new MediaCodecInfoWriter(info.get()));
}
+std::unique_ptr<MediaCodecInfoWriter>
+ MediaCodecListWriter::findMediaCodecInfo(const char *name) {
+ for (const sp<MediaCodecInfo> &info : mCodecInfos) {
+ if (!strcmp(info->getCodecName(), name)) {
+ return std::unique_ptr<MediaCodecInfoWriter>(new MediaCodecInfoWriter(info.get()));
+ }
+ }
+ return nullptr;
+}
+
void MediaCodecListWriter::writeGlobalSettings(
const sp<AMessage> &globalSettings) const {
for (const std::pair<std::string, std::string> &kv : mGlobalSettings) {
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 0f75822..c3d85ee 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -32,6 +32,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/MediaErrors.h>
@@ -362,7 +363,7 @@
status_t MediaCodecSource::setInputBufferTimeOffset(int64_t timeOffsetUs) {
sp<AMessage> msg = new AMessage(kWhatSetInputBufferTimeOffset, mReflector);
- msg->setInt64("time-offset-us", timeOffsetUs);
+ msg->setInt64(PARAMETER_KEY_OFFSET_TIME, timeOffsetUs);
return postSynchronouslyAndReturnError(msg);
}
@@ -490,7 +491,7 @@
mCodecLooper->start();
if (mFlags & FLAG_USE_SURFACE_INPUT) {
- mOutputFormat->setInt32("create-input-buffers-suspended", 1);
+ mOutputFormat->setInt32(KEY_CREATE_INPUT_SURFACE_SUSPENDED, 1);
}
AString outputMIME;
@@ -643,6 +644,10 @@
output->mBufferQueue.clear();
output->mEncoderReachedEOS = true;
output->mErrorCode = err;
+ if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+ mStopping = true;
+ mPuller->stop();
+ }
output->mCond.signal();
reachedEOS = true;
@@ -673,9 +678,9 @@
CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
if (mEncoder != NULL) {
sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames", false);
+ params->setInt32(PARAMETER_KEY_SUSPEND, false);
if (resumeStartTimeUs > 0) {
- params->setInt64("drop-start-time-us", resumeStartTimeUs);
+ params->setInt64(PARAMETER_KEY_SUSPEND_TIME, resumeStartTimeUs);
}
mEncoder->setParameters(params);
}
@@ -761,8 +766,8 @@
}
status_t MediaCodecSource::onStart(MetaData *params) {
- if (mStopping) {
- ALOGE("Failed to start while we're stopping");
+ if (mStopping || mOutput.lock()->mEncoderReachedEOS) {
+ ALOGE("Failed to start while we're stopping or encoder already stopped due to EOS error");
return INVALID_OPERATION;
}
int64_t startTimeUs;
@@ -795,7 +800,7 @@
if (mFlags & FLAG_USE_SURFACE_INPUT) {
if (mEncoder != NULL) {
sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames", false);
+ params->setInt32(PARAMETER_KEY_SUSPEND, false);
if (startTimeUs >= 0) {
params->setInt64("skip-frames-before", startTimeUs);
}
@@ -828,8 +833,8 @@
void MediaCodecSource::onPause(int64_t pauseStartTimeUs) {
if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames", true);
- params->setInt64("drop-start-time-us", pauseStartTimeUs);
+ params->setInt32(PARAMETER_KEY_SUSPEND, true);
+ params->setInt64(PARAMETER_KEY_SUSPEND_TIME, pauseStartTimeUs);
mEncoder->setParameters(params);
} else {
CHECK(mPuller != NULL);
@@ -1092,12 +1097,12 @@
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
status_t err = OK;
- CHECK(msg->findInt64("time-offset-us", &mInputBufferTimeOffsetUs));
+ CHECK(msg->findInt64(PARAMETER_KEY_OFFSET_TIME, &mInputBufferTimeOffsetUs));
// Propagate the timestamp offset to GraphicBufferSource.
if (mFlags & FLAG_USE_SURFACE_INPUT) {
sp<AMessage> params = new AMessage;
- params->setInt64("time-offset-us", mInputBufferTimeOffsetUs);
+ params->setInt64(PARAMETER_KEY_OFFSET_TIME, mInputBufferTimeOffsetUs);
err = mEncoder->setParameters(params);
}
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index 86402ce..a309ee4 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -23,38 +23,19 @@
#include <binder/PermissionCache.h>
#include <binder/IServiceManager.h>
#include <media/DataSource.h>
-#include <media/MediaAnalyticsItem.h>
-#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MediaExtractorFactory.h>
#include <media/IMediaExtractor.h>
#include <media/IMediaExtractorService.h>
+#include <nativeloader/dlext_namespaces.h>
#include <private/android_filesystem_config.h>
#include <cutils/properties.h>
#include <utils/String8.h>
-#include <ziparchive/zip_archive.h>
#include <dirent.h>
#include <dlfcn.h>
-// Copied from GraphicsEnv.cpp
-// TODO(b/37049319) Get this from a header once one exists
-extern "C" {
- android_namespace_t* android_create_namespace(const char* name,
- const char* ld_library_path,
- const char* default_library_path,
- uint64_t type,
- const char* permitted_when_isolated_path,
- android_namespace_t* parent);
- bool android_link_namespaces(android_namespace_t* from,
- android_namespace_t* to,
- const char* shared_libs_sonames);
- enum {
- ANDROID_NAMESPACE_TYPE_ISOLATED = 1,
- };
-}
-
namespace android {
// static
@@ -89,7 +70,7 @@
ALOGV("MediaExtractorFactory::CreateFromService %s", mime);
- UpdateExtractors(nullptr);
+ UpdateExtractors();
// initialize source decryption if needed
source->DrmInitialization(nullptr /* mime */);
@@ -122,13 +103,6 @@
return CreateIMediaExtractorFromMediaExtractor(ex, source, plugin);
}
-//static
-void MediaExtractorFactory::LoadPlugins(const ::std::string& apkPath) {
- // TODO: Verify apk path with package manager in extractor process.
- ALOGV("Load plugins from: %s", apkPath.c_str());
- UpdateExtractors(apkPath.empty() ? nullptr : apkPath.c_str());
-}
-
struct ExtractorPlugin : public RefBase {
ExtractorDef def;
void *libHandle;
@@ -153,13 +127,6 @@
std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
bool MediaExtractorFactory::gPluginsRegistered = false;
bool MediaExtractorFactory::gIgnoreVersion = false;
-std::string MediaExtractorFactory::gLinkedLibraries;
-
-// static
-void MediaExtractorFactory::SetLinkedLibraries(const std::string& linkedLibraries) {
- Mutex::Autolock autoLock(gPluginMutex);
- gLinkedLibraries = linkedLibraries;
-}
// static
void *MediaExtractorFactory::sniff(
@@ -258,108 +225,10 @@
}
//static
-void MediaExtractorFactory::RegisterExtractorsInApk(
- const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList) {
- ALOGV("search for plugins at %s", apkPath);
- ZipArchiveHandle zipHandle;
- int32_t ret = OpenArchive(apkPath, &zipHandle);
- if (ret == 0) {
- char abi[PROPERTY_VALUE_MAX];
- property_get("ro.product.cpu.abi", abi, "arm64-v8a");
- String8 prefix8 = String8::format("lib/%s/", abi);
- ZipString prefix(prefix8.c_str());
- ZipString suffix("extractor.so");
- void* cookie;
- ret = StartIteration(zipHandle, &cookie, &prefix, &suffix);
- if (ret == 0) {
- ZipEntry entry;
- ZipString name;
- while (Next(cookie, &entry, &name) == 0) {
- String8 libPath = String8(apkPath) + "!/" +
- String8(reinterpret_cast<const char*>(name.name), name.name_length);
- // TODO: Open with a linker namespace so that it can be linked with sub-libraries
- // within the apk instead of system libraries already loaded.
- void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
- if (libHandle) {
- GetExtractorDef getDef =
- (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
- if (getDef) {
- ALOGV("registering sniffer for %s", libPath.string());
- RegisterExtractor(
- new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
- } else {
- ALOGW("%s does not contain sniffer", libPath.string());
- dlclose(libHandle);
- }
- } else {
- ALOGW("couldn't dlopen(%s) %s", libPath.string(), strerror(errno));
- }
- }
- EndIteration(cookie);
- } else {
- ALOGW("couldn't find plugins from %s, %d", apkPath, ret);
- }
- CloseArchive(zipHandle);
- } else {
- ALOGW("couldn't open(%s) %d", apkPath, ret);
- }
-}
-
-//static
-void MediaExtractorFactory::RegisterExtractorsInSystem(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
+void MediaExtractorFactory::RegisterExtractors(
+ const char *libDirPath, const android_dlextinfo* dlextinfo,
+ std::list<sp<ExtractorPlugin>> &pluginList) {
ALOGV("search for plugins at %s", libDirPath);
- DIR *libDir = opendir(libDirPath);
- if (libDir) {
- struct dirent* libEntry;
- while ((libEntry = readdir(libDir))) {
- if (libEntry->d_name[0] == '.') {
- continue;
- }
- String8 libPath = String8(libDirPath) + "/" + libEntry->d_name;
- void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
- if (libHandle) {
- GetExtractorDef getDef =
- (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
- if (getDef) {
- ALOGV("registering sniffer for %s", libPath.string());
- RegisterExtractor(
- new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
- } else {
- ALOGW("%s does not contain sniffer", libPath.string());
- dlclose(libHandle);
- }
- } else {
- ALOGW("couldn't dlopen(%s) %s", libPath.string(), strerror(errno));
- }
- }
-
- closedir(libDir);
- } else {
- ALOGE("couldn't opendir(%s)", libDirPath);
- }
-}
-
-//static
-void MediaExtractorFactory::RegisterExtractorsInApex(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
- ALOGV("search for plugins at %s", libDirPath);
- ALOGV("linked libs %s", gLinkedLibraries.c_str());
-
- android_namespace_t *extractorNs = android_create_namespace("extractor",
- nullptr, // ld_library_path
- libDirPath,
- ANDROID_NAMESPACE_TYPE_ISOLATED,
- nullptr, // permitted_when_isolated_path
- nullptr); // parent
- if (!android_link_namespaces(extractorNs, nullptr, gLinkedLibraries.c_str())) {
- ALOGE("Failed to link namespace. Failed to load extractor plug-ins in apex.");
- return;
- }
- const android_dlextinfo dlextinfo = {
- .flags = ANDROID_DLEXT_USE_NAMESPACE,
- .library_namespace = extractorNs,
- };
DIR *libDir = opendir(libDirPath);
if (libDir) {
@@ -374,7 +243,7 @@
}
void *libHandle = android_dlopen_ext(
libPath.string(),
- RTLD_NOW | RTLD_LOCAL, &dlextinfo);
+ RTLD_NOW | RTLD_LOCAL, dlextinfo);
if (libHandle) {
GetExtractorDef getDef =
(GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
@@ -403,11 +272,9 @@
static std::unordered_set<std::string> gSupportedExtensions;
// static
-void MediaExtractorFactory::UpdateExtractors(const char *newUpdateApkPath) {
+void MediaExtractorFactory::UpdateExtractors() {
Mutex::Autolock autoLock(gPluginMutex);
- if (newUpdateApkPath != nullptr) {
- gPluginsRegistered = false;
- }
+
if (gPluginsRegistered) {
return;
}
@@ -416,22 +283,28 @@
std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
- RegisterExtractorsInApex("/apex/com.android.media/lib"
+ android_namespace_t *mediaNs = android_get_exported_namespace("media");
+ if (mediaNs != NULL) {
+ const android_dlextinfo dlextinfo = {
+ .flags = ANDROID_DLEXT_USE_NAMESPACE,
+ .library_namespace = mediaNs,
+ };
+ RegisterExtractors("/apex/com.android.media/lib"
#ifdef __LP64__
- "64"
+ "64"
#endif
- , *newList);
+ "/extractors", &dlextinfo, *newList);
- RegisterExtractorsInSystem("/system/lib"
-#ifdef __LP64__
- "64"
-#endif
- "/extractors", *newList);
-
- if (newUpdateApkPath != nullptr) {
- RegisterExtractorsInApk(newUpdateApkPath, *newList);
+ } else {
+ ALOGE("couldn't find media namespace.");
}
+ RegisterExtractors("/system/lib"
+#ifdef __LP64__
+ "64"
+#endif
+ "/extractors", NULL, *newList);
+
newList->sort(compareFunc);
gPlugins = newList;
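
The rewritten factory no longer builds its own isolated namespace; it asks the linker for the exported "media" namespace and hands it to android_dlopen_ext. A condensed sketch of that flow (error handling omitted; the plugin filename is a placeholder, not taken from this change):

    android_namespace_t *mediaNs = android_get_exported_namespace("media");
    const android_dlextinfo dlextinfo = {
        .flags = ANDROID_DLEXT_USE_NAMESPACE,
        .library_namespace = mediaNs,
    };
    // APEX plugins load inside the "media" namespace; /system plugins pass a null dlextinfo,
    // which makes android_dlopen_ext behave like a plain dlopen.
    void *handle = android_dlopen_ext(
            "/apex/com.android.media/lib64/extractors/libfooextractor.so",
            RTLD_NOW | RTLD_LOCAL, &dlextinfo);
    GetExtractorDef getDef = (GetExtractorDef)dlsym(handle, "GETEXTRACTORDEF");
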
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index ad55c56..5c13983 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -30,7 +30,7 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/OggWriter.h>
#include <media/stagefright/foundation/ADebug.h>
-#include "OpusHeader.h"
+#include <media/stagefright/foundation/OpusHeader.h>
extern "C" {
#include <ogg/ogg.h>
@@ -114,30 +114,17 @@
}
mSampleRate = sampleRate;
-
- OpusHeader header;
- header.channels = nChannels;
- header.num_streams = nChannels;
- header.num_coupled = 0;
- header.channel_mapping = ((nChannels > 8) ? 255 : (nChannels > 2));
- header.gain_db = 0;
- header.skip_samples = 0;
-
- // headers are 21-bytes + something driven by channel count
- // expect numbers in the low 30's here. WriteOpusHeader() will tell us
- // if things are bad.
- unsigned char header_data[100];
- ogg_packet op;
- ogg_page og;
-
- const int packet_size = WriteOpusHeader(header, mSampleRate, (uint8_t*)header_data,
- sizeof(header_data));
-
- if (packet_size < 0) {
- ALOGE("opus header writing failed");
+ uint32_t type;
+ const void *header_data;
+ size_t packet_size;
+ if (!source->getFormat()->findData(kKeyOpusHeader, &type, &header_data, &packet_size)) {
+ ALOGE("opus header not found");
return UNKNOWN_ERROR;
}
- op.packet = header_data;
+
+ ogg_packet op;
+ ogg_page og;
+ op.packet = (unsigned char *)header_data;
op.bytes = packet_size;
op.b_o_s = 1;
op.e_o_s = 0;
diff --git a/media/libstagefright/OmxInfoBuilder.cpp b/media/libstagefright/OmxInfoBuilder.cpp
index 382c947..8910463 100644
--- a/media/libstagefright/OmxInfoBuilder.cpp
+++ b/media/libstagefright/OmxInfoBuilder.cpp
@@ -21,8 +21,8 @@
#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
#endif
+#include <android-base/properties.h>
#include <utils/Log.h>
-#include <cutils/properties.h>
#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/OmxInfoBuilder.h>
@@ -53,7 +53,7 @@
namespace /* unnamed */ {
bool hasPrefix(const hidl_string& s, const char* prefix) {
- return strncmp(s.c_str(), prefix, strlen(prefix)) == 0;
+ return strncasecmp(s.c_str(), prefix, strlen(prefix)) == 0;
}
status_t queryCapabilities(
@@ -87,7 +87,8 @@
} // unnamed namespace
-OmxInfoBuilder::OmxInfoBuilder() {
+OmxInfoBuilder::OmxInfoBuilder(bool allowSurfaceEncoders)
+ : mAllowSurfaceEncoders(allowSurfaceEncoders) {
}
status_t OmxInfoBuilder::buildMediaCodecList(MediaCodecListWriter* writer) {
@@ -135,81 +136,80 @@
// Convert roles to lists of codecs
// codec name -> index into swCodecs/hwCodecs
- std::map<hidl_string, std::unique_ptr<MediaCodecInfoWriter>>
- swCodecName2Info, hwCodecName2Info;
+ std::map<hidl_string, std::unique_ptr<MediaCodecInfoWriter>> codecName2Info;
- char rank[PROPERTY_VALUE_MAX];
- uint32_t defaultRank = 0x100;
- if (property_get("debug.stagefright.omx_default_rank", rank, nullptr)) {
- defaultRank = std::strtoul(rank, nullptr, 10);
- }
+ uint32_t defaultRank =
+ ::android::base::GetUintProperty("debug.stagefright.omx_default_rank", 0x100u);
+ uint32_t defaultSwAudioRank =
+ ::android::base::GetUintProperty("debug.stagefright.omx_default_rank.sw-audio", 0x10u);
+ uint32_t defaultSwOtherRank =
+ ::android::base::GetUintProperty("debug.stagefright.omx_default_rank.sw-other", 0x210u);
+
for (const IOmxStore::RoleInfo& role : roles) {
const hidl_string& typeName = role.type;
bool isEncoder = role.isEncoder;
- bool preferPlatformNodes = role.preferPlatformNodes;
- // If preferPlatformNodes is true, hardware nodes must be added after
- // platform (software) nodes. hwCodecs is used to hold hardware nodes
- // that need to be added after software nodes for the same role.
- std::vector<const IOmxStore::NodeInfo*> hwCodecs;
- for (const IOmxStore::NodeInfo& node : role.nodes) {
+ bool isAudio = hasPrefix(role.type, "audio/");
+ bool isVideoOrImage = hasPrefix(role.type, "video/") || hasPrefix(role.type, "image/");
+
+ for (const IOmxStore::NodeInfo &node : role.nodes) {
const hidl_string& nodeName = node.name;
+
+ // currently image and video encoders use surface input
+ if (!mAllowSurfaceEncoders && isVideoOrImage && isEncoder) {
+ ALOGD("disabling %s for media type %s because we are not using OMX input surface",
+ nodeName.c_str(), role.type.c_str());
+ continue;
+ }
+
bool isSoftware = hasPrefix(nodeName, "OMX.google");
- MediaCodecInfoWriter* info;
- if (isSoftware) {
- auto c2i = swCodecName2Info.find(nodeName);
- if (c2i == swCodecName2Info.end()) {
- // Create a new MediaCodecInfo for a new node.
- c2i = swCodecName2Info.insert(std::make_pair(
- nodeName, writer->addMediaCodecInfo())).first;
- info = c2i->second.get();
- info->setName(nodeName.c_str());
- info->setOwner(node.owner.c_str());
- info->setAttributes(
- // all OMX codecs are vendor codecs (in the vendor partition), but
- // treat OMX.google codecs as non-hardware-accelerated and non-vendor
- (isEncoder ? MediaCodecInfo::kFlagIsEncoder : 0));
- info->setRank(defaultRank);
- } else {
- // The node has been seen before. Simply retrieve the
- // existing MediaCodecInfoWriter.
- info = c2i->second.get();
- }
- } else {
- auto c2i = hwCodecName2Info.find(nodeName);
- if (c2i == hwCodecName2Info.end()) {
- // Create a new MediaCodecInfo for a new node.
- if (!preferPlatformNodes) {
- c2i = hwCodecName2Info.insert(std::make_pair(
- nodeName, writer->addMediaCodecInfo())).first;
- info = c2i->second.get();
- info->setName(nodeName.c_str());
- info->setOwner(node.owner.c_str());
- typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs =
- MediaCodecInfo::kFlagIsVendor;
- if (isEncoder) {
- attrs |= MediaCodecInfo::kFlagIsEncoder;
- }
- if (std::count_if(
- node.attributes.begin(), node.attributes.end(),
- [](const IOmxStore::Attribute &i) -> bool {
- return i.key == "attribute::software-codec";
- })) {
- attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
- }
- info->setAttributes(attrs);
- info->setRank(defaultRank);
- } else {
- // If preferPlatformNodes is true, this node must be
- // added after all software nodes.
- hwCodecs.push_back(&node);
- continue;
+ uint32_t rank = isSoftware
+ ? (isAudio ? defaultSwAudioRank : defaultSwOtherRank)
+ : defaultRank;
+ // get rank from IOmxStore via attribute
+ for (const IOmxStore::Attribute& attribute : node.attributes) {
+ if (attribute.key == "rank") {
+ uint32_t oldRank = rank;
+ char dummy;
+ if (sscanf(attribute.value.c_str(), "%u%c", &rank, &dummy) != 1) {
+ rank = oldRank;
}
- } else {
- // The node has been seen before. Simply retrieve the
- // existing MediaCodecInfoWriter.
- info = c2i->second.get();
+ break;
}
}
+
+ MediaCodecInfoWriter* info;
+ auto c2i = codecName2Info.find(nodeName);
+ if (c2i == codecName2Info.end()) {
+ // Create a new MediaCodecInfo for a new node.
+ c2i = codecName2Info.insert(std::make_pair(
+ nodeName, writer->addMediaCodecInfo())).first;
+ info = c2i->second.get();
+ info->setName(nodeName.c_str());
+ info->setOwner(node.owner.c_str());
+ info->setRank(rank);
+
+ typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs = 0;
+ // all OMX codecs are vendor codecs (in the vendor partition), but
+ // treat OMX.google codecs as non-hardware-accelerated and non-vendor
+ if (!isSoftware) {
+ attrs |= MediaCodecInfo::kFlagIsVendor;
+ if (std::count_if(
+ node.attributes.begin(), node.attributes.end(),
+ [](const IOmxStore::Attribute &i) -> bool {
+ return i.key == "attribute::software-codec";
+ })) {
+ attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
+ }
+ }
+ if (isEncoder) {
+ attrs |= MediaCodecInfo::kFlagIsEncoder;
+ }
+ info->setAttributes(attrs);
+ } else {
+ // The node has been seen before. Simply retrieve the
+ // existing MediaCodecInfoWriter.
+ info = c2i->second.get();
+ }
std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
info->addMediaType(typeName.c_str());
if (queryCapabilities(
@@ -219,54 +219,8 @@
info->removeMediaType(typeName.c_str());
}
}
-
- // If preferPlatformNodes is true, hardware nodes will not have been
- // added in the loop above, but rather saved in hwCodecs. They are
- // going to be added here.
- if (preferPlatformNodes) {
- for (const IOmxStore::NodeInfo *node : hwCodecs) {
- MediaCodecInfoWriter* info;
- const hidl_string& nodeName = node->name;
- auto c2i = hwCodecName2Info.find(nodeName);
- if (c2i == hwCodecName2Info.end()) {
- // Create a new MediaCodecInfo for a new node.
- c2i = hwCodecName2Info.insert(std::make_pair(
- nodeName, writer->addMediaCodecInfo())).first;
- info = c2i->second.get();
- info->setName(nodeName.c_str());
- info->setOwner(node->owner.c_str());
- typename std::underlying_type<MediaCodecInfo::Attributes>::type attrs =
- MediaCodecInfo::kFlagIsVendor;
- if (isEncoder) {
- attrs |= MediaCodecInfo::kFlagIsEncoder;
- }
- if (std::count_if(
- node->attributes.begin(), node->attributes.end(),
- [](const IOmxStore::Attribute &i) -> bool {
- return i.key == "attribute::software-codec";
- })) {
- attrs |= MediaCodecInfo::kFlagIsHardwareAccelerated;
- }
- info->setRank(defaultRank);
- } else {
- // The node has been seen before. Simply retrieve the
- // existing MediaCodecInfoWriter.
- info = c2i->second.get();
- }
- std::unique_ptr<MediaCodecInfo::CapabilitiesWriter> caps =
- info->addMediaType(typeName.c_str());
- if (queryCapabilities(
- *node, typeName.c_str(), isEncoder, caps.get()) != OK) {
- ALOGW("Fail to add media type %s to codec %s "
- "after software codecs",
- typeName.c_str(), nodeName.c_str());
- info->removeMediaType(typeName.c_str());
- }
- }
- }
}
return OK;
}
} // namespace android
-
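
The builder above replaces the preferPlatformNodes two-pass ordering with numeric ranks: vendor nodes default to 0x100, software audio nodes to 0x10, other software nodes to 0x210 (all overridable through the debug.stagefright.omx_default_rank* properties), and a per-node "rank" attribute from IOmxStore, when present and well-formed, wins over the defaults. Lower ranks are intended to sort earlier in the resulting codec list. A condensed sketch of that precedence, with a hypothetical node advertising rank 8:

    uint32_t rank = isSoftware ? (isAudio ? defaultSwAudioRank : defaultSwOtherRank)
                               : defaultRank;
    for (const IOmxStore::Attribute &attr : node.attributes) {
        if (attr.key == "rank") {          // e.g. attr.value == "8"
            uint32_t parsed;
            char dummy;
            if (sscanf(attr.value.c_str(), "%u%c", &parsed, &dummy) == 1) {
                rank = parsed;             // a well-formed attribute overrides the default
            }
            break;
        }
    }
    info->setRank(rank);
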
diff --git a/media/libstagefright/RemoteMediaExtractor.cpp b/media/libstagefright/RemoteMediaExtractor.cpp
index 9d2c42b..b0ce688 100644
--- a/media/libstagefright/RemoteMediaExtractor.cpp
+++ b/media/libstagefright/RemoteMediaExtractor.cpp
@@ -49,7 +49,7 @@
mAnalyticsItem = nullptr;
if (MEDIA_LOG) {
- mAnalyticsItem = new MediaAnalyticsItem(kKeyExtractor);
+ mAnalyticsItem = MediaAnalyticsItem::create(kKeyExtractor);
// track the container format (mpeg, aac, wvm, etc)
size_t ntracks = extractor->countTracks();
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index f34d54c..fa3d372 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -165,6 +165,9 @@
for (i = 0; i < n; ++i) {
sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ if (!meta) {
+ continue;
+ }
ALOGV("getting track %zu of %zu, meta=%s", i, n, meta->toString().c_str());
const char *mime;
@@ -186,6 +189,9 @@
}
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+ if (!trackMeta) {
+ return NULL;
+ }
if (metaOnly) {
return FrameDecoder::getMetadataOnly(trackMeta, colorFormat, thumbnail);
@@ -280,6 +286,9 @@
size_t i;
for (i = 0; i < n; ++i) {
sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ if (!meta) {
+ continue;
+ }
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
@@ -296,6 +305,9 @@
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
i, MediaExtractor::kIncludeExtensiveMetaData);
+ if (!trackMeta) {
+ return UNKNOWN_ERROR;
+ }
if (metaOnly) {
if (outFrame != NULL) {
@@ -529,6 +541,9 @@
String8 timedTextLang;
for (size_t i = 0; i < numTracks; ++i) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
+ if (!trackMeta) {
+ continue;
+ }
int64_t durationUs;
if (trackMeta->findInt64(kKeyDuration, &durationUs)) {
@@ -667,8 +682,9 @@
!strcasecmp(fileMIME, "video/x-matroska")) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(0);
const char *trackMIME;
- CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
-
+ if (trackMeta != nullptr) {
+ CHECK(trackMeta->findCString(kKeyMIMEType, &trackMIME));
+ }
if (!strncasecmp("audio/", trackMIME, 6)) {
// The matroska file only contains a single audio track,
// rewrite its mime type.
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 49e485a..16b3319 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -37,6 +37,7 @@
#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ByteUtils.h>
+#include <media/stagefright/foundation/OpusHeader.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaDefs.h>
#include <media/AudioSystem.h>
@@ -191,6 +192,9 @@
{ 50, OMX_VIDEO_AVCLevel5 },
{ 51, OMX_VIDEO_AVCLevel51 },
{ 52, OMX_VIDEO_AVCLevel52 },
+ { 60, OMX_VIDEO_AVCLevel6 },
+ { 61, OMX_VIDEO_AVCLevel61 },
+ { 62, OMX_VIDEO_AVCLevel62 },
};
const static ALookup<uint8_t, OMX_VIDEO_AVCPROFILETYPE> profiles {
{ 66, OMX_VIDEO_AVCProfileBaseline },
@@ -584,6 +588,7 @@
{ "genre", kKeyGenre },
{ "location", kKeyLocation },
{ "lyricist", kKeyWriter },
+ { "manufacturer", kKeyManufacturer },
{ "title", kKeyTitle },
{ "year", kKeyYear },
}
@@ -1183,6 +1188,16 @@
}
parseHevcProfileLevelFromHvcc((const uint8_t *)data, dataSize, msg);
+ } else if (meta->findData(kKeyAV1C, &type, &data, &size)) {
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == NULL || buffer->base() == NULL) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer->data(), data, size);
+
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ msg->setBuffer("csd-0", buffer);
} else if (meta->findData(kKeyESDS, &type, &data, &size)) {
ESDS esds((const char *)data, size);
if (esds.InitCheck() != (status_t)OK) {
@@ -1689,6 +1704,11 @@
meta->setInt32(kKeyIsADTS, isADTS);
}
+ int32_t aacProfile = -1;
+ if (msg->findInt32("aac-profile", &aacProfile)) {
+ meta->setInt32(kKeyAACAOT, aacProfile);
+ }
+
int32_t pcmEncoding;
if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
meta->setInt32(kKeyPcmEncoding, pcmEncoding);
@@ -1742,15 +1762,39 @@
std::vector<uint8_t> hvcc(csd0size + 1024);
size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
+ } else if (mime == MEDIA_MIMETYPE_VIDEO_AV1) {
+ meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
} else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
} else if (mime == MEDIA_MIMETYPE_AUDIO_OPUS) {
- meta->setData(kKeyOpusHeader, 0, csd0->data(), csd0->size());
+ size_t opusHeadSize = csd0->size();
+ size_t codecDelayBufSize = 0;
+ size_t seekPreRollBufSize = 0;
+ void *opusHeadBuf = csd0->data();
+ void *codecDelayBuf = NULL;
+ void *seekPreRollBuf = NULL;
if (msg->findBuffer("csd-1", &csd1)) {
- meta->setData(kKeyOpusCodecDelay, 0, csd1->data(), csd1->size());
+ codecDelayBufSize = csd1->size();
+ codecDelayBuf = csd1->data();
}
if (msg->findBuffer("csd-2", &csd2)) {
- meta->setData(kKeyOpusSeekPreRoll, 0, csd2->data(), csd2->size());
+ seekPreRollBufSize = csd2->size();
+ seekPreRollBuf = csd2->data();
+ }
+ /* Extract codec delay and seek pre roll from csd-0,
+ * if csd-1 and csd-2 are not present */
+ if (!codecDelayBuf && !seekPreRollBuf) {
+ GetOpusHeaderBuffers(csd0->data(), csd0->size(), &opusHeadBuf,
+ &opusHeadSize, &codecDelayBuf,
+ &codecDelayBufSize, &seekPreRollBuf,
+ &seekPreRollBufSize);
+ }
+ meta->setData(kKeyOpusHeader, 0, opusHeadBuf, opusHeadSize);
+ if (codecDelayBuf) {
+ meta->setData(kKeyOpusCodecDelay, 0, codecDelayBuf, codecDelayBufSize);
+ }
+ if (seekPreRollBuf) {
+ meta->setData(kKeyOpusSeekPreRoll, 0, seekPreRollBuf, seekPreRollBufSize);
}
} else if (mime == MEDIA_MIMETYPE_AUDIO_VORBIS) {
meta->setData(kKeyVorbisInfo, 0, csd0->data(), csd0->size());
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.cpp b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
index c14983a..877cb5a 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.cpp
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
@@ -23,6 +23,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
+#define MAX_CHANNEL_COUNT 6 /* maximum number of audio channels that can be decoded */
+
namespace android {
template<class T>
@@ -184,7 +186,7 @@
return OMX_ErrorUndefined;
}
- if (pcmParams->nChannels < 1 || pcmParams->nChannels > 2) {
+ if (pcmParams->nChannels < 1 || pcmParams->nChannels > MAX_CHANNEL_COUNT) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index 8a86a0d..da86758 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -1178,6 +1178,8 @@
int i_target_loudness;
unsigned int i_sbr_mode;
int i;
+ int ui_proc_mem_tabs_size = 0;
+ pVOID pv_alloc_ptr = NULL;
#ifdef ENABLE_MPEG_D_DRC
{
@@ -1228,6 +1230,29 @@
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+ /* Get memory info tables size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEMTABS_SIZE, 0,
+ &ui_proc_mem_tabs_size);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+ pv_alloc_ptr = memalign(4, ui_proc_mem_tabs_size);
+
+ if (pv_alloc_ptr == NULL) {
+ ALOGE("Cannot create requested memory %d", ui_proc_mem_tabs_size);
+ return IA_FATAL_ERROR;
+ }
+
+ memset(pv_alloc_ptr, 0, ui_proc_mem_tabs_size);
+
+ mMemoryVec.push(pv_alloc_ptr);
+
+ /* Set pointer for process memory tables */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
+ pv_alloc_ptr);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, NULL);
diff --git a/media/libstagefright/data/media_codecs_google_c2_audio.xml b/media/libstagefright/data/media_codecs_google_c2_audio.xml
index 0b554a2..47a9715 100644
--- a/media/libstagefright/data/media_codecs_google_c2_audio.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_audio.xml
@@ -17,51 +17,61 @@
<Included>
<Decoders>
<MediaCodec name="c2.android.mp3.decoder" type="audio/mpeg">
+ <Alias name="OMX.google.mp3.decoder" />
<Limit name="channel-count" max="2" />
<Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<Limit name="bitrate" range="8000-320000" />
</MediaCodec>
<MediaCodec name="c2.android.amrnb.decoder" type="audio/3gpp">
+ <Alias name="OMX.google.amrnb.decoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000" />
<Limit name="bitrate" range="4750-12200" />
</MediaCodec>
<MediaCodec name="c2.android.amrwb.decoder" type="audio/amr-wb">
+ <Alias name="OMX.google.amrwb.decoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="16000" />
<Limit name="bitrate" range="6600-23850" />
</MediaCodec>
<MediaCodec name="c2.android.aac.decoder" type="audio/mp4a-latm">
+ <Alias name="OMX.google.aac.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="7350,8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<Limit name="bitrate" range="8000-960000" />
</MediaCodec>
<MediaCodec name="c2.android.g711.alaw.decoder" type="audio/g711-alaw">
+ <Alias name="OMX.google.g711.alaw.decoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000-48000" />
<Limit name="bitrate" range="64000" />
</MediaCodec>
<MediaCodec name="c2.android.g711.mlaw.decoder" type="audio/g711-mlaw">
+ <Alias name="OMX.google.g711.mlaw.decoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000-48000" />
<Limit name="bitrate" range="64000" />
</MediaCodec>
<MediaCodec name="c2.android.vorbis.decoder" type="audio/vorbis">
+ <Alias name="OMX.google.vorbis.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="8000-96000" />
<Limit name="bitrate" range="32000-500000" />
</MediaCodec>
<MediaCodec name="c2.android.opus.decoder" type="audio/opus">
+ <Alias name="OMX.google.opus.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="48000" />
<Limit name="bitrate" range="6000-510000" />
</MediaCodec>
<MediaCodec name="c2.android.raw.decoder" type="audio/raw">
+ <Alias name="OMX.google.raw.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="8000-96000" />
<Limit name="bitrate" range="1-10000000" />
</MediaCodec>
<MediaCodec name="c2.android.flac.decoder" type="audio/flac">
+ <Alias name="OMX.google.flac.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="1-655350" />
<Limit name="bitrate" range="1-21000000" />
@@ -69,29 +79,40 @@
</Decoders>
<Encoders>
<MediaCodec name="c2.android.aac.encoder" type="audio/mp4a-latm">
+ <Alias name="OMX.google.aac.encoder" />
<Limit name="channel-count" max="6" />
<Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<!-- also may support 64000, 88200 and 96000 Hz -->
<Limit name="bitrate" range="8000-960000" />
</MediaCodec>
<MediaCodec name="c2.android.amrnb.encoder" type="audio/3gpp">
+ <Alias name="OMX.google.amrnb.encoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000" />
<Limit name="bitrate" range="4750-12200" />
<Feature name="bitrate-modes" value="CBR" />
</MediaCodec>
<MediaCodec name="c2.android.amrwb.encoder" type="audio/amr-wb">
+ <Alias name="OMX.google.amrwb.encoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="16000" />
<Limit name="bitrate" range="6600-23850" />
<Feature name="bitrate-modes" value="CBR" />
</MediaCodec>
<MediaCodec name="c2.android.flac.encoder" type="audio/flac">
+ <Alias name="OMX.google.flac.encoder" />
<Limit name="channel-count" max="2" />
<Limit name="sample-rate" ranges="1-655350" />
<Limit name="bitrate" range="1-21000000" />
<Limit name="complexity" range="0-8" default="5" />
<Feature name="bitrate-modes" value="CQ" />
</MediaCodec>
+ <MediaCodec name="c2.android.opus.encoder" type="audio/opus">
+ <Limit name="channel-count" max="2" />
+ <Limit name="sample-rate" ranges="8000,12000,16000,24000,48000" />
+ <Limit name="bitrate" range="500-512000" />
+ <Limit name="complexity" range="0-10" default="5" />
+ <Feature name="bitrate-modes" value="CQ" />
+ </MediaCodec>
</Encoders>
</Included>
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
similarity index 60%
copy from services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
copy to media/libstagefright/data/media_codecs_google_c2_telephony.xml
index 5d9193b..950b092 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_telephony.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<?xml version="1.0" encoding="utf-8" ?>
<!-- Copyright (C) 2018 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,13 +13,14 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-<!--
- These are the minimum required criteria to be used by Audio HAL to ensure a basic
- user experience on an Android device
--->
-<configuration name="audio_policy_wrapper_configuration" xmlns:xi="http://www.w3.org/2001/XInclude">
- <xi:include href="policy_criterion_types.xml"/>
- <xi:include href="policy_criteria.xml"/>
-
-</configuration>
+<Included>
+ <Decoders>
+ <MediaCodec name="c2.android.gsm.decoder" type="audio/gsm">
+ <Alias name="OMX.google.gsm.decoder" />
+ <Limit name="channel-count" max="1" />
+ <Limit name="sample-rate" ranges="8000" />
+ <Limit name="bitrate" range="13000" />
+ </MediaCodec>
+ </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_c2_tv.xml b/media/libstagefright/data/media_codecs_google_c2_tv.xml
new file mode 100644
index 0000000..1b00dc9
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_c2_tv.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<Included>
+ <Decoders>
+ <MediaCodec name="c2.android.mpeg2.decoder" type="video/mpeg2">
+ <Alias name="OMX.google.mpeg2.decoder" />
+ <!-- profiles and levels: ProfileMain : LevelHL -->
+ <Limit name="size" min="16x16" max="1920x1088" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" range="1-244800" />
+ <Limit name="bitrate" range="1-20000000" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
+ </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index adb45b3..e20174f 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -17,6 +17,7 @@
<Included>
<Decoders>
<MediaCodec name="c2.android.mpeg4.decoder" type="video/mp4v-es">
+ <Alias name="OMX.google.mpeg4.decoder" />
<!-- profiles and levels: ProfileSimple : Level3 -->
<Limit name="size" min="2x2" max="352x288" />
<Limit name="alignment" value="2x2" />
@@ -26,6 +27,7 @@
<Feature name="adaptive-playback" />
</MediaCodec>
<MediaCodec name="c2.android.h263.decoder" type="video/3gpp">
+ <Alias name="OMX.google.h263.decoder" />
<!-- profiles and levels: ProfileBaseline : Level30, ProfileBaseline : Level45
ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
<Limit name="size" min="2x2" max="352x288" />
@@ -34,6 +36,7 @@
<Feature name="adaptive-playback" />
</MediaCodec>
<MediaCodec name="c2.android.avc.decoder" type="video/avc">
+ <Alias name="OMX.google.h264.decoder" />
<!-- profiles and levels: ProfileHigh : Level52 -->
<Limit name="size" min="2x2" max="4080x4080" />
<Limit name="alignment" value="2x2" />
@@ -44,6 +47,7 @@
<Feature name="adaptive-playback" />
</MediaCodec>
<MediaCodec name="c2.android.hevc.decoder" type="video/hevc">
+ <Alias name="OMX.google.hevc.decoder" />
<!-- profiles and levels: ProfileMain : MainTierLevel51 -->
<Limit name="size" min="2x2" max="4096x4096" />
<Limit name="alignment" value="2x2" />
@@ -54,6 +58,7 @@
<Feature name="adaptive-playback" />
</MediaCodec>
<MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8">
+ <Alias name="OMX.google.vp8.decoder" />
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
@@ -63,6 +68,7 @@
<Feature name="adaptive-playback" />
</MediaCodec>
<MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9">
+ <Alias name="OMX.google.vp9.decoder" />
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
@@ -71,16 +77,27 @@
<Limit name="bitrate" range="1-40000000" />
<Feature name="adaptive-playback" />
</MediaCodec>
+ <MediaCodec name="c2.android.av1.decoder" type="video/av01">
+ <Limit name="size" min="96x96" max="1920x1080" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="16x16" />
+ <Limit name="blocks-per-second" min="24" max="2073600" />
+ <Limit name="bitrate" range="1-120000000" />
+ <Limit name="frame-rate" range="1-60" />
+ <Feature name="adaptive-playback" />
+ </MediaCodec>
</Decoders>
<Encoders>
<MediaCodec name="c2.android.h263.encoder" type="video/3gpp">
+ <Alias name="OMX.google.h263.encoder" />
<!-- profiles and levels: ProfileBaseline : Level45 -->
<Limit name="size" min="176x144" max="176x144" />
<Limit name="alignment" value="16x16" />
<Limit name="bitrate" range="1-128000" />
</MediaCodec>
<MediaCodec name="c2.android.avc.encoder" type="video/avc">
+ <Alias name="OMX.google.h264.encoder" />
<!-- profiles and levels: ProfileBaseline : Level41 -->
<Limit name="size" min="16x16" max="2048x2048" />
<Limit name="alignment" value="2x2" />
@@ -90,7 +107,17 @@
<Limit name="bitrate" range="1-12000000" />
<Feature name="intra-refresh" />
</MediaCodec>
+ <MediaCodec name="c2.android.hevc.encoder" type="video/hevc">
+ <!-- profiles and levels: ProfileMain : MainTierLevel51 -->
+ <Limit name="size" min="320x128" max="512x512" />
+ <Limit name="alignment" value="2x2" />
+ <Limit name="block-size" value="8x8" />
+ <Limit name="block-count" range="1-4096" /> <!-- max 512x512 -->
+ <Limit name="blocks-per-second" range="1-122880" />
+ <Limit name="bitrate" range="1-10000000" />
+ </MediaCodec>
<MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
+ <Alias name="OMX.google.mpeg4.encoder" />
<!-- profiles and levels: ProfileCore : Level2 -->
<Limit name="size" min="16x16" max="176x144" />
<Limit name="alignment" value="16x16" />
@@ -99,6 +126,7 @@
<Limit name="bitrate" range="1-64000" />
</MediaCodec>
<MediaCodec name="c2.android.vp8.encoder" type="video/x-vnd.on2.vp8">
+ <Alias name="OMX.google.vp8.encoder" />
<!-- profiles and levels: ProfileMain : Level_Version0-3 -->
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
@@ -109,6 +137,7 @@
<Feature name="bitrate-modes" value="VBR,CBR" />
</MediaCodec>
<MediaCodec name="c2.android.vp9.encoder" type="video/x-vnd.on2.vp9">
+ <Alias name="OMX.google.vp9.encoder" />
<!-- profiles and levels: ProfileMain : Level_Version0-3 -->
<Limit name="size" min="2x2" max="2048x2048" />
<Limit name="alignment" value="2x2" />
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index dd1d904..533cd72 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -72,6 +72,7 @@
"MediaKeys.cpp",
"MetaData.cpp",
"MetaDataBase.cpp",
+ "OpusHeader.cpp",
"avc_utils.cpp",
"base64.cpp",
"hexdump.cpp",
diff --git a/media/libstagefright/opus/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
similarity index 69%
rename from media/libstagefright/opus/OpusHeader.cpp
rename to media/libstagefright/foundation/OpusHeader.cpp
index e4a460c..9faede1 100644
--- a/media/libstagefright/opus/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -16,7 +16,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "SoftOpus"
-
+#include <algorithm>
#include <cstring>
#include <stdint.h>
@@ -43,9 +43,6 @@
{0, 6, 1, 2, 3, 4, 5, 7},
};
-// Opus always has a 48kHz output rate. This is true for all Opus, not just this
-// implementation.
-constexpr int kRate = 48000;
// Size of the Opus header excluding optional mapping information.
constexpr size_t kOpusHeaderSize = 19;
// Offset to magic string that starts Opus header.
@@ -76,15 +73,12 @@
constexpr size_t kOpusHeaderNumCoupledStreamsOffset = 20;
// Offset to the stream to channel mapping in the Opus header.
constexpr size_t kOpusHeaderStreamMapOffset = 21;
-// Maximum packet size used in Xiph's opusdec.
-constexpr int kMaxOpusOutputPacketSizeSamples = 960 * 6;
// Default audio output channel layout. Used to initialize |stream_map| in
// OpusHeader, and passed to opus_multistream_decoder_create() when the header
// does not contain mapping information. The values are valid only for mono and
// stereo output: Opus streams with more than 2 channels require a stream map.
constexpr int kMaxChannelsWithDefaultLayout = 2;
-constexpr uint8_t kDefaultOpusChannelLayout[kMaxChannelsWithDefaultLayout] = {0, 1};
static uint16_t ReadLE16(const uint8_t* data, size_t data_size, uint32_t read_offset) {
// check whether the 2nd byte is within the buffer
@@ -182,4 +176,88 @@
}
}
+int WriteOpusHeaders(const OpusHeader &header, int inputSampleRate,
+ uint8_t* output, size_t outputSize, uint64_t codecDelay,
+ uint64_t seekPreRoll) {
+ if (outputSize < AOPUS_UNIFIED_CSD_MINSIZE) {
+ ALOGD("Buffer not large enough to hold unified OPUS CSD");
+ return -1;
+ }
+
+ int headerLen = WriteOpusHeader(header, inputSampleRate, output,
+ outputSize);
+ if (headerLen < 0) {
+ ALOGD("WriteOpusHeader failed");
+ return -1;
+ }
+ if (headerLen >= (outputSize - 2 * AOPUS_TOTAL_CSD_SIZE)) {
+ ALOGD("Buffer not large enough to hold codec delay and seek pre roll");
+ return -1;
+ }
+
+ uint64_t length = AOPUS_LENGTH;
+
+ /*
+ The following CSD blocks signal codec delay and seek pre-roll.
+ They are appended after the OpusHeader:
+
+ Marker (8 bytes) | Length (8 bytes) | Samples (8 bytes)
+
+ Markers supported:
+ AOPUSDLY - signals codec delay
+ AOPUSPRL - signals seek pre-roll
+
+ Length is always 8.
+ */
+
+ // Add codec delay
+ memcpy(output + headerLen, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE);
+ headerLen += AOPUS_MARKER_SIZE;
+ memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
+ headerLen += AOPUS_LENGTH_SIZE;
+ memcpy(output + headerLen, &codecDelay, AOPUS_CSD_SIZE);
+ headerLen += AOPUS_CSD_SIZE;
+
+ // Add seek pre-roll
+ memcpy(output + headerLen, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE);
+ headerLen += AOPUS_MARKER_SIZE;
+ memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
+ headerLen += AOPUS_LENGTH_SIZE;
+ memcpy(output + headerLen, &seekPreRoll, AOPUS_CSD_SIZE);
+ headerLen += AOPUS_CSD_SIZE;
+
+ return headerLen;
+}
+
+void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+ void **opusHeadBuf, size_t *opusHeadSize,
+ void **codecDelayBuf, size_t *codecDelaySize,
+ void **seekPreRollBuf, size_t *seekPreRollSize) {
+ *codecDelayBuf = NULL;
+ *codecDelaySize = 0;
+ *seekPreRollBuf = NULL;
+ *seekPreRollSize = 0;
+ *opusHeadBuf = (void *)data;
+ *opusHeadSize = data_size;
+ if (data_size >= AOPUS_UNIFIED_CSD_MINSIZE) {
+ size_t i = 0;
+ while (i < data_size - AOPUS_TOTAL_CSD_SIZE) {
+ uint8_t *csdBuf = (uint8_t *)data + i;
+ if (!memcmp(csdBuf, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE)) {
+ *opusHeadSize = std::min(*opusHeadSize, i);
+ *codecDelayBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
+ *codecDelaySize = AOPUS_CSD_SIZE;
+ i += AOPUS_TOTAL_CSD_SIZE;
+ } else if (!memcmp(csdBuf, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE)) {
+ *opusHeadSize = std::min(*opusHeadSize, i);
+ *seekPreRollBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
+ *seekPreRollSize = AOPUS_CSD_SIZE;
+ i += AOPUS_TOTAL_CSD_SIZE;
+ } else {
+ i++;
+ }
+ }
+ }
+}
+
} // namespace android
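
Together, WriteOpusHeaders() and GetOpusHeaderBuffers() define a simple unified CSD: the standard OpusHead bytes followed by two optional 24-byte blocks (8-byte marker, 8-byte length holding the value 8, 8-byte payload) for codec delay and seek pre-roll. A hedged round-trip sketch using the constants from the new header below; the delay and pre-roll values are illustrative only:

    OpusHeader h = {};
    h.channels = 2;
    h.num_streams = 2;   // mirrors the stereo setup the old OggWriter code used
    uint8_t csd[AOPUS_UNIFIED_CSD_MAXSIZE];
    int len = WriteOpusHeaders(h, 48000, csd, sizeof(csd),
                               6500000ULL /* codec delay */, 80000000ULL /* seek pre-roll */);
    if (len > 0) {
        void *head, *delay, *preRoll;
        size_t headSize, delaySize, preRollSize;
        GetOpusHeaderBuffers(csd, len, &head, &headSize,
                             &delay, &delaySize, &preRoll, &preRollSize);
        // head/headSize now cover only the OpusHead portion; delay and preRoll point at the
        // 8-byte payloads that follow the AOPUSDLY and AOPUSPRL markers.
    }
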
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
new file mode 100644
index 0000000..9bffccb
--- /dev/null
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * The Opus specification is part of IETF RFC 6716:
+ * http://tools.ietf.org/html/rfc6716
+ */
+
+#ifndef OPUS_HEADER_H_
+#define OPUS_HEADER_H_
+
+namespace android {
+
+/* Constants used for delimiting Opus CSD */
+#define AOPUS_CSD_CODEC_DELAY_MARKER "AOPUSDLY"
+#define AOPUS_CSD_SEEK_PREROLL_MARKER "AOPUSPRL"
+#define AOPUS_CSD_SIZE 8
+#define AOPUS_LENGTH 8
+#define AOPUS_MARKER_SIZE 8
+#define AOPUS_LENGTH_SIZE 8
+#define AOPUS_TOTAL_CSD_SIZE \
+ ((AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_CSD_SIZE))
+#define AOPUS_CSD0_MINSIZE 19
+#define AOPUS_UNIFIED_CSD_MINSIZE \
+ ((AOPUS_CSD0_MINSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+
+/* CSD0 at max can be 22 bytes + max number of channels (255) */
+#define AOPUS_CSD0_MAXSIZE 277
+#define AOPUS_UNIFIED_CSD_MAXSIZE \
+ ((AOPUS_CSD0_MAXSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+
+struct OpusHeader {
+ int channels;
+ int channel_mapping;
+ int num_streams;
+ int num_coupled;
+ int16_t gain_db;
+ int skip_samples;
+ uint8_t stream_map[8];
+};
+
+bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
+int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
+void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+ void **opusHeadBuf, size_t *opusHeadSize,
+ void **codecDelayBuf, size_t *codecDelaySize,
+ void **seekPreRollBuf, size_t *seekPreRollSize);
+int WriteOpusHeaders(const OpusHeader &header, int inputSampleRate,
+ uint8_t* output, size_t outputSize, uint64_t codecDelay,
+ uint64_t seekPreRoll);
+} // namespace android
+
+#endif // OPUS_HEADER_H_
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 2ecfa43..5e7f90a 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -1234,7 +1234,7 @@
const AString &uri, uint32_t streamMask, int64_t timeUs, bool newUri) {
ssize_t index = mFetcherInfos.indexOfKey(uri);
if (index < 0) {
- ALOGE("did not find fetcher for uri: %s", uri.c_str());
+ ALOGE("did not find fetcher for uri: %s", uriDebugString(uri).c_str());
return false;
}
@@ -2005,7 +2005,7 @@
if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
ALOGW("swapping stream type %d %s to empty stream",
- stream, mStreams[idx].mUri.c_str());
+ stream, uriDebugString(mStreams[idx].mUri).c_str());
}
mStreams[idx].mUri = mStreams[idx].mNewUri;
mStreams[idx].mNewUri.clear();
@@ -2033,7 +2033,7 @@
CHECK(idx >= 0);
if (mStreams[idx].mNewUri.empty()) {
ALOGW("swapping extra stream type %d %s to empty stream",
- stream, mStreams[idx].mUri.c_str());
+ stream, uriDebugString(mStreams[idx].mUri).c_str());
}
mStreams[idx].mUri = mStreams[idx].mNewUri;
mStreams[idx].mNewUri.clear();
@@ -2138,7 +2138,7 @@
ALOGV("stopping newUri = %s", newUri.c_str());
ssize_t index = mFetcherInfos.indexOfKey(newUri);
if (index < 0) {
- ALOGE("did not find fetcher for newUri: %s", newUri.c_str());
+ ALOGE("did not find fetcher for newUri: %s", uriDebugString(newUri).c_str());
continue;
}
FetcherInfo &info = mFetcherInfos.editValueAt(index);
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 16179d3..b2361b8 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -706,6 +706,12 @@
++lineNo;
}
+ // reject an empty playlist; later processing assumes at least one item
+ if (mItems.size() == 0) {
+ ALOGE("playlist has no item");
+ return ERROR_MALFORMED;
+ }
+
// error checking of all fields that's required to appear once
// (currently only checking "target-duration"), and
// initialization of playlist properties (eg. mTargetDurationUs)
@@ -1199,8 +1205,7 @@
if (val.size() < 2
|| val.c_str()[0] != '"'
|| val.c_str()[val.size() - 1] != '"') {
- ALOGE("Expected quoted string for URI, got '%s' instead.",
- val.c_str());
+ ALOGE("Expected quoted string for URI.");
return ERROR_MALFORMED;
}
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 562c625..d153598 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -365,10 +365,10 @@
if (err == ERROR_NOT_CONNECTED) {
return ERROR_NOT_CONNECTED;
} else if (err < 0) {
- ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
+ ALOGE("failed to fetch cipher key from '%s'.", uriDebugString(keyURI).c_str());
return ERROR_IO;
} else if (key->size() != 16) {
- ALOGE("key file '%s' wasn't 16 bytes in size.", keyURI.c_str());
+ ALOGE("key file '%s' wasn't 16 bytes in size.", uriDebugString(keyURI).c_str());
return ERROR_MALFORMED;
}
@@ -1366,7 +1366,7 @@
}
if (bytesRead < 0) {
status_t err = bytesRead;
- ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
+ ALOGE("failed to fetch .ts segment at url '%s'", uriDebugString(uri).c_str());
notifyError(err);
return;
}
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 1abef8c..803155d 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -257,7 +257,9 @@
void initInternal(int fd, bool isFirstSession);
// Acquire lock before calling these methods
- off64_t addSample_l(MediaBuffer *buffer, bool usePrefix, bool isExif, size_t *bytesWritten);
+ off64_t addSample_l(
+ MediaBuffer *buffer, bool usePrefix,
+ uint32_t tiffHdrOffset, size_t *bytesWritten);
void addLengthPrefixedSample_l(MediaBuffer *buffer);
void addMultipleLengthPrefixedSamples_l(MediaBuffer *buffer);
uint16_t addProperty_l(const ItemProperty &);
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index c06c288..2dca5c3 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -752,6 +752,7 @@
constexpr char KEY_COLOR_STANDARD[] = "color-standard";
constexpr char KEY_COLOR_TRANSFER[] = "color-transfer";
constexpr char KEY_COMPLEXITY[] = "complexity";
+constexpr char KEY_CREATE_INPUT_SURFACE_SUSPENDED[] = "create-input-buffers-suspended";
constexpr char KEY_DURATION[] = "durationUs";
constexpr char KEY_FEATURE_[] = "feature-";
constexpr char KEY_FLAC_COMPRESSION_LEVEL[] = "flac-compression-level";
@@ -772,8 +773,10 @@
constexpr char KEY_LATENCY[] = "latency";
constexpr char KEY_LEVEL[] = "level";
constexpr char KEY_MAX_BIT_RATE[] = "max-bitrate";
+constexpr char KEY_MAX_FPS_TO_ENCODER[] = "max-fps-to-encoder";
constexpr char KEY_MAX_HEIGHT[] = "max-height";
constexpr char KEY_MAX_INPUT_SIZE[] = "max-input-size";
+constexpr char KEY_MAX_PTS_GAP_TO_ENCODER[] = "max-pts-gap-to-encoder";
constexpr char KEY_MAX_WIDTH[] = "max-width";
constexpr char KEY_MIME[] = "mime";
constexpr char KEY_OPERATING_RATE[] = "operating-rate";
@@ -828,8 +831,10 @@
constexpr int32_t INFO_TRY_AGAIN_LATER = -1;
constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT = 1;
constexpr int32_t VIDEO_SCALING_MODE_SCALE_TO_FIT_WITH_CROPPING = 2;
+constexpr char PARAMETER_KEY_OFFSET_TIME[] = "time-offset-us";
constexpr char PARAMETER_KEY_REQUEST_SYNC_FRAME[] = "request-sync";
constexpr char PARAMETER_KEY_SUSPEND[] = "drop-input-frames";
+constexpr char PARAMETER_KEY_SUSPEND_TIME[] = "drop-start-time-us";
constexpr char PARAMETER_KEY_VIDEO_BITRATE[] = "video-bitrate";
}
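
These keys replace the raw string literals used at the MediaCodecSource call sites earlier in this change. A short sketch of pausing and resuming a surface-input encoder with them ('encoder' and the timestamps are hypothetical locals, not part of the patch):

    sp<AMessage> pause = new AMessage;
    pause->setInt32(PARAMETER_KEY_SUSPEND, true);                   // "drop-input-frames"
    pause->setInt64(PARAMETER_KEY_SUSPEND_TIME, pauseStartTimeUs);  // "drop-start-time-us"
    encoder->setParameters(pause);

    sp<AMessage> resume = new AMessage;
    resume->setInt32(PARAMETER_KEY_SUSPEND, false);
    resume->setInt64(PARAMETER_KEY_SUSPEND_TIME, resumeStartTimeUs);
    encoder->setParameters(resume);
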
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h b/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
index 59f57c7..f53b23e 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecListWriter.h
@@ -48,6 +48,13 @@
* added `MediaCodecInfo` object.
*/
std::unique_ptr<MediaCodecInfoWriter> addMediaCodecInfo();
+ /**
+ * Find an existing `MediaCodecInfo` object by codec name and return a
+ * `MediaCodecInfoWriter` associated with it.
+ *
+ * @return The `MediaCodecInfoWriter` object for the named codec, or nullptr
+ * if no codec with that name has been added.
+ */
+ std::unique_ptr<MediaCodecInfoWriter> findMediaCodecInfo(const char *codecName);
private:
MediaCodecListWriter() = default;
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index ba6631c..ea87948 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -21,6 +21,7 @@
#include <stdio.h>
#include <unordered_set>
+#include <android/dlext.h>
#include <media/IMediaExtractor.h>
namespace android {
@@ -34,24 +35,18 @@
const sp<DataSource> &source, const char *mime = NULL);
static sp<IMediaExtractor> CreateFromService(
const sp<DataSource> &source, const char *mime = NULL);
- static void LoadPlugins(const ::std::string& apkPath);
static status_t dump(int fd, const Vector<String16>& args);
static std::unordered_set<std::string> getSupportedTypes();
- static void SetLinkedLibraries(const std::string& linkedLibraries);
private:
static Mutex gPluginMutex;
static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
static bool gPluginsRegistered;
static bool gIgnoreVersion;
- static std::string gLinkedLibraries;
- static void RegisterExtractorsInApk(
- const char *apkPath, std::list<sp<ExtractorPlugin>> &pluginList);
- static void RegisterExtractorsInSystem(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
- static void RegisterExtractorsInApex(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
+ static void RegisterExtractors(
+ const char *libDirPath, const android_dlextinfo* dlextinfo,
+ std::list<sp<ExtractorPlugin>> &pluginList);
static void RegisterExtractor(
const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
@@ -59,7 +54,7 @@
float *confidence, void **meta, FreeMetaFunc *freeMeta,
sp<ExtractorPlugin> &plugin, uint32_t *creatorVersion);
- static void UpdateExtractors(const char *newUpdateApkPath);
+ static void UpdateExtractors();
};
} // namespace android
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index b99c14c..a0407af 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -59,6 +59,7 @@
kKeyAACProfile = 'aacp', // int32_t
kKeyAVCC = 'avcc', // raw data
kKeyHVCC = 'hvcc', // raw data
+ kKeyAV1C = 'av1c', // raw data
kKeyThumbnailHVCC = 'thvc', // raw data
kKeyD263 = 'd263', // raw data
kKeyVorbisInfo = 'vinf', // raw data
@@ -143,6 +144,9 @@
// The language code for this media
kKeyMediaLanguage = 'lang', // cstring
+ // The manufacturer code for this media
+ kKeyManufacturer = 'manu', // cstring
+
// To store the timed text format data
kKeyTextFormatData = 'text', // raw data
@@ -221,7 +225,8 @@
kKeyFrameCount = 'nfrm', // int32_t, total number of frame in video track
kKeyExifOffset = 'exof', // int64_t, Exif data offset
kKeyExifSize = 'exsz', // int64_t, Exif data size
- kKeyIsExif = 'exif', // bool (int32_t) buffer contains exif data block
+ kKeyExifTiffOffset = 'thdr', // int32_t, if > 0, buffer contains exif data block with
+ // tiff hdr at specified offset
kKeyPcmBigEndian = 'pcmb', // bool (int32_t)
// Key for ALAC Magic Cookie
@@ -235,6 +240,7 @@
kTypeESDS = 'esds',
kTypeAVCC = 'avcc',
kTypeHVCC = 'hvcc',
+ kTypeAV1C = 'av1c',
kTypeD263 = 'd263',
};
diff --git a/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h b/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
index 28f6094..1410a16 100644
--- a/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
+++ b/media/libstagefright/include/media/stagefright/OmxInfoBuilder.h
@@ -23,8 +23,11 @@
namespace android {
class OmxInfoBuilder : public MediaCodecListBuilderBase {
+private:
+ bool mAllowSurfaceEncoders; // allow surface encoders
+
public:
- OmxInfoBuilder();
+ explicit OmxInfoBuilder(bool allowSurfaceEncoders);
~OmxInfoBuilder() override = default;
status_t buildMediaCodecList(MediaCodecListWriter* writer) override;
};
diff --git a/media/libstagefright/omx/1.0/OmxStore.cpp b/media/libstagefright/omx/1.0/OmxStore.cpp
index 447af6f..2e041e3 100644
--- a/media/libstagefright/omx/1.0/OmxStore.cpp
+++ b/media/libstagefright/omx/1.0/OmxStore.cpp
@@ -61,10 +61,7 @@
role.role = rolePair.first;
role.type = rolePair.second.type;
role.isEncoder = rolePair.second.isEncoder;
- // TODO: Currently, preferPlatformNodes information is not available in
- // the xml file. Once we have a way to provide this information, it
- // should be parsed properly.
- role.preferPlatformNodes = rolePair.first.compare(0, 5, "audio") == 0;
+ role.preferPlatformNodes = false; // deprecated and ignored, using rank instead
hidl_vec<NodeInfo>& nodeList = role.nodes;
nodeList.resize(rolePair.second.nodeList.size());
size_t j = 0;
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 362b7f5..4383004 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -87,6 +87,7 @@
vndk: {
enabled: true,
},
+ double_loadable: true,
srcs: ["OMXUtils.cpp"],
export_include_dirs: [
"include",
diff --git a/media/libstagefright/opus/Android.bp b/media/libstagefright/opus/Android.bp
deleted file mode 100644
index c5086ec..0000000
--- a/media/libstagefright/opus/Android.bp
+++ /dev/null
@@ -1,21 +0,0 @@
-cc_library_shared {
- name: "libstagefright_opus_common",
- vendor_available: true,
-
- export_include_dirs: ["include"],
-
- srcs: ["OpusHeader.cpp"],
-
- shared_libs: ["liblog"],
-
- cflags: ["-Werror"],
-
- sanitize: {
- integer_overflow: true,
- cfi: true,
- diag: {
- integer_overflow: true,
- cfi: true,
- },
- },
-}
\ No newline at end of file
diff --git a/media/libstagefright/opus/include/OpusHeader.h b/media/libstagefright/opus/include/OpusHeader.h
deleted file mode 100644
index f9f79cd..0000000
--- a/media/libstagefright/opus/include/OpusHeader.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * The Opus specification is part of IETF RFC 6716:
- * http://tools.ietf.org/html/rfc6716
- */
-
-#ifndef OPUS_HEADER_H_
-#define OPUS_HEADER_H_
-
-namespace android {
-
-struct OpusHeader {
- int channels;
- int channel_mapping;
- int num_streams;
- int num_coupled;
- int16_t gain_db;
- int skip_samples;
- uint8_t stream_map[8];
-};
-
-bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
-int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
-} // namespace android
-
-#endif // OPUS_HEADER_H_
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 20cb415..789e62a 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -255,7 +255,7 @@
struct hostent *ent = gethostbyname(host.c_str());
if (ent == NULL) {
- ALOGE("Unknown host %s", host.c_str());
+ ALOGE("Unknown host %s", uriDebugString(host).c_str());
reply->setInt32("result", -ENOENT);
reply->post();
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 325084c..9263565 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -80,7 +80,7 @@
return false;
}
- ALOGI("%s", line.c_str());
+ ALOGV("%s", line.c_str());
switch (line.c_str()[0]) {
case 'v':
@@ -261,7 +261,7 @@
return false;
}
- if (strncmp(value.c_str(), "npt=", 4)) {
+ if (strncmp(value.c_str(), "npt=", 4) && strncmp(value.c_str(), "npt:", 4)) {
return false;
}
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 5d993db..b4515e4 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -345,8 +345,7 @@
struct hostent *ent = gethostbyname(mSessionHost.c_str());
if (ent == NULL) {
- ALOGE("Failed to look up address of session host '%s'",
- mSessionHost.c_str());
+ ALOGE("Failed to look up address of session host");
return false;
}
@@ -531,7 +530,7 @@
mSessionURL.append(AStringPrintf("%u", port));
mSessionURL.append(path);
- ALOGI("rewritten session url: '%s'", mSessionURL.c_str());
+ ALOGV("rewritten session url: '%s'", mSessionURL.c_str());
}
sp<AMessage> reply = new AMessage('conn', this);
@@ -1913,7 +1912,7 @@
mLastMediaTimeUs = mediaTimeUs;
}
- if (mediaTimeUs < 0) {
+ if (mediaTimeUs < 0 && !mSeekable) {
ALOGV("dropping early accessUnit.");
return false;
}
diff --git a/media/libstagefright/timedtext/Android.bp b/media/libstagefright/timedtext/Android.bp
index 97e1ec6..6935655 100644
--- a/media/libstagefright/timedtext/Android.bp
+++ b/media/libstagefright/timedtext/Android.bp
@@ -44,9 +44,6 @@
"signed-integer-overflow",
],
cfi: true,
- diag: {
- cfi: true,
- },
},
include_dirs: [
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 1f840b7..64ecc2d 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -28,7 +28,6 @@
shared_libs: [
"libstagefright_foundation",
- "libstagefright_opus_common",
"libutils",
"liblog",
],
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 7b4b23a..b0a303e 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -24,7 +24,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/hexdump.h>
-#include <OpusHeader.h>
+#include <media/stagefright/foundation/OpusHeader.h>
#include <utils/Errors.h>
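
With libstagefright_opus_common removed, OpusHeader.h now lives under media/stagefright/foundation and former users such as WebmWriter pick it up through libstagefright_foundation. Going by the ParseOpusHeader signature and struct fields from the deleted header, a caller would look roughly like this (a sketch, not the writer's actual code path):

    #include <media/stagefright/foundation/OpusHeader.h>

    // Illustrative only: parse an OpusHead blob from a codec-specific-data buffer.
    bool describeOpusTrack(const uint8_t* csd, size_t csdSize, int* channels) {
        android::OpusHeader header{};
        if (!android::ParseOpusHeader(csd, csdSize, &header)) {
            return false;  // not a valid OpusHead packet
        }
        *channels = header.channels;
        return true;
    }
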
diff --git a/media/libstagefright/xmlparser/Android.bp b/media/libstagefright/xmlparser/Android.bp
index bebfb3b..819058c 100644
--- a/media/libstagefright/xmlparser/Android.bp
+++ b/media/libstagefright/xmlparser/Android.bp
@@ -10,6 +10,7 @@
vndk: {
enabled: true,
},
+ double_loadable: true,
srcs: [
"MediaCodecsXmlParser.cpp",
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index 6e541ba..7046f61 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -502,6 +502,7 @@
const char *name = nullptr;
const char *type = nullptr;
const char *update = nullptr;
+ const char *rank = nullptr;
size_t i = 0;
while (attrs[i] != nullptr) {
@@ -523,6 +524,12 @@
return BAD_VALUE;
}
update = attrs[i];
+ } else if (strEq(attrs[i], "rank")) {
+ if (attrs[++i] == nullptr) {
+ ALOGE("addMediaCodecFromAttributes: rank is null");
+ return BAD_VALUE;
+ }
+ rank = attrs[i];
} else {
ALOGE("addMediaCodecFromAttributes: unrecognized attribute: %s", attrs[i]);
return BAD_VALUE;
@@ -579,6 +586,15 @@
}
}
+ if (rank != nullptr) {
+ if (!mCurrentCodec->second.rank.empty() && mCurrentCodec->second.rank != rank) {
+ ALOGE("addMediaCodecFromAttributes: code \"%s\" rank changed from \"%s\" to \"%s\"",
+ name, mCurrentCodec->second.rank.c_str(), rank);
+ return BAD_VALUE;
+ }
+ mCurrentCodec->second.rank = rank;
+ }
+
return OK;
}
@@ -1035,6 +1051,7 @@
const auto& codecName = codec.first;
bool isEncoder = codec.second.isEncoder;
size_t order = codec.second.order;
+ std::string rank = codec.second.rank;
const auto& typeMap = codec.second.typeMap;
for (const auto& type : typeMap) {
const auto& typeName = type.first;
@@ -1090,6 +1107,9 @@
nodeInfo.attributeList.push_back(Attribute{quirk, "present"});
}
}
+ if (!rank.empty()) {
+ nodeInfo.attributeList.push_back(Attribute{"rank", rank});
+ }
nodeList->insert(std::make_pair(
std::move(order), std::move(nodeInfo)));
}
diff --git a/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h b/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
index fd949da..7a986b7 100644
--- a/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
+++ b/media/libstagefright/xmlparser/include/media/stagefright/xmlparser/MediaCodecsXmlParser.h
@@ -66,6 +66,7 @@
QuirkSet quirkSet; ///< Set of quirks requested by this codec
TypeMap typeMap; ///< Map of types supported by this codec
std::vector<std::string> aliases; ///< Name aliases for this codec
+ std::string rank; ///< Rank of this codec. This is a numeric string.
};
typedef std::pair<std::string, CodecProperties> Codec;
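
The new "rank" attribute is carried through as a numeric string and attached to the node's attribute list; consumers use it in place of the removed preferPlatformNodes flag when ordering codecs. A sketch of how a consumer might compare two rank strings, assuming (as the Codec2 rank convention suggests, though this diff does not state it) that a lower numeric value means a more preferred codec; the helper below is hypothetical:

    #include <climits>
    #include <cstdlib>
    #include <string>

    // Illustrative only: compare two "rank" attribute strings; empty means unranked.
    bool outranks(const std::string& a, const std::string& b) {
        unsigned long ra = a.empty() ? ULONG_MAX : strtoul(a.c_str(), nullptr, 10);
        unsigned long rb = b.empty() ? ULONG_MAX : strtoul(b.c_str(), nullptr, 10);
        return ra < rb;  // lower rank value wins under this assumption
    }
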
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
new file mode 100644
index 0000000..f01947a
--- /dev/null
+++ b/media/mediaserver/Android.bp
@@ -0,0 +1,43 @@
+
+cc_library_static {
+ name: "libregistermsext",
+ srcs: ["register.cpp"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
+
+cc_binary {
+ name: "mediaserver",
+
+ srcs: ["main_mediaserver.cpp"],
+
+ shared_libs: [
+ "libresourcemanagerservice",
+ "liblog",
+ "libmediaplayerservice",
+ "libutils",
+ "libbinder",
+ "libandroidicu",
+ ],
+
+ static_libs: [
+ "libicuandroid_utils",
+ "libregistermsext",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/libmediaplayerservice",
+ "frameworks/av/services/mediaresourcemanager",
+ ],
+
+ init_rc: ["mediaserver.rc"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+}
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
deleted file mode 100644
index 1fbb85e..0000000
--- a/media/mediaserver/Android.mk
+++ /dev/null
@@ -1,41 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-ifneq ($(BOARD_USE_CUSTOM_MEDIASERVEREXTENSIONS),true)
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := register.cpp
-LOCAL_MODULE := libregistermsext
-LOCAL_MODULE_TAGS := optional
-LOCAL_CFLAGS := -Werror -Wall
-include $(BUILD_STATIC_LIBRARY)
-endif
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- main_mediaserver.cpp
-
-LOCAL_SHARED_LIBRARIES := \
- libresourcemanagerservice \
- liblog \
- libmediaplayerservice \
- libutils \
- libbinder \
- libandroidicu \
- android.hardware.media.omx@1.0 \
-
-LOCAL_STATIC_LIBRARIES := \
- libicuandroid_utils \
- libregistermsext
-
-LOCAL_C_INCLUDES := \
- frameworks/av/media/libmediaplayerservice \
- frameworks/av/services/mediaresourcemanager \
-
-LOCAL_MODULE:= mediaserver
-LOCAL_32_BIT_ONLY := true
-
-LOCAL_INIT_RC := mediaserver.rc
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
diff --git a/media/mediaserver/mediaserver.rc b/media/mediaserver/mediaserver.rc
index f6c325c..8cfcd79 100644
--- a/media/mediaserver/mediaserver.rc
+++ b/media/mediaserver/mediaserver.rc
@@ -2,5 +2,7 @@
class main
user media
group audio camera inet net_bt net_bt_admin net_bw_acct drmrpc mediadrm
+ # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
+ updatable
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
diff --git a/media/mtp/IMtpDatabase.h b/media/mtp/IMtpDatabase.h
index 1245092..81fa60c 100644
--- a/media/mtp/IMtpDatabase.h
+++ b/media/mtp/IMtpDatabase.h
@@ -112,8 +112,8 @@
MtpObjectHandle handle, bool succeeded) = 0;
virtual MtpResponseCode beginCopyObject(MtpObjectHandle handle, MtpObjectHandle newParent,
- MtpStorageID newStorage);
- virtual void endCopyObject(MtpObjectHandle handle, bool succeeded);
+ MtpStorageID newStorage) = 0;
+ virtual void endCopyObject(MtpObjectHandle handle, bool succeeded) = 0;
};
}; // namespace android
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 74754ea..0b274a7 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -76,7 +76,7 @@
"libbinder",
"libmedia",
"libmedia_omx",
- "libmedia_jni",
+ "libmedia_jni_utils",
"libmediadrm",
"libstagefright",
"libstagefright_foundation",
@@ -84,7 +84,7 @@
"liblog",
"libutils",
"libcutils",
- "libandroid",
+ "libnativewindow",
"libandroid_runtime",
"libbinder",
"libhidlbase",
@@ -106,6 +106,10 @@
symbol_file: "libmediandk.map.txt",
versions: ["29"],
},
+
+ // Bug: http://b/124522995 libmediandk has linker errors when built with
+ // coverage
+ native_coverage: false,
}
llndk_library {
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 20b1667..1883f63 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -23,7 +23,7 @@
#include "NdkImageReaderPriv.h"
#include <android_media_Utils.h>
-#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <private/android/AHardwareBufferHelpers.h>
#include <utils/Log.h>
#include "hardware/camera3.h"
@@ -190,7 +190,7 @@
auto lockedBuffer = std::make_unique<CpuConsumer::LockedBuffer>();
- uint64_t grallocUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
+ uint64_t grallocUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
status_t ret =
lockImageFromBuffer(mBuffer, grallocUsage, mBuffer->mFence->dup(), lockedBuffer.get());
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index 1a0c3b1..b010aa9 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -26,7 +26,7 @@
#include <utils/Log.h>
#include <android_media_Utils.h>
#include <android_runtime/android_view_Surface.h>
-#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <private/android/AHardwareBufferHelpers.h>
#include <grallocusage/GrallocUsageConversion.h>
#include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
@@ -69,6 +69,8 @@
case AIMAGE_FORMAT_DEPTH16:
case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
case AIMAGE_FORMAT_Y8:
+ case AIMAGE_FORMAT_HEIC:
+ case AIMAGE_FORMAT_DEPTH_JPEG:
return true;
case AIMAGE_FORMAT_PRIVATE:
// For private format, cpu usage is prohibited.
@@ -96,6 +98,8 @@
case AIMAGE_FORMAT_DEPTH16:
case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
case AIMAGE_FORMAT_Y8:
+ case AIMAGE_FORMAT_HEIC:
+ case AIMAGE_FORMAT_DEPTH_JPEG:
return 1;
case AIMAGE_FORMAT_PRIVATE:
return 0;
@@ -270,7 +274,7 @@
PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
mHalFormat = android_view_Surface_mapPublicFormatToHalFormat(publicFormat);
mHalDataSpace = android_view_Surface_mapPublicFormatToHalDataspace(publicFormat);
- mHalUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
+ mHalUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
sp<IGraphicBufferProducer> gbProducer;
sp<IGraphicBufferConsumer> gbConsumer;
@@ -308,6 +312,9 @@
ALOGE("Failed to set BufferItemConsumer buffer dataSpace");
return AMEDIA_ERROR_UNKNOWN;
}
+ if (mUsage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT) {
+ gbConsumer->setConsumerIsProtected(true);
+ }
mSurface = new Surface(mProducer, /*controlledByApp*/true);
if (mSurface == nullptr) {
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 55afb33..2deb1a4 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -274,7 +274,10 @@
}
String8 mimeStr = mimeType ? String8(mimeType) : String8("");
- return drm->isCryptoSchemeSupported(uuid, mimeStr);
+ bool isSupported = false;
+ status_t status = drm->isCryptoSchemeSupported(uuid, mimeStr,
+ DrmPlugin::kSecurityLevelUnknown, &isSupported);
+ return (status == OK) && isSupported;
}
EXPORT
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 8296598..28e4f12 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -46,6 +46,18 @@
sp<ABuffer> mPsshBuf;
};
+sp<ABuffer> U32ArrayToSizeBuf(size_t numSubSamples, uint32_t *data) {
+ if (numSubSamples > SIZE_MAX / sizeof(size_t)) {
+ return NULL;
+ }
+ sp<ABuffer> sizebuf = new ABuffer(numSubSamples * sizeof(size_t));
+ size_t *sizes = (size_t *)sizebuf->data();
+ for (size_t i = 0; sizes != NULL && i < numSubSamples; i++) {
+ sizes[i] = data[i];
+ }
+ return sizebuf;
+}
+
extern "C" {
EXPORT
@@ -339,7 +351,7 @@
if (!meta->findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
return NULL;
}
- size_t numSubSamples = cryptedsize / sizeof(size_t);
+ size_t numSubSamples = cryptedsize / sizeof(uint32_t);
const void *cleardata;
size_t clearsize;
@@ -373,6 +385,16 @@
mode = CryptoPlugin::kMode_AES_CTR;
}
+ if (sizeof(uint32_t) != sizeof(size_t)) {
+ sp<ABuffer> clearbuf = U32ArrayToSizeBuf(numSubSamples, (uint32_t *)cleardata);
+ sp<ABuffer> cryptedbuf = U32ArrayToSizeBuf(numSubSamples, (uint32_t *)crypteddata);
+ cleardata = clearbuf == NULL ? NULL : clearbuf->data();
+ crypteddata = cryptedbuf == NULL ? NULL : cryptedbuf->data();
+ if (crypteddata == NULL || cleardata == NULL) {
+ return NULL;
+ }
+ }
+
return AMediaCodecCryptoInfo_new(
numSubSamples,
(uint8_t*) key,
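
The subsample sizes arrive from the extractor as uint32_t, while AMediaCodecCryptoInfo_new expects size_t arrays; on LP64 builds the two types differ, which is what the U32ArrayToSizeBuf copy above compensates for. The widening it performs is just this, shown standalone (names are illustrative):

    #include <cstdint>
    #include <vector>

    // Illustrative only: widen 32-bit subsample sizes to size_t for a 64-bit process.
    std::vector<size_t> widenSubsampleSizes(const uint32_t* sizes32, size_t count) {
        std::vector<size_t> sizes(count);
        for (size_t i = 0; i < count; ++i) {
            sizes[i] = sizes32[i];  // zero-extends on LP64, no-op on LP32
        }
        return sizes;
    }
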
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index fcb706d..7cc7f16 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -292,6 +292,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_COMPILATION = "compilation";
EXPORT const char* AMEDIAFORMAT_KEY_COMPLEXITY = "complexity";
EXPORT const char* AMEDIAFORMAT_KEY_COMPOSER = "composer";
+EXPORT const char* AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED = "create-input-buffers-suspended";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE = "crypto-default-iv-size";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK = "crypto-encrypted-byte-block";
EXPORT const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES = "crypto-encrypted-sizes";
@@ -341,9 +342,12 @@
EXPORT const char* AMEDIAFORMAT_KEY_LOCATION = "location";
EXPORT const char* AMEDIAFORMAT_KEY_LOOP = "loop";
EXPORT const char* AMEDIAFORMAT_KEY_LYRICIST = "lyricist";
+EXPORT const char* AMEDIAFORMAT_KEY_MANUFACTURER = "manufacturer";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE = "max-bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER = "max-fps-to-encoder";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
+EXPORT const char* AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER = "max-pts-gap-to-encoder";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA = "mpeg-user-data";
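
The new NDK keys mirror their framework MediaFormat counterparts. A hedged usage sketch for the encoder-related ones; the value types chosen here (float fps, int64 pts gap, int32 flag) are an assumption based on the framework-side keys, not something this diff states:

    #include <media/NdkMediaFormat.h>

    // Illustrative only: configure an encoder format with the newly exposed keys.
    AMediaFormat* makeEncoderFormat() {
        AMediaFormat* fmt = AMediaFormat_new();
        AMediaFormat_setFloat(fmt, AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER, 30.0f);
        AMediaFormat_setInt64(fmt, AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER, 33333);
        AMediaFormat_setInt32(fmt, AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED, 1);
        return fmt;  // caller releases it with AMediaFormat_delete()
    }
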
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 15b340c..3e60de0 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -526,7 +526,24 @@
* (in bytes) between adjacent rows.</p>
*
*/
- AIMAGE_FORMAT_Y8 = 0x20203859
+ AIMAGE_FORMAT_Y8 = 0x20203859,
+
+ /**
+ * Compressed HEIC format.
+ *
+ * <p>This format defines the HEIC brand of High Efficiency Image File
+ * Format as described in ISO/IEC 23008-12.</p>
+ */
+ AIMAGE_FORMAT_HEIC = 0x48454946,
+
+ /**
+ * Depth augmented compressed JPEG format.
+ *
+ * <p>JPEG compressed main image along with XMP embedded depth metadata
+ * following ISO 16684-1:2011(E).</p>
+ */
+ AIMAGE_FORMAT_DEPTH_JPEG = 0x69656963,
+
};
/**
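
AIMAGE_FORMAT_HEIC and AIMAGE_FORMAT_DEPTH_JPEG are now accepted by the reader paths patched earlier in this change (both CPU-readable, single plane). A minimal sketch of opening a reader for HEIC output, assuming the usual AImageReader_new flow; the wrapper function is illustrative:

    #include <media/NdkImage.h>
    #include <media/NdkImageReader.h>

    // Illustrative only: create a reader that accepts HEIC images.
    AImageReader* makeHeicReader(int32_t width, int32_t height) {
        AImageReader* reader = nullptr;
        media_status_t err =
                AImageReader_new(width, height, AIMAGE_FORMAT_HEIC, /*maxImages=*/2, &reader);
        return (err == AMEDIA_OK) ? reader : nullptr;  // free with AImageReader_delete()
    }
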
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 2551228..56bcaab 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -188,6 +188,7 @@
extern const char* AMEDIAFORMAT_KEY_CDTRACKNUMBER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_COMPILATION __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_COMPOSER __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES __INTRODUCED_IN(29);
@@ -213,7 +214,10 @@
extern const char* AMEDIAFORMAT_KEY_LOCATION __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_LOOP __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_LYRICIST __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_MANUFACTURER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_PSSH __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index c50084e..9756926 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -56,6 +56,7 @@
AMEDIAFORMAT_KEY_COMPILATION; # var introduced=29
AMEDIAFORMAT_KEY_COMPLEXITY; # var introduced=28
AMEDIAFORMAT_KEY_COMPOSER; # var introduced=29
+ AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK; # var introduced=29
AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_SIZES; # var introduced=29
@@ -104,9 +105,12 @@
AMEDIAFORMAT_KEY_LOCATION; # var introduced=29
AMEDIAFORMAT_KEY_LOOP; # var introduced=29
AMEDIAFORMAT_KEY_LYRICIST; # var introduced=29
+ AMEDIAFORMAT_KEY_MANUFACTURER; # var introduced=29
AMEDIAFORMAT_KEY_MAX_BIT_RATE; # var introduced=29
+ AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER; # var introduced=29
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
+ AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER; # var introduced=29
AMEDIAFORMAT_KEY_MAX_WIDTH; # var introduced=21
AMEDIAFORMAT_KEY_MIME; # var introduced=21
AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 1c54aec..599c446 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -85,7 +85,7 @@
return false;
}
} else {
- if (appOps.noteOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
+ if (appOps.checkOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
ALOGE("Request denied by app op: %d", op);
return false;
}
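
The switch from noteOp() to checkOp() is plausibly what the "MTP: CPU usage high" subject refers to: checkOp() only queries the current mode, while noteOp() also records the access and notifies op watchers, which is needless work when the same permission is re-evaluated on every request. A sketch of the cheaper re-check under that reading (op and package values are placeholders):

    #include <binder/AppOpsManager.h>
    #include <binder/IPCThreadState.h>

    // Illustrative only: per-request permission re-check without recording the op.
    bool opStillAllowed(int32_t op, const android::String16& pkg) {
        android::AppOpsManager appOps;
        const uid_t uid = android::IPCThreadState::self()->getCallingUid();
        return appOps.checkOp(op, uid, pkg) == android::AppOpsManager::MODE_ALLOWED;
    }
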
diff --git a/packages/MediaComponents/apex/Android.bp b/packages/MediaComponents/apex/Android.bp
deleted file mode 100644
index d89eb77..0000000
--- a/packages/MediaComponents/apex/Android.bp
+++ /dev/null
@@ -1,41 +0,0 @@
-filegroup {
- name: "media_aidl",
- srcs: [
- "java/android/media/**/*.aidl",
- "java/android/service/**/*.aidl",
- ],
- exclude_srcs: [
- // Exclude these aidls to avoid errors such as
- // "Refusing to generate code with unstructured parcelables."
- "java/android/media/MediaDescription.aidl",
- "java/android/media/MediaMetadata.aidl",
- // TODO(insun): check why MediaParceledListSlice.aidl should be added here
- "java/android/media/MediaParceledListSlice.aidl",
- "java/android/media/Rating.aidl",
- "java/android/media/browse/MediaBrowser.aidl",
- "java/android/media/session/MediaSession.aidl",
- "java/android/media/session/ParcelableVolumeInfo.aidl",
- "java/android/media/session/PlaybackState.aidl",
- ],
-}
-
-java_library {
- name: "media",
- installable: true,
- sdk_version: "system_current",
- srcs: [
- "java/android/media/**/*.java",
- "java/android/service/**/*.java",
- ":media_aidl",
- ":framework-media-annotation-srcs",
- ],
- aidl: {
- local_include_dirs: ["java"],
- include_dirs: [
- "frameworks/base/core/java",
- // for android.graphics.Bitmap
- // from IMediaBrowserServiceCallback
- "frameworks/base/graphics/java",
- ],
- },
-}
diff --git a/packages/MediaComponents/apex/java/android/media/IRemoteVolumeController.aidl b/packages/MediaComponents/apex/java/android/media/IRemoteVolumeController.aidl
deleted file mode 100644
index e4a4a42..0000000
--- a/packages/MediaComponents/apex/java/android/media/IRemoteVolumeController.aidl
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.session.ISessionController;
-
-/**
- * AIDL for the MediaSessionService to report interesting events on remote playback
- * to a volume control dialog. See also IVolumeController for the AudioService half.
- * TODO add in better support for multiple remote sessions.
- * @hide
- */
-oneway interface IRemoteVolumeController {
- void remoteVolumeChanged(ISessionController session, int flags);
- // sets the default session to use with the slider, replaces remoteSliderVisibility
- // on IVolumeController
- void updateRemoteController(ISessionController session);
-}
diff --git a/packages/MediaComponents/apex/java/android/media/ISessionTokensListener.aidl b/packages/MediaComponents/apex/java/android/media/ISessionTokensListener.aidl
deleted file mode 100644
index c83a19e..0000000
--- a/packages/MediaComponents/apex/java/android/media/ISessionTokensListener.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.os.Bundle;
-
-/**
- * Listens for changes to the list of session tokens.
- * @hide
- */
-oneway interface ISessionTokensListener {
- void onSessionTokensChanged(in List<Bundle> tokens);
-}
diff --git a/packages/MediaComponents/apex/java/android/media/MediaDescription.aidl b/packages/MediaComponents/apex/java/android/media/MediaDescription.aidl
deleted file mode 100644
index 6f934f7..0000000
--- a/packages/MediaComponents/apex/java/android/media/MediaDescription.aidl
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-package android.media;
-
-parcelable MediaDescription;
diff --git a/packages/MediaComponents/apex/java/android/media/MediaDescription.java b/packages/MediaComponents/apex/java/android/media/MediaDescription.java
deleted file mode 100644
index 31079e5..0000000
--- a/packages/MediaComponents/apex/java/android/media/MediaDescription.java
+++ /dev/null
@@ -1,383 +0,0 @@
-package android.media;
-
-import android.annotation.Nullable;
-import android.graphics.Bitmap;
-import android.media.browse.MediaBrowser;
-import android.net.Uri;
-import android.os.Bundle;
-import android.os.Parcel;
-import android.os.Parcelable;
-import android.text.TextUtils;
-
-/**
- * A simple set of metadata for a media item suitable for display. This can be
- * created using the Builder or retrieved from existing metadata using
- * {@link MediaMetadata#getDescription()}.
- */
-public class MediaDescription implements Parcelable {
- /**
- * A unique persistent id for the content or null.
- */
- private final String mMediaId;
- /**
- * A primary title suitable for display or null.
- */
- private final CharSequence mTitle;
- /**
- * A subtitle suitable for display or null.
- */
- private final CharSequence mSubtitle;
- /**
- * A description suitable for display or null.
- */
- private final CharSequence mDescription;
- /**
- * A bitmap icon suitable for display or null.
- */
- private final Bitmap mIcon;
- /**
- * A Uri for an icon suitable for display or null.
- */
- private final Uri mIconUri;
- /**
- * Extras for opaque use by apps/system.
- */
- private final Bundle mExtras;
- /**
- * A Uri to identify this content.
- */
- private final Uri mMediaUri;
-
- /**
- * Used as a long extra field to indicate the bluetooth folder type of the media item as
- * specified in the section 6.10.2.2 of the Bluetooth AVRCP 1.5. This is valid only for
- * {@link MediaBrowser.MediaItem} with {@link MediaBrowser.MediaItem#FLAG_BROWSABLE}. The value
- * should be one of the following:
- * <ul>
- * <li>{@link #BT_FOLDER_TYPE_MIXED}</li>
- * <li>{@link #BT_FOLDER_TYPE_TITLES}</li>
- * <li>{@link #BT_FOLDER_TYPE_ALBUMS}</li>
- * <li>{@link #BT_FOLDER_TYPE_ARTISTS}</li>
- * <li>{@link #BT_FOLDER_TYPE_GENRES}</li>
- * <li>{@link #BT_FOLDER_TYPE_PLAYLISTS}</li>
- * <li>{@link #BT_FOLDER_TYPE_YEARS}</li>
- * </ul>
- *
- * @see #getExtras()
- */
- public static final String EXTRA_BT_FOLDER_TYPE = "android.media.extra.BT_FOLDER_TYPE";
-
- /**
- * The type of folder that is unknown or contains media elements of mixed types as specified in
- * the section 6.10.2.2 of the Bluetooth AVRCP 1.5.
- */
- public static final long BT_FOLDER_TYPE_MIXED = 0;
-
- /**
- * The type of folder that contains media elements only as specified in the section 6.10.2.2 of
- * the Bluetooth AVRCP 1.5.
- */
- public static final long BT_FOLDER_TYPE_TITLES = 1;
-
- /**
- * The type of folder that contains folders categorized by album as specified in the section
- * 6.10.2.2 of the Bluetooth AVRCP 1.5.
- */
- public static final long BT_FOLDER_TYPE_ALBUMS = 2;
-
- /**
- * The type of folder that contains folders categorized by artist as specified in the section
- * 6.10.2.2 of the Bluetooth AVRCP 1.5.
- */
- public static final long BT_FOLDER_TYPE_ARTISTS = 3;
-
- /**
- * The type of folder that contains folders categorized by genre as specified in the section
- * 6.10.2.2 of the Bluetooth AVRCP 1.5.
- */
- public static final long BT_FOLDER_TYPE_GENRES = 4;
-
- /**
- * The type of folder that contains folders categorized by playlist as specified in the section
- * 6.10.2.2 of the Bluetooth AVRCP 1.5.
- */
- public static final long BT_FOLDER_TYPE_PLAYLISTS = 5;
-
- /**
- * The type of folder that contains folders categorized by year as specified in the section
- * 6.10.2.2 of the Bluetooth AVRCP 1.5.
- */
- public static final long BT_FOLDER_TYPE_YEARS = 6;
-
- private MediaDescription(String mediaId, CharSequence title, CharSequence subtitle,
- CharSequence description, Bitmap icon, Uri iconUri, Bundle extras, Uri mediaUri) {
- mMediaId = mediaId;
- mTitle = title;
- mSubtitle = subtitle;
- mDescription = description;
- mIcon = icon;
- mIconUri = iconUri;
- mExtras = extras;
- mMediaUri = mediaUri;
- }
-
- private MediaDescription(Parcel in) {
- mMediaId = in.readString();
- mTitle = TextUtils.CHAR_SEQUENCE_CREATOR.createFromParcel(in);
- mSubtitle = TextUtils.CHAR_SEQUENCE_CREATOR.createFromParcel(in);
- mDescription = TextUtils.CHAR_SEQUENCE_CREATOR.createFromParcel(in);
- mIcon = in.readParcelable(null);
- mIconUri = in.readParcelable(null);
- mExtras = in.readBundle();
- mMediaUri = in.readParcelable(null);
- }
-
- /**
- * Returns the media id or null. See
- * {@link MediaMetadata#METADATA_KEY_MEDIA_ID}.
- */
- public @Nullable String getMediaId() {
- return mMediaId;
- }
-
- /**
- * Returns a title suitable for display or null.
- *
- * @return A title or null.
- */
- public @Nullable CharSequence getTitle() {
- return mTitle;
- }
-
- /**
- * Returns a subtitle suitable for display or null.
- *
- * @return A subtitle or null.
- */
- public @Nullable CharSequence getSubtitle() {
- return mSubtitle;
- }
-
- /**
- * Returns a description suitable for display or null.
- *
- * @return A description or null.
- */
- public @Nullable CharSequence getDescription() {
- return mDescription;
- }
-
- /**
- * Returns a bitmap icon suitable for display or null.
- *
- * @return An icon or null.
- */
- public @Nullable Bitmap getIconBitmap() {
- return mIcon;
- }
-
- /**
- * Returns a Uri for an icon suitable for display or null.
- *
- * @return An icon uri or null.
- */
- public @Nullable Uri getIconUri() {
- return mIconUri;
- }
-
- /**
- * Returns any extras that were added to the description.
- *
- * @return A bundle of extras or null.
- */
- public @Nullable Bundle getExtras() {
- return mExtras;
- }
-
- /**
- * Returns a Uri representing this content or null.
- *
- * @return A media Uri or null.
- */
- public @Nullable Uri getMediaUri() {
- return mMediaUri;
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- dest.writeString(mMediaId);
- TextUtils.writeToParcel(mTitle, dest, 0);
- TextUtils.writeToParcel(mSubtitle, dest, 0);
- TextUtils.writeToParcel(mDescription, dest, 0);
- dest.writeParcelable(mIcon, flags);
- dest.writeParcelable(mIconUri, flags);
- dest.writeBundle(mExtras);
- dest.writeParcelable(mMediaUri, flags);
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null) {
- return false;
- }
-
- if (!(o instanceof MediaDescription)){
- return false;
- }
-
- final MediaDescription d = (MediaDescription) o;
-
- if (!String.valueOf(mTitle).equals(String.valueOf(d.mTitle))) {
- return false;
- }
-
- if (!String.valueOf(mSubtitle).equals(String.valueOf(d.mSubtitle))) {
- return false;
- }
-
- if (!String.valueOf(mDescription).equals(String.valueOf(d.mDescription))) {
- return false;
- }
-
- return true;
- }
-
- @Override
- public String toString() {
- return mTitle + ", " + mSubtitle + ", " + mDescription;
- }
-
- public static final Parcelable.Creator<MediaDescription> CREATOR =
- new Parcelable.Creator<MediaDescription>() {
- @Override
- public MediaDescription createFromParcel(Parcel in) {
- return new MediaDescription(in);
- }
-
- @Override
- public MediaDescription[] newArray(int size) {
- return new MediaDescription[size];
- }
- };
-
- /**
- * Builder for {@link MediaDescription} objects.
- */
- public static class Builder {
- private String mMediaId;
- private CharSequence mTitle;
- private CharSequence mSubtitle;
- private CharSequence mDescription;
- private Bitmap mIcon;
- private Uri mIconUri;
- private Bundle mExtras;
- private Uri mMediaUri;
-
- /**
- * Creates an initially empty builder.
- */
- public Builder() {
- }
-
- /**
- * Sets the media id.
- *
- * @param mediaId The unique id for the item or null.
- * @return this
- */
- public Builder setMediaId(@Nullable String mediaId) {
- mMediaId = mediaId;
- return this;
- }
-
- /**
- * Sets the title.
- *
- * @param title A title suitable for display to the user or null.
- * @return this
- */
- public Builder setTitle(@Nullable CharSequence title) {
- mTitle = title;
- return this;
- }
-
- /**
- * Sets the subtitle.
- *
- * @param subtitle A subtitle suitable for display to the user or null.
- * @return this
- */
- public Builder setSubtitle(@Nullable CharSequence subtitle) {
- mSubtitle = subtitle;
- return this;
- }
-
- /**
- * Sets the description.
- *
- * @param description A description suitable for display to the user or
- * null.
- * @return this
- */
- public Builder setDescription(@Nullable CharSequence description) {
- mDescription = description;
- return this;
- }
-
- /**
- * Sets the icon.
- *
- * @param icon A {@link Bitmap} icon suitable for display to the user or
- * null.
- * @return this
- */
- public Builder setIconBitmap(@Nullable Bitmap icon) {
- mIcon = icon;
- return this;
- }
-
- /**
- * Sets the icon uri.
- *
- * @param iconUri A {@link Uri} for an icon suitable for display to the
- * user or null.
- * @return this
- */
- public Builder setIconUri(@Nullable Uri iconUri) {
- mIconUri = iconUri;
- return this;
- }
-
- /**
- * Sets a bundle of extras.
- *
- * @param extras The extras to include with this description or null.
- * @return this
- */
- public Builder setExtras(@Nullable Bundle extras) {
- mExtras = extras;
- return this;
- }
-
- /**
- * Sets the media uri.
- *
- * @param mediaUri The content's {@link Uri} for the item or null.
- * @return this
- */
- public Builder setMediaUri(@Nullable Uri mediaUri) {
- mMediaUri = mediaUri;
- return this;
- }
-
- public MediaDescription build() {
- return new MediaDescription(mMediaId, mTitle, mSubtitle, mDescription, mIcon, mIconUri,
- mExtras, mMediaUri);
- }
- }
-}
diff --git a/packages/MediaComponents/apex/java/android/media/MediaMetadata.aidl b/packages/MediaComponents/apex/java/android/media/MediaMetadata.aidl
deleted file mode 100644
index 66ee483..0000000
--- a/packages/MediaComponents/apex/java/android/media/MediaMetadata.aidl
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-package android.media;
-
-parcelable MediaMetadata;
diff --git a/packages/MediaComponents/apex/java/android/media/MediaMetadata.java b/packages/MediaComponents/apex/java/android/media/MediaMetadata.java
deleted file mode 100644
index adfd20b..0000000
--- a/packages/MediaComponents/apex/java/android/media/MediaMetadata.java
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-import android.annotation.NonNull;
-import android.annotation.StringDef;
-import android.annotation.UnsupportedAppUsage;
-import android.content.ContentResolver;
-import android.graphics.Bitmap;
-import android.graphics.BitmapFactory;
-import android.media.browse.MediaBrowser;
-import android.media.session.MediaController;
-import android.net.Uri;
-import android.os.Bundle;
-import android.os.Parcel;
-import android.os.Parcelable;
-import android.text.TextUtils;
-import android.util.ArrayMap;
-import android.util.Log;
-import android.util.SparseArray;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.util.Set;
-import java.util.Objects;
-
-/**
- * Contains metadata about an item, such as the title, artist, etc.
- */
-public final class MediaMetadata implements Parcelable {
- private static final String TAG = "MediaMetadata";
-
- /**
- * @hide
- */
- @StringDef(prefix = { "METADATA_KEY_" }, value = {
- METADATA_KEY_TITLE,
- METADATA_KEY_ARTIST,
- METADATA_KEY_ALBUM,
- METADATA_KEY_AUTHOR,
- METADATA_KEY_WRITER,
- METADATA_KEY_COMPOSER,
- METADATA_KEY_COMPILATION,
- METADATA_KEY_DATE,
- METADATA_KEY_GENRE,
- METADATA_KEY_ALBUM_ARTIST,
- METADATA_KEY_ART_URI,
- METADATA_KEY_ALBUM_ART_URI,
- METADATA_KEY_DISPLAY_TITLE,
- METADATA_KEY_DISPLAY_SUBTITLE,
- METADATA_KEY_DISPLAY_DESCRIPTION,
- METADATA_KEY_DISPLAY_ICON_URI,
- METADATA_KEY_MEDIA_ID,
- METADATA_KEY_MEDIA_URI,
- })
- @Retention(RetentionPolicy.SOURCE)
- public @interface TextKey {}
-
- /**
- * @hide
- */
- @StringDef(prefix = { "METADATA_KEY_" }, value = {
- METADATA_KEY_DURATION,
- METADATA_KEY_YEAR,
- METADATA_KEY_TRACK_NUMBER,
- METADATA_KEY_NUM_TRACKS,
- METADATA_KEY_DISC_NUMBER,
- METADATA_KEY_BT_FOLDER_TYPE,
- })
- @Retention(RetentionPolicy.SOURCE)
- public @interface LongKey {}
-
- /**
- * @hide
- */
- @StringDef(prefix = { "METADATA_KEY_" }, value = {
- METADATA_KEY_ART,
- METADATA_KEY_ALBUM_ART,
- METADATA_KEY_DISPLAY_ICON,
- })
- @Retention(RetentionPolicy.SOURCE)
- public @interface BitmapKey {}
-
- /**
- * @hide
- */
- @StringDef(prefix = { "METADATA_KEY_" }, value = {
- METADATA_KEY_USER_RATING,
- METADATA_KEY_RATING,
- })
- @Retention(RetentionPolicy.SOURCE)
- public @interface RatingKey {}
-
- /**
- * The title of the media.
- */
- public static final String METADATA_KEY_TITLE = "android.media.metadata.TITLE";
-
- /**
- * The artist of the media.
- */
- public static final String METADATA_KEY_ARTIST = "android.media.metadata.ARTIST";
-
- /**
- * The duration of the media in ms. A negative duration indicates that the
- * duration is unknown (or infinite).
- */
- public static final String METADATA_KEY_DURATION = "android.media.metadata.DURATION";
-
- /**
- * The album title for the media.
- */
- public static final String METADATA_KEY_ALBUM = "android.media.metadata.ALBUM";
-
- /**
- * The author of the media.
- */
- public static final String METADATA_KEY_AUTHOR = "android.media.metadata.AUTHOR";
-
- /**
- * The writer of the media.
- */
- public static final String METADATA_KEY_WRITER = "android.media.metadata.WRITER";
-
- /**
- * The composer of the media.
- */
- public static final String METADATA_KEY_COMPOSER = "android.media.metadata.COMPOSER";
-
- /**
- * The compilation status of the media.
- */
- public static final String METADATA_KEY_COMPILATION = "android.media.metadata.COMPILATION";
-
- /**
- * The date the media was created or published. The format is unspecified
- * but RFC 3339 is recommended.
- */
- public static final String METADATA_KEY_DATE = "android.media.metadata.DATE";
-
- /**
- * The year the media was created or published as a long.
- */
- public static final String METADATA_KEY_YEAR = "android.media.metadata.YEAR";
-
- /**
- * The genre of the media.
- */
- public static final String METADATA_KEY_GENRE = "android.media.metadata.GENRE";
-
- /**
- * The track number for the media.
- */
- public static final String METADATA_KEY_TRACK_NUMBER = "android.media.metadata.TRACK_NUMBER";
-
- /**
- * The number of tracks in the media's original source.
- */
- public static final String METADATA_KEY_NUM_TRACKS = "android.media.metadata.NUM_TRACKS";
-
- /**
- * The disc number for the media's original source.
- */
- public static final String METADATA_KEY_DISC_NUMBER = "android.media.metadata.DISC_NUMBER";
-
- /**
- * The artist for the album of the media's original source.
- */
- public static final String METADATA_KEY_ALBUM_ARTIST = "android.media.metadata.ALBUM_ARTIST";
-
- /**
- * The artwork for the media as a {@link Bitmap}.
- * <p>
- * The artwork should be relatively small and may be scaled down by the
- * system if it is too large. For higher resolution artwork
- * {@link #METADATA_KEY_ART_URI} should be used instead.
- */
- public static final String METADATA_KEY_ART = "android.media.metadata.ART";
-
- /**
- * The artwork for the media as a Uri formatted String. The artwork can be
- * loaded using a combination of {@link ContentResolver#openInputStream} and
- * {@link BitmapFactory#decodeStream}.
- * <p>
- * For the best results, Uris should use the content:// style and support
- * {@link ContentResolver#EXTRA_SIZE} for retrieving scaled artwork through
- * {@link ContentResolver#openTypedAssetFileDescriptor(Uri, String, Bundle)}.
- */
- public static final String METADATA_KEY_ART_URI = "android.media.metadata.ART_URI";
-
- /**
- * The artwork for the album of the media's original source as a
- * {@link Bitmap}.
- * <p>
- * The artwork should be relatively small and may be scaled down by the
- * system if it is too large. For higher resolution artwork
- * {@link #METADATA_KEY_ALBUM_ART_URI} should be used instead.
- */
- public static final String METADATA_KEY_ALBUM_ART = "android.media.metadata.ALBUM_ART";
-
- /**
- * The artwork for the album of the media's original source as a Uri
- * formatted String. The artwork can be loaded using a combination of
- * {@link ContentResolver#openInputStream} and
- * {@link BitmapFactory#decodeStream}.
- * <p>
- * For the best results, Uris should use the content:// style and support
- * {@link ContentResolver#EXTRA_SIZE} for retrieving scaled artwork through
- * {@link ContentResolver#openTypedAssetFileDescriptor(Uri, String, Bundle)}.
- */
- public static final String METADATA_KEY_ALBUM_ART_URI = "android.media.metadata.ALBUM_ART_URI";
-
- /**
- * The user's rating for the media.
- *
- * @see Rating
- */
- public static final String METADATA_KEY_USER_RATING = "android.media.metadata.USER_RATING";
-
- /**
- * The overall rating for the media.
- *
- * @see Rating
- */
- public static final String METADATA_KEY_RATING = "android.media.metadata.RATING";
-
- /**
- * A title that is suitable for display to the user. This will generally be
- * the same as {@link #METADATA_KEY_TITLE} but may differ for some formats.
- * When displaying media described by this metadata this should be preferred
- * if present.
- */
- public static final String METADATA_KEY_DISPLAY_TITLE = "android.media.metadata.DISPLAY_TITLE";
-
- /**
- * A subtitle that is suitable for display to the user. When displaying a
- * second line for media described by this metadata this should be preferred
- * to other fields if present.
- */
- public static final String METADATA_KEY_DISPLAY_SUBTITLE
- = "android.media.metadata.DISPLAY_SUBTITLE";
-
- /**
- * A description that is suitable for display to the user. When displaying
- * more information for media described by this metadata this should be
- * preferred to other fields if present.
- */
- public static final String METADATA_KEY_DISPLAY_DESCRIPTION
- = "android.media.metadata.DISPLAY_DESCRIPTION";
-
- /**
- * An icon or thumbnail that is suitable for display to the user. When
- * displaying an icon for media described by this metadata this should be
- * preferred to other fields if present. This must be a {@link Bitmap}.
- * <p>
- * The icon should be relatively small and may be scaled down by the system
- * if it is too large. For higher resolution artwork
- * {@link #METADATA_KEY_DISPLAY_ICON_URI} should be used instead.
- */
- public static final String METADATA_KEY_DISPLAY_ICON
- = "android.media.metadata.DISPLAY_ICON";
-
- /**
- * A Uri formatted String for an icon or thumbnail that is suitable for
- * display to the user. When displaying more information for media described
- * by this metadata the display description should be preferred to other
- * fields when present. The icon can be loaded using a combination of
- * {@link ContentResolver#openInputStream} and
- * {@link BitmapFactory#decodeStream}.
- * <p>
- * For the best results, Uris should use the content:// style and support
- * {@link ContentResolver#EXTRA_SIZE} for retrieving scaled artwork through
- * {@link ContentResolver#openTypedAssetFileDescriptor(Uri, String, Bundle)}.
- */
- public static final String METADATA_KEY_DISPLAY_ICON_URI
- = "android.media.metadata.DISPLAY_ICON_URI";
-
- /**
- * A String key for identifying the content. This value is specific to the
- * service providing the content. If used, this should be a persistent
- * unique key for the underlying content. It may be used with
- * {@link MediaController.TransportControls#playFromMediaId(String, Bundle)}
- * to initiate playback when provided by a {@link MediaBrowser} connected to
- * the same app.
- */
- public static final String METADATA_KEY_MEDIA_ID = "android.media.metadata.MEDIA_ID";
-
- /**
- * A Uri formatted String representing the content. This value is specific to the
- * service providing the content. It may be used with
- * {@link MediaController.TransportControls#playFromUri(Uri, Bundle)}
- * to initiate playback when provided by a {@link MediaBrowser} connected to
- * the same app.
- */
- public static final String METADATA_KEY_MEDIA_URI = "android.media.metadata.MEDIA_URI";
-
- /**
- * The bluetooth folder type of the media specified in the section 6.10.2.2 of the Bluetooth
- * AVRCP 1.5. It should be one of the following:
- * <ul>
- * <li>{@link MediaDescription#BT_FOLDER_TYPE_MIXED}</li>
- * <li>{@link MediaDescription#BT_FOLDER_TYPE_TITLES}</li>
- * <li>{@link MediaDescription#BT_FOLDER_TYPE_ALBUMS}</li>
- * <li>{@link MediaDescription#BT_FOLDER_TYPE_ARTISTS}</li>
- * <li>{@link MediaDescription#BT_FOLDER_TYPE_GENRES}</li>
- * <li>{@link MediaDescription#BT_FOLDER_TYPE_PLAYLISTS}</li>
- * <li>{@link MediaDescription#BT_FOLDER_TYPE_YEARS}</li>
- * </ul>
- */
- public static final String METADATA_KEY_BT_FOLDER_TYPE
- = "android.media.metadata.BT_FOLDER_TYPE";
-
- private static final @TextKey String[] PREFERRED_DESCRIPTION_ORDER = {
- METADATA_KEY_TITLE,
- METADATA_KEY_ARTIST,
- METADATA_KEY_ALBUM,
- METADATA_KEY_ALBUM_ARTIST,
- METADATA_KEY_WRITER,
- METADATA_KEY_AUTHOR,
- METADATA_KEY_COMPOSER
- };
-
- private static final @BitmapKey String[] PREFERRED_BITMAP_ORDER = {
- METADATA_KEY_DISPLAY_ICON,
- METADATA_KEY_ART,
- METADATA_KEY_ALBUM_ART
- };
-
- private static final @TextKey String[] PREFERRED_URI_ORDER = {
- METADATA_KEY_DISPLAY_ICON_URI,
- METADATA_KEY_ART_URI,
- METADATA_KEY_ALBUM_ART_URI
- };
-
- private static final int METADATA_TYPE_INVALID = -1;
- private static final int METADATA_TYPE_LONG = 0;
- private static final int METADATA_TYPE_TEXT = 1;
- private static final int METADATA_TYPE_BITMAP = 2;
- private static final int METADATA_TYPE_RATING = 3;
- private static final ArrayMap<String, Integer> METADATA_KEYS_TYPE;
-
- static {
- METADATA_KEYS_TYPE = new ArrayMap<String, Integer>();
- METADATA_KEYS_TYPE.put(METADATA_KEY_TITLE, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_ARTIST, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DURATION, METADATA_TYPE_LONG);
- METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_AUTHOR, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_WRITER, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_COMPOSER, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_COMPILATION, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DATE, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_YEAR, METADATA_TYPE_LONG);
- METADATA_KEYS_TYPE.put(METADATA_KEY_GENRE, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_TRACK_NUMBER, METADATA_TYPE_LONG);
- METADATA_KEYS_TYPE.put(METADATA_KEY_NUM_TRACKS, METADATA_TYPE_LONG);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DISC_NUMBER, METADATA_TYPE_LONG);
- METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM_ARTIST, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_ART, METADATA_TYPE_BITMAP);
- METADATA_KEYS_TYPE.put(METADATA_KEY_ART_URI, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM_ART, METADATA_TYPE_BITMAP);
- METADATA_KEYS_TYPE.put(METADATA_KEY_ALBUM_ART_URI, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_USER_RATING, METADATA_TYPE_RATING);
- METADATA_KEYS_TYPE.put(METADATA_KEY_RATING, METADATA_TYPE_RATING);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_TITLE, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_SUBTITLE, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_DESCRIPTION, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_ICON, METADATA_TYPE_BITMAP);
- METADATA_KEYS_TYPE.put(METADATA_KEY_DISPLAY_ICON_URI, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_BT_FOLDER_TYPE, METADATA_TYPE_LONG);
- METADATA_KEYS_TYPE.put(METADATA_KEY_MEDIA_ID, METADATA_TYPE_TEXT);
- METADATA_KEYS_TYPE.put(METADATA_KEY_MEDIA_URI, METADATA_TYPE_TEXT);
- }
-
- private static final SparseArray<String> EDITOR_KEY_MAPPING;
-
- static {
- EDITOR_KEY_MAPPING = new SparseArray<String>();
- EDITOR_KEY_MAPPING.put(MediaMetadataEditor.BITMAP_KEY_ARTWORK, METADATA_KEY_ART);
- EDITOR_KEY_MAPPING.put(MediaMetadataEditor.RATING_KEY_BY_OTHERS, METADATA_KEY_RATING);
- EDITOR_KEY_MAPPING.put(MediaMetadataEditor.RATING_KEY_BY_USER, METADATA_KEY_USER_RATING);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_ALBUM, METADATA_KEY_ALBUM);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_ALBUMARTIST,
- METADATA_KEY_ALBUM_ARTIST);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_ARTIST, METADATA_KEY_ARTIST);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_AUTHOR, METADATA_KEY_AUTHOR);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_CD_TRACK_NUMBER,
- METADATA_KEY_TRACK_NUMBER);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_COMPOSER, METADATA_KEY_COMPOSER);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_COMPILATION,
- METADATA_KEY_COMPILATION);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_DATE, METADATA_KEY_DATE);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_DISC_NUMBER,
- METADATA_KEY_DISC_NUMBER);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_DURATION, METADATA_KEY_DURATION);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_GENRE, METADATA_KEY_GENRE);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_NUM_TRACKS,
- METADATA_KEY_NUM_TRACKS);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_TITLE, METADATA_KEY_TITLE);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_WRITER, METADATA_KEY_WRITER);
- EDITOR_KEY_MAPPING.put(MediaMetadataRetriever.METADATA_KEY_YEAR, METADATA_KEY_YEAR);
- }
-
- private final Bundle mBundle;
- private MediaDescription mDescription;
-
- private MediaMetadata(Bundle bundle) {
- mBundle = new Bundle(bundle);
- }
-
- private MediaMetadata(Parcel in) {
- mBundle = in.readBundle();
- }
-
- /**
- * Returns true if the given key is contained in the metadata
- *
- * @param key a String key
- * @return true if the key exists in this metadata, false otherwise
- */
- public boolean containsKey(String key) {
- return mBundle.containsKey(key);
- }
-
- /**
- * Returns the value associated with the given key, or null if no mapping of
- * the desired type exists for the given key or a null value is explicitly
- * associated with the key.
- *
- * @param key The key the value is stored under
- * @return a CharSequence value, or null
- */
- public CharSequence getText(@TextKey String key) {
- return mBundle.getCharSequence(key);
- }
-
- /**
- * Returns the text value associated with the given key as a String, or null
- * if no mapping of the desired type exists for the given key or a null
- * value is explicitly associated with the key. This is equivalent to
- * calling {@link #getText getText().toString()} if the value is not null.
- *
- * @param key The key the value is stored under
- * @return a String value, or null
- */
- public String getString(@TextKey String key) {
- CharSequence text = getText(key);
- if (text != null) {
- return text.toString();
- }
- return null;
- }
-
- /**
- * Returns the value associated with the given key, or 0L if no long exists
- * for the given key.
- *
- * @param key The key the value is stored under
- * @return a long value
- */
- public long getLong(@LongKey String key) {
- return mBundle.getLong(key, 0);
- }
-
- /**
- * Returns a {@link Rating} for the given key or null if no rating exists
- * for the given key.
- *
- * @param key The key the value is stored under
- * @return A {@link Rating} or null
- */
- public Rating getRating(@RatingKey String key) {
- Rating rating = null;
- try {
- rating = mBundle.getParcelable(key);
- } catch (Exception e) {
- // ignore, value was not a Rating
- Log.w(TAG, "Failed to retrieve a key as Rating.", e);
- }
- return rating;
- }
-
- /**
- * Returns a {@link Bitmap} for the given key or null if no bitmap exists
- * for the given key.
- *
- * @param key The key the value is stored under
- * @return A {@link Bitmap} or null
- */
- public Bitmap getBitmap(@BitmapKey String key) {
- Bitmap bmp = null;
- try {
- bmp = mBundle.getParcelable(key);
- } catch (Exception e) {
- // ignore, value was not a bitmap
- Log.w(TAG, "Failed to retrieve a key as Bitmap.", e);
- }
- return bmp;
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- dest.writeBundle(mBundle);
- }
-
- /**
- * Returns the number of fields in this metadata.
- *
- * @return The number of fields in the metadata.
- */
- public int size() {
- return mBundle.size();
- }
-
- /**
- * Returns a Set containing the Strings used as keys in this metadata.
- *
- * @return a Set of String keys
- */
- public Set<String> keySet() {
- return mBundle.keySet();
- }
-
- /**
- * Returns a simple description of this metadata for display purposes.
- *
- * @return A simple description of this metadata.
- */
- public @NonNull MediaDescription getDescription() {
- if (mDescription != null) {
- return mDescription;
- }
-
- String mediaId = getString(METADATA_KEY_MEDIA_ID);
-
- CharSequence[] text = new CharSequence[3];
- Bitmap icon = null;
- Uri iconUri = null;
-
- // First handle the case where display data is set already
- CharSequence displayText = getText(METADATA_KEY_DISPLAY_TITLE);
- if (!TextUtils.isEmpty(displayText)) {
- // If they have a display title use only display data, otherwise use
- // our best bets
- text[0] = displayText;
- text[1] = getText(METADATA_KEY_DISPLAY_SUBTITLE);
- text[2] = getText(METADATA_KEY_DISPLAY_DESCRIPTION);
- } else {
- // Use whatever fields we can
- int textIndex = 0;
- int keyIndex = 0;
- while (textIndex < text.length && keyIndex < PREFERRED_DESCRIPTION_ORDER.length) {
- CharSequence next = getText(PREFERRED_DESCRIPTION_ORDER[keyIndex++]);
- if (!TextUtils.isEmpty(next)) {
- // Fill in the next empty bit of text
- text[textIndex++] = next;
- }
- }
- }
-
- // Get the best art bitmap we can find
- for (int i = 0; i < PREFERRED_BITMAP_ORDER.length; i++) {
- Bitmap next = getBitmap(PREFERRED_BITMAP_ORDER[i]);
- if (next != null) {
- icon = next;
- break;
- }
- }
-
- // Get the best Uri we can find
- for (int i = 0; i < PREFERRED_URI_ORDER.length; i++) {
- String next = getString(PREFERRED_URI_ORDER[i]);
- if (!TextUtils.isEmpty(next)) {
- iconUri = Uri.parse(next);
- break;
- }
- }
-
- Uri mediaUri = null;
- String mediaUriStr = getString(METADATA_KEY_MEDIA_URI);
- if (!TextUtils.isEmpty(mediaUriStr)) {
- mediaUri = Uri.parse(mediaUriStr);
- }
-
- MediaDescription.Builder bob = new MediaDescription.Builder();
- bob.setMediaId(mediaId);
- bob.setTitle(text[0]);
- bob.setSubtitle(text[1]);
- bob.setDescription(text[2]);
- bob.setIconBitmap(icon);
- bob.setIconUri(iconUri);
- bob.setMediaUri(mediaUri);
- if (mBundle.containsKey(METADATA_KEY_BT_FOLDER_TYPE)) {
- Bundle bundle = new Bundle();
- bundle.putLong(MediaDescription.EXTRA_BT_FOLDER_TYPE,
- getLong(METADATA_KEY_BT_FOLDER_TYPE));
- bob.setExtras(bundle);
- }
- mDescription = bob.build();
-
- return mDescription;
- }
-
- /**
- * Helper for getting the String key used by {@link MediaMetadata} from the
- * integer key that {@link MediaMetadataEditor} uses.
- *
- * @param editorKey The key used by the editor
- * @return The key used by this class or null if no mapping exists
- * @hide
- */
- @UnsupportedAppUsage
- public static String getKeyFromMetadataEditorKey(int editorKey) {
- return EDITOR_KEY_MAPPING.get(editorKey, null);
- }
-
- public static final Parcelable.Creator<MediaMetadata> CREATOR =
- new Parcelable.Creator<MediaMetadata>() {
- @Override
- public MediaMetadata createFromParcel(Parcel in) {
- return new MediaMetadata(in);
- }
-
- @Override
- public MediaMetadata[] newArray(int size) {
- return new MediaMetadata[size];
- }
- };
-
- /**
- * Compares the contents of this object to another MediaMetadata object. It
- * does not compare Bitmaps and Ratings as the media player can choose to
- * forgo these fields depending on how you retrieve the MediaMetadata.
- *
- * @param o The Metadata object to compare this object against
- * @return Whether or not the two objects have matching fields (excluding
- * Bitmaps and Ratings)
- */
- @Override
- public boolean equals(Object o) {
- if (o == this) {
- return true;
- }
-
- if (!(o instanceof MediaMetadata)) {
- return false;
- }
-
- final MediaMetadata m = (MediaMetadata) o;
-
- for (int i = 0; i < METADATA_KEYS_TYPE.size(); i++) {
- String key = METADATA_KEYS_TYPE.keyAt(i);
- switch (METADATA_KEYS_TYPE.valueAt(i)) {
- case METADATA_TYPE_TEXT:
- if (!Objects.equals(getString(key), m.getString(key))) {
- return false;
- }
- break;
- case METADATA_TYPE_LONG:
- if (getLong(key) != m.getLong(key)) {
- return false;
- }
- break;
- default:
- // Ignore ratings and bitmaps when comparing
- break;
- }
- }
-
- return true;
- }
-
- @Override
- public int hashCode() {
- int hashCode = 17;
-
- for (int i = 0; i < METADATA_KEYS_TYPE.size(); i++) {
- String key = METADATA_KEYS_TYPE.keyAt(i);
- switch (METADATA_KEYS_TYPE.valueAt(i)) {
- case METADATA_TYPE_TEXT:
- hashCode = 31 * hashCode + Objects.hash(getString(key));
- break;
- case METADATA_TYPE_LONG:
- hashCode = 31 * hashCode + Long.hashCode(getLong(key));
- break;
- default:
- // Ignore ratings and bitmaps when hashing
- break;
- }
- }
-
- return hashCode;
- }
-
- /**
- * Use to build MediaMetadata objects. The system defined metadata keys must
- * use the appropriate data type.
- */
- public static final class Builder {
- private final Bundle mBundle;
-
- /**
- * Create an empty Builder. Any field that should be included in the
- * {@link MediaMetadata} must be added.
- */
- public Builder() {
- mBundle = new Bundle();
- }
-
- /**
- * Create a Builder using a {@link MediaMetadata} instance to set the
- * initial values. All fields in the source metadata will be included in
- * the new metadata. Fields can be overwritten by adding the same key.
- *
- * @param source
- */
- public Builder(MediaMetadata source) {
- mBundle = new Bundle(source.mBundle);
- }
-
- /**
- * Create a Builder using a {@link MediaMetadata} instance to set
- * initial values, but replace bitmaps with a scaled down copy if they
- * are larger than maxBitmapSize.
- *
- * @param source The original metadata to copy.
- * @param maxBitmapSize The maximum height/width for bitmaps contained
- * in the metadata.
- * @hide
- */
- public Builder(MediaMetadata source, int maxBitmapSize) {
- this(source);
- for (String key : mBundle.keySet()) {
- Object value = mBundle.get(key);
- if (value != null && value instanceof Bitmap) {
- Bitmap bmp = (Bitmap) value;
- if (bmp.getHeight() > maxBitmapSize || bmp.getWidth() > maxBitmapSize) {
- putBitmap(key, scaleBitmap(bmp, maxBitmapSize));
- }
- }
- }
- }
-
- /**
- * Put a CharSequence value into the metadata. Custom keys may be used,
- * but if the METADATA_KEYs defined in this class are used they may only
- * be one of the following:
- * <ul>
- * <li>{@link #METADATA_KEY_TITLE}</li>
- * <li>{@link #METADATA_KEY_ARTIST}</li>
- * <li>{@link #METADATA_KEY_ALBUM}</li>
- * <li>{@link #METADATA_KEY_AUTHOR}</li>
- * <li>{@link #METADATA_KEY_WRITER}</li>
- * <li>{@link #METADATA_KEY_COMPOSER}</li>
- * <li>{@link #METADATA_KEY_DATE}</li>
- * <li>{@link #METADATA_KEY_GENRE}</li>
- * <li>{@link #METADATA_KEY_ALBUM_ARTIST}</li>
- * <li>{@link #METADATA_KEY_ART_URI}</li>
- * <li>{@link #METADATA_KEY_ALBUM_ART_URI}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_TITLE}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_SUBTITLE}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_DESCRIPTION}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_ICON_URI}</li>
- * </ul>
- *
- * @param key The key for referencing this value
- * @param value The CharSequence value to store
- * @return The Builder to allow chaining
- */
- public Builder putText(@TextKey String key, CharSequence value) {
- if (METADATA_KEYS_TYPE.containsKey(key)) {
- if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_TEXT) {
- throw new IllegalArgumentException("The " + key
- + " key cannot be used to put a CharSequence");
- }
- }
- mBundle.putCharSequence(key, value);
- return this;
- }
-
- /**
- * Put a String value into the metadata. Custom keys may be used, but if
- * the METADATA_KEYs defined in this class are used they may only be one
- * of the following:
- * <ul>
- * <li>{@link #METADATA_KEY_TITLE}</li>
- * <li>{@link #METADATA_KEY_ARTIST}</li>
- * <li>{@link #METADATA_KEY_ALBUM}</li>
- * <li>{@link #METADATA_KEY_AUTHOR}</li>
- * <li>{@link #METADATA_KEY_WRITER}</li>
- * <li>{@link #METADATA_KEY_COMPOSER}</li>
- * <li>{@link #METADATA_KEY_DATE}</li>
- * <li>{@link #METADATA_KEY_GENRE}</li>
- * <li>{@link #METADATA_KEY_ALBUM_ARTIST}</li>
- * <li>{@link #METADATA_KEY_ART_URI}</li>
- * <li>{@link #METADATA_KEY_ALBUM_ART_URI}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_TITLE}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_SUBTITLE}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_DESCRIPTION}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_ICON_URI}</li>
- * </ul>
- * <p>
- * Uris for artwork should use the content:// style and support
- * {@link ContentResolver#EXTRA_SIZE} for retrieving scaled artwork
- * through {@link ContentResolver#openTypedAssetFileDescriptor(Uri,
- * String, Bundle)}.
- *
- * @param key The key for referencing this value
- * @param value The String value to store
- * @return The Builder to allow chaining
- */
- public Builder putString(@TextKey String key, String value) {
- if (METADATA_KEYS_TYPE.containsKey(key)) {
- if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_TEXT) {
- throw new IllegalArgumentException("The " + key
- + " key cannot be used to put a String");
- }
- }
- mBundle.putCharSequence(key, value);
- return this;
- }
-
- /**
- * Put a long value into the metadata. Custom keys may be used, but if
- * the METADATA_KEYs defined in this class are used they may only be one
- * of the following:
- * <ul>
- * <li>{@link #METADATA_KEY_DURATION}</li>
- * <li>{@link #METADATA_KEY_TRACK_NUMBER}</li>
- * <li>{@link #METADATA_KEY_NUM_TRACKS}</li>
- * <li>{@link #METADATA_KEY_DISC_NUMBER}</li>
- * <li>{@link #METADATA_KEY_YEAR}</li>
- * </ul>
- *
- * @param key The key for referencing this value
- * @param value The long value to store
- * @return The Builder to allow chaining
- */
- public Builder putLong(@LongKey String key, long value) {
- if (METADATA_KEYS_TYPE.containsKey(key)) {
- if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_LONG) {
- throw new IllegalArgumentException("The " + key
- + " key cannot be used to put a long");
- }
- }
- mBundle.putLong(key, value);
- return this;
- }
-
- /**
- * Put a {@link Rating} into the metadata. Custom keys may be used, but
- * if the METADATA_KEYs defined in this class are used they may only be
- * one of the following:
- * <ul>
- * <li>{@link #METADATA_KEY_RATING}</li>
- * <li>{@link #METADATA_KEY_USER_RATING}</li>
- * </ul>
- *
- * @param key The key for referencing this value
- * @param value The Rating value to store
- * @return The Builder to allow chaining
- */
- public Builder putRating(@RatingKey String key, Rating value) {
- if (METADATA_KEYS_TYPE.containsKey(key)) {
- if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_RATING) {
- throw new IllegalArgumentException("The " + key
- + " key cannot be used to put a Rating");
- }
- }
- mBundle.putParcelable(key, value);
- return this;
- }
-
- /**
- * Put a {@link Bitmap} into the metadata. Custom keys may be used, but
- * if the METADATA_KEYs defined in this class are used they may only be
- * one of the following:
- * <ul>
- * <li>{@link #METADATA_KEY_ART}</li>
- * <li>{@link #METADATA_KEY_ALBUM_ART}</li>
- * <li>{@link #METADATA_KEY_DISPLAY_ICON}</li>
- * </ul>
- * <p>
- * Large bitmaps may be scaled down by the system when
- * {@link android.media.session.MediaSession#setMetadata} is called.
- * To pass full resolution images {@link Uri Uris} should be used with
- * {@link #putString}.
- *
- * @param key The key for referencing this value
- * @param value The Bitmap to store
- * @return The Builder to allow chaining
- */
- public Builder putBitmap(@BitmapKey String key, Bitmap value) {
- if (METADATA_KEYS_TYPE.containsKey(key)) {
- if (METADATA_KEYS_TYPE.get(key) != METADATA_TYPE_BITMAP) {
- throw new IllegalArgumentException("The " + key
- + " key cannot be used to put a Bitmap");
- }
- }
- mBundle.putParcelable(key, value);
- return this;
- }
-
- /**
- * Creates a {@link MediaMetadata} instance with the specified fields.
- *
- * @return The new MediaMetadata instance
- */
- public MediaMetadata build() {
- return new MediaMetadata(mBundle);
- }
-
- private Bitmap scaleBitmap(Bitmap bmp, int maxSize) {
- float maxSizeF = maxSize;
- float widthScale = maxSizeF / bmp.getWidth();
- float heightScale = maxSizeF / bmp.getHeight();
- float scale = Math.min(widthScale, heightScale);
- int height = (int) (bmp.getHeight() * scale);
- int width = (int) (bmp.getWidth() * scale);
- return Bitmap.createScaledBitmap(bmp, width, height, true);
- }
- }
-}
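
The MediaMetadata implementation removed above is constructed only through its Builder, whose typed put methods validate the system-defined keys, and getDescription() derives display data from the stored fields. A minimal usage sketch of that API, with hypothetical titles and values, assuming the standard android.media.MediaMetadata constants:

import android.media.MediaDescription;
import android.media.MediaMetadata;
import android.media.Rating;

final class MediaMetadataExample {
    // Builds a MediaMetadata with a few system-defined keys and derives the
    // display description from it. All values here are hypothetical.
    static MediaDescription buildExample() {
        MediaMetadata metadata = new MediaMetadata.Builder()
                .putString(MediaMetadata.METADATA_KEY_TITLE, "Example Title")
                .putString(MediaMetadata.METADATA_KEY_ARTIST, "Example Artist")
                .putLong(MediaMetadata.METADATA_KEY_DURATION, 180_000L) // milliseconds
                .putRating(MediaMetadata.METADATA_KEY_USER_RATING,
                        Rating.newStarRating(Rating.RATING_5_STARS, 4.0f))
                .build();
        // getDescription() prefers the DISPLAY_* keys and otherwise falls back to
        // the preferred title/artist/album order implemented above.
        return metadata.getDescription();
    }
}
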
diff --git a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl
deleted file mode 100644
index 228ea9c..0000000
--- a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.aidl
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (C) 2018, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/** @hide */
-parcelable MediaParceledListSlice;
\ No newline at end of file
diff --git a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java b/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java
deleted file mode 100644
index ec3fdb7..0000000
--- a/packages/MediaComponents/apex/java/android/media/MediaParceledListSlice.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.annotation.UnsupportedAppUsage;
-import android.os.Binder;
-import android.os.Build;
-import android.os.IBinder;
-import android.os.Parcel;
-import android.os.Parcelable;
-import android.os.RemoteException;
-import android.util.Log;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * Transfer a large list of objects across an IPC. Splits into multiple transactions if needed.
- * Note: only use with classes declared final, to prevent subclasses from overriding the
- * parcel read/write logic.
- *
- * TODO: Add test for sending large data
- * @hide
- */
-public class MediaParceledListSlice<T extends Parcelable> implements Parcelable {
- private static final String TAG = "MediaParceledListSlice";
- private static final boolean DEBUG = false;
-
- private static final int MAX_IPC_SIZE = 64 * 1024; // IBinder.MAX_IPC_SIZE
-
- final List<T> mList;
-
- public MediaParceledListSlice(List<T> list) {
- if (list == null) {
- throw new IllegalArgumentException("list shouldn't be null");
- }
- mList = list;
- }
-
- MediaParceledListSlice(Parcel p) {
- final int itemCount = p.readInt();
- mList = new ArrayList<>(itemCount);
- if (DEBUG) {
- Log.d(TAG, "Retrieving " + itemCount + " items");
- }
- if (itemCount <= 0) {
- return;
- }
-
- int i = 0;
- while (i < itemCount) {
- if (p.readInt() == 0) {
- break;
- }
-
- final T parcelable = p.readParcelable(null);
- mList.add(parcelable);
-
- if (DEBUG) {
- Log.d(TAG, "Read inline #" + i + ": " + mList.get(mList.size() - 1));
- }
- i++;
- }
- if (i >= itemCount) {
- return;
- }
- final IBinder retriever = p.readStrongBinder();
- while (i < itemCount) {
- if (DEBUG) {
- Log.d(TAG, "Reading more @" + i + " of " + itemCount + ": retriever=" + retriever);
- }
- Parcel data = Parcel.obtain();
- Parcel reply = Parcel.obtain();
- data.writeInt(i);
- try {
- retriever.transact(IBinder.FIRST_CALL_TRANSACTION, data, reply, 0);
- } catch (RemoteException e) {
- Log.w(TAG, "Failure retrieving array; only received " + i + " of " + itemCount, e);
- return;
- }
- while (i < itemCount && reply.readInt() != 0) {
- final T parcelable = reply.readParcelable(null);
- mList.add(parcelable);
-
- if (DEBUG) {
- Log.d(TAG, "Read extra #" + i + ": " + mList.get(mList.size() - 1));
- }
- i++;
- }
- reply.recycle();
- data.recycle();
- }
- }
-
- public List<T> getList() {
- return mList;
- }
-
- /**
- * Write this to another Parcel. Items that do not fit within the inline IPC budget are
- * served later through the Binder retriever written at the end of the parcel, so the
- * backing list must remain valid until the receiving side has finished reading it.
- */
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- final int itemCount = mList.size();
- dest.writeInt(itemCount);
- if (DEBUG) {
- Log.d(TAG, "Writing " + itemCount + " items");
- }
- if (itemCount > 0) {
- int i = 0;
- while (i < itemCount && dest.dataSize() < MAX_IPC_SIZE) {
- dest.writeInt(1);
-
- final T parcelable = mList.get(i);
- dest.writeParcelable(parcelable, flags);
-
- if (DEBUG) {
- Log.d(TAG, "Wrote inline #" + i + ": " + mList.get(i));
- }
- i++;
- }
- if (i < itemCount) {
- dest.writeInt(0);
- Binder retriever = new Binder() {
- @Override
- protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
- throws RemoteException {
- if (code != FIRST_CALL_TRANSACTION) {
- return super.onTransact(code, data, reply, flags);
- }
- int i = data.readInt();
- if (DEBUG) {
- Log.d(TAG, "Writing more @" + i + " of " + itemCount);
- }
- while (i < itemCount && reply.dataSize() < MAX_IPC_SIZE) {
- reply.writeInt(1);
-
- final T parcelable = mList.get(i);
- reply.writeParcelable(parcelable, flags);
-
- if (DEBUG) {
- Log.d(TAG, "Wrote extra #" + i + ": " + mList.get(i));
- }
- i++;
- }
- if (i < itemCount) {
- if (DEBUG) {
- Log.d(TAG, "Breaking @" + i + " of " + itemCount);
- }
- reply.writeInt(0);
- }
- return true;
- }
- };
- if (DEBUG) {
- Log.d(TAG, "Breaking @" + i + " of " + itemCount + ": retriever=" + retriever);
- }
- dest.writeStrongBinder(retriever);
- }
- }
- }
-
- @Override
- public int describeContents() {
- int contents = 0;
- final List<T> list = getList();
- for (int i = 0; i < list.size(); i++) {
- contents |= list.get(i).describeContents();
- }
- return contents;
- }
-
- public static final Parcelable.Creator<MediaParceledListSlice> CREATOR =
- new Parcelable.Creator<MediaParceledListSlice>() {
- @Override
- public MediaParceledListSlice createFromParcel(Parcel in) {
- return new MediaParceledListSlice(in);
- }
-
- @Override
- public MediaParceledListSlice[] newArray(int size) {
- return new MediaParceledListSlice[size];
- }
- };
-}
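
MediaParceledListSlice, removed above, writes list items inline until the parcel approaches MAX_IPC_SIZE and then hands the receiver a Binder that pages in the remainder on demand. Since the class is @hide, the sketch below only illustrates the internal pattern; the wrapper/unwrapper methods are hypothetical and the item type is the MediaItem consumed elsewhere in this patch:

import android.media.MediaParceledListSlice;
import android.media.browse.MediaBrowser.MediaItem;

import java.util.List;

final class ParceledListSliceExample {
    // Sender side: wrap a possibly very large list before handing it across an
    // AIDL boundary. Items beyond the ~64 KB inline budget are fetched lazily
    // through the Binder retriever that writeToParcel() appends.
    static MediaParceledListSlice<MediaItem> wrap(List<MediaItem> children) {
        return new MediaParceledListSlice<>(children);
    }

    // Receiver side: getList() returns the reassembled list; any extra Binder
    // round-trips already happened while the parcel was being read.
    static List<MediaItem> unwrap(MediaParceledListSlice<MediaItem> slice) {
        return slice.getList();
    }
}
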
diff --git a/packages/MediaComponents/apex/java/android/media/Rating.aidl b/packages/MediaComponents/apex/java/android/media/Rating.aidl
deleted file mode 100644
index 1dc336a..0000000
--- a/packages/MediaComponents/apex/java/android/media/Rating.aidl
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-parcelable Rating;
diff --git a/packages/MediaComponents/apex/java/android/media/Rating.java b/packages/MediaComponents/apex/java/android/media/Rating.java
deleted file mode 100644
index 04d5364..0000000
--- a/packages/MediaComponents/apex/java/android/media/Rating.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.annotation.IntDef;
-import android.os.Parcel;
-import android.os.Parcelable;
-import android.util.Log;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-
-/**
- * A class to encapsulate rating information used as content metadata.
- * A rating is defined by its rating style (see {@link #RATING_HEART},
- * {@link #RATING_THUMB_UP_DOWN}, {@link #RATING_3_STARS}, {@link #RATING_4_STARS},
- * {@link #RATING_5_STARS} or {@link #RATING_PERCENTAGE}) and the actual rating value (which may
- * be defined as "unrated"), both of which are defined when the rating instance is constructed
- * through one of the factory methods.
- */
-public final class Rating implements Parcelable {
- private final static String TAG = "Rating";
-
- /**
- * @hide
- */
- @IntDef({RATING_NONE, RATING_HEART, RATING_THUMB_UP_DOWN, RATING_3_STARS, RATING_4_STARS,
- RATING_5_STARS, RATING_PERCENTAGE})
- @Retention(RetentionPolicy.SOURCE)
- public @interface Style {}
-
- /**
- * @hide
- */
- @IntDef({RATING_3_STARS, RATING_4_STARS, RATING_5_STARS})
- @Retention(RetentionPolicy.SOURCE)
- public @interface StarStyle {}
-
- /**
- * Indicates a rating style is not supported. A Rating will never have this
- * type, but can be used by other classes to indicate they do not support
- * Rating.
- */
- public final static int RATING_NONE = 0;
-
- /**
- * A rating style with a single degree of rating, "heart" vs "no heart". Can be used to
- * indicate the content referred to is a favorite (or not).
- */
- public final static int RATING_HEART = 1;
-
- /**
- * A rating style for "thumb up" vs "thumb down".
- */
- public final static int RATING_THUMB_UP_DOWN = 2;
-
- /**
- * A rating style with 0 to 3 stars.
- */
- public final static int RATING_3_STARS = 3;
-
- /**
- * A rating style with 0 to 4 stars.
- */
- public final static int RATING_4_STARS = 4;
-
- /**
- * A rating style with 0 to 5 stars.
- */
- public final static int RATING_5_STARS = 5;
-
- /**
- * A rating style expressed as a percentage.
- */
- public final static int RATING_PERCENTAGE = 6;
-
- private final static float RATING_NOT_RATED = -1.0f;
-
- private final int mRatingStyle;
-
- private final float mRatingValue;
-
- private Rating(@Style int ratingStyle, float rating) {
- mRatingStyle = ratingStyle;
- mRatingValue = rating;
- }
-
- @Override
- public String toString() {
- return "Rating:style=" + mRatingStyle + " rating="
- + (mRatingValue < 0.0f ? "unrated" : String.valueOf(mRatingValue));
- }
-
- @Override
- public int describeContents() {
- return mRatingStyle;
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- dest.writeInt(mRatingStyle);
- dest.writeFloat(mRatingValue);
- }
-
- public static final Parcelable.Creator<Rating> CREATOR
- = new Parcelable.Creator<Rating>() {
- /**
- * Rebuilds a Rating previously stored with writeToParcel().
- * @param p Parcel object to read the Rating from
- * @return a new Rating created from the data in the parcel
- */
- @Override
- public Rating createFromParcel(Parcel p) {
- return new Rating(p.readInt(), p.readFloat());
- }
-
- @Override
- public Rating[] newArray(int size) {
- return new Rating[size];
- }
- };
-
- /**
- * Return a Rating instance with no rating.
- * Create and return a new Rating instance with no rating known for the given
- * rating style.
- * @param ratingStyle one of {@link #RATING_HEART}, {@link #RATING_THUMB_UP_DOWN},
- * {@link #RATING_3_STARS}, {@link #RATING_4_STARS}, {@link #RATING_5_STARS},
- * or {@link #RATING_PERCENTAGE}.
- * @return null if an invalid rating style is passed, a new Rating instance otherwise.
- */
- public static Rating newUnratedRating(@Style int ratingStyle) {
- switch(ratingStyle) {
- case RATING_HEART:
- case RATING_THUMB_UP_DOWN:
- case RATING_3_STARS:
- case RATING_4_STARS:
- case RATING_5_STARS:
- case RATING_PERCENTAGE:
- return new Rating(ratingStyle, RATING_NOT_RATED);
- default:
- return null;
- }
- }
-
- /**
- * Return a Rating instance with a heart-based rating.
- * Create and return a new Rating instance with a rating style of {@link #RATING_HEART},
- * and a heart-based rating.
- * @param hasHeart true for a "heart selected" rating, false for "heart unselected".
- * @return a new Rating instance.
- */
- public static Rating newHeartRating(boolean hasHeart) {
- return new Rating(RATING_HEART, hasHeart ? 1.0f : 0.0f);
- }
-
- /**
- * Return a Rating instance with a thumb-based rating.
- * Create and return a new Rating instance with a {@link #RATING_THUMB_UP_DOWN}
- * rating style, and a "thumb up" or "thumb down" rating.
- * @param thumbIsUp true for a "thumb up" rating, false for "thumb down".
- * @return a new Rating instance.
- */
- public static Rating newThumbRating(boolean thumbIsUp) {
- return new Rating(RATING_THUMB_UP_DOWN, thumbIsUp ? 1.0f : 0.0f);
- }
-
- /**
- * Return a Rating instance with a star-based rating.
- * Create and return a new Rating instance with one of the star-based rating styles
- * and the given integer or fractional number of stars. Non-integer values can, for instance,
- * be used to represent an average rating value, which might not be an integer number of stars.
- * @param starRatingStyle one of {@link #RATING_3_STARS}, {@link #RATING_4_STARS},
- * {@link #RATING_5_STARS}.
- * @param starRating a number ranging from 0.0f to 3.0f, 4.0f or 5.0f according to
- * the rating style.
- * @return null if the rating style is invalid, or the rating is out of range,
- * a new Rating instance otherwise.
- */
- public static Rating newStarRating(@StarStyle int starRatingStyle, float starRating) {
- float maxRating = -1.0f;
- switch(starRatingStyle) {
- case RATING_3_STARS:
- maxRating = 3.0f;
- break;
- case RATING_4_STARS:
- maxRating = 4.0f;
- break;
- case RATING_5_STARS:
- maxRating = 5.0f;
- break;
- default:
- Log.e(TAG, "Invalid rating style (" + starRatingStyle + ") for a star rating");
- return null;
- }
- if ((starRating < 0.0f) || (starRating > maxRating)) {
- Log.e(TAG, "Trying to set out of range star-based rating");
- return null;
- }
- return new Rating(starRatingStyle, starRating);
- }
-
- /**
- * Return a Rating instance with a percentage-based rating.
- * Create and return a new Rating instance with a {@link #RATING_PERCENTAGE}
- * rating style, and a rating of the given percentage.
- * @param percent the value of the rating
- * @return null if the rating is out of range, a new Rating instance otherwise.
- */
- public static Rating newPercentageRating(float percent) {
- if ((percent < 0.0f) || (percent > 100.0f)) {
- Log.e(TAG, "Invalid percentage-based rating value");
- return null;
- } else {
- return new Rating(RATING_PERCENTAGE, percent);
- }
- }
-
- /**
- * Return whether there is a rating value available.
- * @return true if the instance was not created with {@link #newUnratedRating(int)}.
- */
- public boolean isRated() {
- return mRatingValue >= 0.0f;
- }
-
- /**
- * Return the rating style.
- * @return one of {@link #RATING_HEART}, {@link #RATING_THUMB_UP_DOWN},
- * {@link #RATING_3_STARS}, {@link #RATING_4_STARS}, {@link #RATING_5_STARS},
- * or {@link #RATING_PERCENTAGE}.
- */
- @Style
- public int getRatingStyle() {
- return mRatingStyle;
- }
-
- /**
- * Return whether the rating is "heart selected".
- * @return true if the rating is "heart selected", false if the rating is "heart unselected",
- * if the rating style is not {@link #RATING_HEART}, or if it is unrated.
- */
- public boolean hasHeart() {
- if (mRatingStyle != RATING_HEART) {
- return false;
- } else {
- return (mRatingValue == 1.0f);
- }
- }
-
- /**
- * Return whether the rating is "thumb up".
- * @return true if the rating is "thumb up", false if the rating is "thumb down",
- * if the rating style is not {@link #RATING_THUMB_UP_DOWN}, or if it is unrated.
- */
- public boolean isThumbUp() {
- if (mRatingStyle != RATING_THUMB_UP_DOWN) {
- return false;
- } else {
- return (mRatingValue == 1.0f);
- }
- }
-
- /**
- * Return the star-based rating value.
- * @return a rating value greater or equal to 0.0f, or a negative value if the rating style is
- * not star-based, or if it is unrated.
- */
- public float getStarRating() {
- switch (mRatingStyle) {
- case RATING_3_STARS:
- case RATING_4_STARS:
- case RATING_5_STARS:
- if (isRated()) {
- return mRatingValue;
- }
- default:
- return -1.0f;
- }
- }
-
- /**
- * Return the percentage-based rating value.
- * @return a rating value greater or equal to 0.0f, or a negative value if the rating style is
- * not percentage-based, or if it is unrated.
- */
- public float getPercentRating() {
- if ((mRatingStyle != RATING_PERCENTAGE) || !isRated()) {
- return -1.0f;
- } else {
- return mRatingValue;
- }
- }
-}
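
Rating, removed above, is constructed only through its static factories, which return null for an invalid style or an out-of-range value rather than throwing. A short sketch of typical construction and inspection, using hypothetical values:

import android.media.Rating;

final class RatingExample {
    // Returns the star value of a valid rating, or -1.0f when the rating could
    // not be built or is unrated.
    static float demonstrate() {
        // Factory methods validate style and range; an invalid combination yields null.
        Rating stars = Rating.newStarRating(Rating.RATING_5_STARS, 3.5f);
        Rating invalid = Rating.newStarRating(Rating.RATING_3_STARS, 4.0f); // out of range -> null
        Rating unrated = Rating.newUnratedRating(Rating.RATING_PERCENTAGE);

        // An unrated instance reports isRated() == false and negative getter values.
        if (invalid == null && !unrated.isRated() && stars != null && stars.isRated()) {
            return stars.getStarRating(); // 3.5f for the 5-star style above
        }
        return -1.0f;
    }
}
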
diff --git a/packages/MediaComponents/apex/java/android/media/VolumeProvider.java b/packages/MediaComponents/apex/java/android/media/VolumeProvider.java
deleted file mode 100644
index 1c017c5..0000000
--- a/packages/MediaComponents/apex/java/android/media/VolumeProvider.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-import android.annotation.IntDef;
-import android.media.session.MediaSession;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-
-/**
- * Handles requests to adjust or set the volume on a session. This is also used
- * to push volume updates back to the session. The provider must call
- * {@link #setCurrentVolume(int)} each time the volume being provided changes.
- * <p>
- * You can set a volume provider on a session by calling
- * {@link MediaSession#setPlaybackToRemote}.
- */
-public abstract class VolumeProvider {
-
- /**
- * @hide
- */
- @IntDef({VOLUME_CONTROL_FIXED, VOLUME_CONTROL_RELATIVE, VOLUME_CONTROL_ABSOLUTE})
- @Retention(RetentionPolicy.SOURCE)
- public @interface ControlType {}
-
- /**
- * The volume is fixed and can not be modified. Requests to change volume
- * should be ignored.
- */
- public static final int VOLUME_CONTROL_FIXED = 0;
-
- /**
- * The volume control uses relative adjustment via
- * {@link #onAdjustVolume(int)}. Attempts to set the volume to a specific
- * value should be ignored.
- */
- public static final int VOLUME_CONTROL_RELATIVE = 1;
-
- /**
- * The volume control uses an absolute value. It may be adjusted using
- * {@link #onAdjustVolume(int)} or set directly using
- * {@link #onSetVolumeTo(int)}.
- */
- public static final int VOLUME_CONTROL_ABSOLUTE = 2;
-
- private final int mControlType;
- private final int mMaxVolume;
- private int mCurrentVolume;
- private Callback mCallback;
-
- /**
- * Create a new volume provider for handling volume events. You must specify
- * the type of volume control, the maximum volume that can be used, and the
- * current volume on the output.
- *
- * @param volumeControl The method for controlling volume that is used by
- * this provider.
- * @param maxVolume The maximum allowed volume.
- * @param currentVolume The current volume on the output.
- */
- public VolumeProvider(@ControlType int volumeControl, int maxVolume, int currentVolume) {
- mControlType = volumeControl;
- mMaxVolume = maxVolume;
- mCurrentVolume = currentVolume;
- }
-
- /**
- * Get the volume control type that this volume provider uses.
- *
- * @return The volume control type for this volume provider
- */
- @ControlType
- public final int getVolumeControl() {
- return mControlType;
- }
-
- /**
- * Get the maximum volume this provider allows.
- *
- * @return The max allowed volume.
- */
- public final int getMaxVolume() {
- return mMaxVolume;
- }
-
- /**
- * Gets the current volume. This will be the last value set by
- * {@link #setCurrentVolume(int)}.
- *
- * @return The current volume.
- */
- public final int getCurrentVolume() {
- return mCurrentVolume;
- }
-
- /**
- * Notify the system that the current volume has been changed. This must be
- * called every time the volume changes to ensure it is displayed properly.
- *
- * @param currentVolume The current volume on the output.
- */
- public final void setCurrentVolume(int currentVolume) {
- mCurrentVolume = currentVolume;
- if (mCallback != null) {
- mCallback.onVolumeChanged(this);
- }
- }
-
- /**
- * Override to handle requests to set the volume of the current output.
- * After the volume has been modified {@link #setCurrentVolume} must be
- * called to notify the system.
- *
- * @param volume The volume to set the output to.
- */
- public void onSetVolumeTo(int volume) {
- }
-
- /**
- * Override to handle requests to adjust the volume of the current output.
- * Direction will be one of {@link AudioManager#ADJUST_LOWER},
- * {@link AudioManager#ADJUST_RAISE}, or {@link AudioManager#ADJUST_SAME}.
- * After the volume has been modified {@link #setCurrentVolume} must be
- * called to notify the system.
- *
- * @param direction The direction to change the volume in.
- */
- public void onAdjustVolume(int direction) {
- }
-
- /**
- * Sets a callback to receive volume changes.
- * @hide
- */
- public void setCallback(Callback callback) {
- mCallback = callback;
- }
-
- /**
- * Listens for changes to the volume.
- * @hide
- */
- public static abstract class Callback {
- public abstract void onVolumeChanged(VolumeProvider volumeProvider);
- }
-}
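
VolumeProvider, removed above, is the hook a session uses for remote-playback volume: subclasses override onSetVolumeTo()/onAdjustVolume() and must report the result back through setCurrentVolume(). A minimal sketch of wiring one to a MediaSession; the session instance and volume bounds are assumptions for illustration:

import android.media.AudioManager;
import android.media.VolumeProvider;
import android.media.session.MediaSession;

final class RemoteVolumeExample {
    // Attach an absolute-volume provider to an existing session. The session
    // object itself is assumed to be created and released elsewhere.
    static void attach(MediaSession session) {
        VolumeProvider provider =
                new VolumeProvider(VolumeProvider.VOLUME_CONTROL_ABSOLUTE,
                        /* maxVolume= */ 15, /* currentVolume= */ 5) {
                    @Override
                    public void onSetVolumeTo(int volume) {
                        // Forward the request to the remote output here, then report back.
                        setCurrentVolume(volume);
                    }

                    @Override
                    public void onAdjustVolume(int direction) {
                        int next = getCurrentVolume()
                                + (direction == AudioManager.ADJUST_RAISE ? 1
                                : direction == AudioManager.ADJUST_LOWER ? -1 : 0);
                        // Clamp to [0, maxVolume] and notify the system of the new value.
                        setCurrentVolume(Math.max(0, Math.min(getMaxVolume(), next)));
                    }
                };
        session.setPlaybackToRemote(provider);
    }
}
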
diff --git a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.aidl b/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.aidl
deleted file mode 100644
index 782e094..0000000
--- a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.aidl
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-package android.media.browse;
-
-parcelable MediaBrowser.MediaItem;
\ No newline at end of file
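
The MediaBrowser.java removal that follows carries the whole client-side browse API documented in its javadoc: construct, connect(), then subscribe()/getItem() once connected. A sketch of that documented flow; the component name and callback bodies are hypothetical placeholders:

import android.content.ComponentName;
import android.content.Context;
import android.media.browse.MediaBrowser;

import java.util.List;

final class MediaBrowserExample {
    // The component name is hypothetical; it must point at a real
    // MediaBrowserService for the connection to succeed.
    static MediaBrowser connect(Context context) {
        ComponentName service =
                new ComponentName("com.example.music", "com.example.music.BrowserService");
        MediaBrowser browser = new MediaBrowser(context, service,
                new MediaBrowser.ConnectionCallback() {
                    @Override
                    public void onConnected() {
                        // Root id and session token are only valid while connected.
                    }

                    @Override
                    public void onConnectionFailed() {
                        // Bind failed or the service rejected the connection.
                    }
                }, /* rootHints= */ null);
        browser.connect();
        return browser;
    }

    // Call only after ConnectionCallback.onConnected(); getRoot() throws otherwise.
    static void browseRoot(MediaBrowser browser) {
        browser.subscribe(browser.getRoot(), new MediaBrowser.SubscriptionCallback() {
            @Override
            public void onChildrenLoaded(String parentId, List<MediaBrowser.MediaItem> children) {
                // Children for the subscribed id; re-delivered whenever they change.
            }
        });
    }
}
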
diff --git a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java b/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
deleted file mode 100644
index b1b14c6..0000000
--- a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowser.java
+++ /dev/null
@@ -1,1171 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.browse;
-
-import android.annotation.IntDef;
-import android.annotation.NonNull;
-import android.annotation.Nullable;
-import android.content.ComponentName;
-import android.content.Context;
-import android.content.Intent;
-import android.content.ServiceConnection;
-import android.media.MediaDescription;
-import android.media.MediaParceledListSlice;
-import android.media.session.MediaController;
-import android.media.session.MediaSession;
-import android.os.Binder;
-import android.os.Bundle;
-import android.os.Handler;
-import android.os.IBinder;
-import android.os.Parcel;
-import android.os.Parcelable;
-import android.os.RemoteException;
-import android.os.ResultReceiver;
-import android.service.media.IMediaBrowserService;
-import android.service.media.IMediaBrowserServiceCallbacks;
-import android.service.media.MediaBrowserService;
-import android.text.TextUtils;
-import android.util.ArrayMap;
-import android.util.Log;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.ref.WeakReference;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map.Entry;
-
-/**
- * Browses media content offered by a {@link MediaBrowserService}.
- * <p>
- * This object is not thread-safe. All calls should happen on the thread on which the browser
- * was constructed.
- * </p>
- * <h3>Standard Extra Data</h3>
- *
- * <p>These are the current standard fields that can be used as extra data via
- * {@link #subscribe(String, Bundle, SubscriptionCallback)},
- * {@link #unsubscribe(String, SubscriptionCallback)}, and
- * {@link SubscriptionCallback#onChildrenLoaded(String, List, Bundle)}.
- *
- * <ul>
- * <li> {@link #EXTRA_PAGE}
- * <li> {@link #EXTRA_PAGE_SIZE}
- * </ul>
- */
-public final class MediaBrowser {
- private static final String TAG = "MediaBrowser";
- private static final boolean DBG = false;
-
- /**
- * Used as an int extra field to denote the page number to subscribe.
- * The value of {@code EXTRA_PAGE} should be greater than or equal to 0.
- *
- * @see #EXTRA_PAGE_SIZE
- */
- public static final String EXTRA_PAGE = "android.media.browse.extra.PAGE";
-
- /**
- * Used as an int extra field to denote the number of media items in a page.
- * The value of {@code EXTRA_PAGE_SIZE} should be greater than or equal to 1.
- *
- * @see #EXTRA_PAGE
- */
- public static final String EXTRA_PAGE_SIZE = "android.media.browse.extra.PAGE_SIZE";
-
- private static final int CONNECT_STATE_DISCONNECTING = 0;
- private static final int CONNECT_STATE_DISCONNECTED = 1;
- private static final int CONNECT_STATE_CONNECTING = 2;
- private static final int CONNECT_STATE_CONNECTED = 3;
- private static final int CONNECT_STATE_SUSPENDED = 4;
-
- private final Context mContext;
- private final ComponentName mServiceComponent;
- private final ConnectionCallback mCallback;
- private final Bundle mRootHints;
- private final Handler mHandler = new Handler();
- private final ArrayMap<String, Subscription> mSubscriptions = new ArrayMap<>();
-
- private volatile int mState = CONNECT_STATE_DISCONNECTED;
- private volatile String mRootId;
- private volatile MediaSession.Token mMediaSessionToken;
- private volatile Bundle mExtras;
-
- private MediaServiceConnection mServiceConnection;
- private IMediaBrowserService mServiceBinder;
- private IMediaBrowserServiceCallbacks mServiceCallbacks;
-
- /**
- * Creates a media browser for the specified media browser service.
- *
- * @param context The context.
- * @param serviceComponent The component name of the media browser service.
- * @param callback The connection callback.
- * @param rootHints An optional bundle of service-specific arguments to send
- * to the media browser service when connecting and retrieving the root id
- * for browsing, or null if none. The contents of this bundle may affect
- * the information returned when browsing.
- * @see android.service.media.MediaBrowserService.BrowserRoot#EXTRA_RECENT
- * @see android.service.media.MediaBrowserService.BrowserRoot#EXTRA_OFFLINE
- * @see android.service.media.MediaBrowserService.BrowserRoot#EXTRA_SUGGESTED
- */
- public MediaBrowser(Context context, ComponentName serviceComponent,
- ConnectionCallback callback, Bundle rootHints) {
- if (context == null) {
- throw new IllegalArgumentException("context must not be null");
- }
- if (serviceComponent == null) {
- throw new IllegalArgumentException("service component must not be null");
- }
- if (callback == null) {
- throw new IllegalArgumentException("connection callback must not be null");
- }
- mContext = context;
- mServiceComponent = serviceComponent;
- mCallback = callback;
- mRootHints = rootHints == null ? null : new Bundle(rootHints);
- }
-
- /**
- * Connects to the media browser service.
- * <p>
- * The connection callback specified in the constructor will be invoked
- * when the connection completes or fails.
- * </p>
- */
- public void connect() {
- if (mState != CONNECT_STATE_DISCONNECTING && mState != CONNECT_STATE_DISCONNECTED) {
- throw new IllegalStateException("connect() called while neither disconnecting nor "
- + "disconnected (state=" + getStateLabel(mState) + ")");
- }
-
- mState = CONNECT_STATE_CONNECTING;
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- if (mState == CONNECT_STATE_DISCONNECTING) {
- return;
- }
- mState = CONNECT_STATE_CONNECTING;
- // TODO: remove this extra check.
- if (DBG) {
- if (mServiceConnection != null) {
- throw new RuntimeException("mServiceConnection should be null. Instead it"
- + " is " + mServiceConnection);
- }
- }
- if (mServiceBinder != null) {
- throw new RuntimeException("mServiceBinder should be null. Instead it is "
- + mServiceBinder);
- }
- if (mServiceCallbacks != null) {
- throw new RuntimeException("mServiceCallbacks should be null. Instead it is "
- + mServiceCallbacks);
- }
-
- final Intent intent = new Intent(MediaBrowserService.SERVICE_INTERFACE);
- intent.setComponent(mServiceComponent);
-
- mServiceConnection = new MediaServiceConnection();
-
- boolean bound = false;
- try {
- bound = mContext.bindService(intent, mServiceConnection,
- Context.BIND_AUTO_CREATE);
- } catch (Exception ex) {
- Log.e(TAG, "Failed binding to service " + mServiceComponent);
- }
-
- if (!bound) {
- // Tell them that it didn't work.
- forceCloseConnection();
- mCallback.onConnectionFailed();
- }
-
- if (DBG) {
- Log.d(TAG, "connect...");
- dump();
- }
- }
- });
- }
-
- /**
- * Disconnects from the media browser service.
- * After this, no more callbacks will be received.
- */
- public void disconnect() {
- // It's ok to call this in any state, because allowing this lets apps not have
- // to check isConnected() unnecessarily. They won't appreciate the extra
- // assertions for this. We do everything we can here to go back to a sane state.
- mState = CONNECT_STATE_DISCONNECTING;
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- // connect() could be called before this. Then we will disconnect and reconnect.
- if (mServiceCallbacks != null) {
- try {
- mServiceBinder.disconnect(mServiceCallbacks);
- } catch (RemoteException ex) {
- // We are disconnecting anyway. Log, just for posterity but it's not
- // a big problem.
- Log.w(TAG, "RemoteException during connect for " + mServiceComponent);
- }
- }
- int state = mState;
- forceCloseConnection();
- // If the state was not CONNECT_STATE_DISCONNECTING, keep the state so that
- // the operation came after disconnect() can be handled properly.
- if (state != CONNECT_STATE_DISCONNECTING) {
- mState = state;
- }
- if (DBG) {
- Log.d(TAG, "disconnect...");
- dump();
- }
- }
- });
- }
-
- /**
- * Null out the variables and unbind from the service. This doesn't include
- * calling disconnect on the service, because we only try to do that in the
- * clean shutdown cases.
- * <p>
- * Everywhere that calls this EXCEPT for disconnect() should follow it with
- * a call to mCallback.onConnectionFailed(). Disconnect doesn't do that callback
- * for a clean shutdown, but everywhere else is a dirty shutdown and should
- * notify the app.
- * <p>
- * Also, mState should be updated properly. Mostly it should be CONNECT_STATE_DISCONNECTED
- * except for disconnect().
- */
- private void forceCloseConnection() {
- if (mServiceConnection != null) {
- try {
- mContext.unbindService(mServiceConnection);
- } catch (IllegalArgumentException e) {
- if (DBG) {
- Log.d(TAG, "unbindService failed", e);
- }
- }
- }
- mState = CONNECT_STATE_DISCONNECTED;
- mServiceConnection = null;
- mServiceBinder = null;
- mServiceCallbacks = null;
- mRootId = null;
- mMediaSessionToken = null;
- }
-
- /**
- * Returns whether the browser is connected to the service.
- */
- public boolean isConnected() {
- return mState == CONNECT_STATE_CONNECTED;
- }
-
- /**
- * Gets the service component that the media browser is connected to.
- */
- public @NonNull ComponentName getServiceComponent() {
- if (!isConnected()) {
- throw new IllegalStateException("getServiceComponent() called while not connected" +
- " (state=" + mState + ")");
- }
- return mServiceComponent;
- }
-
- /**
- * Gets the root id.
- * <p>
- * Note that the root id may become invalid or change when the
- * browser is disconnected.
- * </p>
- *
- * @throws IllegalStateException if not connected.
- */
- public @NonNull String getRoot() {
- if (!isConnected()) {
- throw new IllegalStateException("getRoot() called while not connected (state="
- + getStateLabel(mState) + ")");
- }
- return mRootId;
- }
-
- /**
- * Gets any extras for the media service.
- *
- * @throws IllegalStateException if not connected.
- */
- public @Nullable Bundle getExtras() {
- if (!isConnected()) {
- throw new IllegalStateException("getExtras() called while not connected (state="
- + getStateLabel(mState) + ")");
- }
- return mExtras;
- }
-
- /**
- * Gets the media session token associated with the media browser.
- * <p>
- * Note that the session token may become invalid or change when the
- * browser is disconnected.
- * </p>
- *
- * @return The session token for the browser, never null.
- *
- * @throws IllegalStateException if not connected.
- */
- public @NonNull MediaSession.Token getSessionToken() {
- if (!isConnected()) {
- throw new IllegalStateException("getSessionToken() called while not connected (state="
- + mState + ")");
- }
- return mMediaSessionToken;
- }
-
- /**
- * Queries for information about the media items that are contained within
- * the specified id and subscribes to receive updates when they change.
- * <p>
- * The list of subscriptions is maintained even when not connected and is
- * restored after the reconnection. It is ok to subscribe while not connected
- * but the results will not be returned until the connection completes.
- * </p>
- * <p>
- * If the id is already subscribed with a different callback then the new
- * callback will replace the previous one and the child data will be
- * reloaded.
- * </p>
- *
- * @param parentId The id of the parent media item whose list of children
- * will be subscribed.
- * @param callback The callback to receive the list of children.
- */
- public void subscribe(@NonNull String parentId, @NonNull SubscriptionCallback callback) {
- subscribeInternal(parentId, null, callback);
- }
-
- /**
- * Queries with service-specific arguments for information about the media items
- * that are contained within the specified id and subscribes to receive updates
- * when they change.
- * <p>
- * The list of subscriptions is maintained even when not connected and is
- * restored after the reconnection. It is ok to subscribe while not connected
- * but the results will not be returned until the connection completes.
- * </p>
- * <p>
- * If the id is already subscribed with a different callback then the new
- * callback will replace the previous one and the child data will be
- * reloaded.
- * </p>
- *
- * @param parentId The id of the parent media item whose list of children
- * will be subscribed.
- * @param options The bundle of service-specific arguments to send to the media
- * browser service. The contents of this bundle may affect the
- * information returned when browsing.
- * @param callback The callback to receive the list of children.
- */
- public void subscribe(@NonNull String parentId, @NonNull Bundle options,
- @NonNull SubscriptionCallback callback) {
- if (options == null) {
- throw new IllegalArgumentException("options cannot be null");
- }
- subscribeInternal(parentId, new Bundle(options), callback);
- }
-
- /**
- * Unsubscribes for changes to the children of the specified media id.
- * <p>
- * The query callback will no longer be invoked for results associated with
- * this id once this method returns.
- * </p>
- *
- * @param parentId The id of the parent media item whose list of children
- * will be unsubscribed.
- */
- public void unsubscribe(@NonNull String parentId) {
- unsubscribeInternal(parentId, null);
- }
-
- /**
- * Unsubscribes for changes to the children of the specified media id through a callback.
- * <p>
- * The query callback will no longer be invoked for results associated with
- * this id once this method returns.
- * </p>
- *
- * @param parentId The id of the parent media item whose list of children
- * will be unsubscribed.
- * @param callback A callback sent to the media browser service to subscribe.
- */
- public void unsubscribe(@NonNull String parentId, @NonNull SubscriptionCallback callback) {
- if (callback == null) {
- throw new IllegalArgumentException("callback cannot be null");
- }
- unsubscribeInternal(parentId, callback);
- }
-
- /**
- * Retrieves a specific {@link MediaItem} from the connected service. Not
- * all services may support this; when it is unavailable, fall back to
- * subscribing to the parent's id instead.
- *
- * @param mediaId The id of the item to retrieve.
- * @param cb The callback to receive the result on.
- */
- public void getItem(final @NonNull String mediaId, @NonNull final ItemCallback cb) {
- if (TextUtils.isEmpty(mediaId)) {
- throw new IllegalArgumentException("mediaId cannot be empty.");
- }
- if (cb == null) {
- throw new IllegalArgumentException("cb cannot be null.");
- }
- if (mState != CONNECT_STATE_CONNECTED) {
- Log.i(TAG, "Not connected, unable to retrieve the MediaItem.");
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- cb.onError(mediaId);
- }
- });
- return;
- }
- ResultReceiver receiver = new ResultReceiver(mHandler) {
- @Override
- protected void onReceiveResult(int resultCode, Bundle resultData) {
- if (!isConnected()) {
- return;
- }
- if (resultCode != 0 || resultData == null
- || !resultData.containsKey(MediaBrowserService.KEY_MEDIA_ITEM)) {
- cb.onError(mediaId);
- return;
- }
- Parcelable item = resultData.getParcelable(MediaBrowserService.KEY_MEDIA_ITEM);
- if (item != null && !(item instanceof MediaItem)) {
- cb.onError(mediaId);
- return;
- }
- cb.onItemLoaded((MediaItem)item);
- }
- };
- try {
- mServiceBinder.getMediaItem(mediaId, receiver, mServiceCallbacks);
- } catch (RemoteException e) {
- Log.i(TAG, "Remote error getting media item.");
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- cb.onError(mediaId);
- }
- });
- }
- }
-
- private void subscribeInternal(String parentId, Bundle options, SubscriptionCallback callback) {
- // Check arguments.
- if (TextUtils.isEmpty(parentId)) {
- throw new IllegalArgumentException("parentId cannot be empty.");
- }
- if (callback == null) {
- throw new IllegalArgumentException("callback cannot be null");
- }
- // Update or create the subscription.
- Subscription sub = mSubscriptions.get(parentId);
- if (sub == null) {
- sub = new Subscription();
- mSubscriptions.put(parentId, sub);
- }
- sub.putCallback(mContext, options, callback);
-
- // If we are connected, tell the service that we are watching. If we aren't connected,
- // the service will be told when we connect.
- if (isConnected()) {
- try {
- if (options == null) {
- mServiceBinder.addSubscriptionDeprecated(parentId, mServiceCallbacks);
- }
- mServiceBinder.addSubscription(parentId, callback.mToken, options,
- mServiceCallbacks);
- } catch (RemoteException ex) {
- // Process is crashing. We will disconnect, and upon reconnect we will
- // automatically reregister. So nothing to do here.
- Log.d(TAG, "addSubscription failed with RemoteException parentId=" + parentId);
- }
- }
- }
-
- private void unsubscribeInternal(String parentId, SubscriptionCallback callback) {
- // Check arguments.
- if (TextUtils.isEmpty(parentId)) {
- throw new IllegalArgumentException("parentId cannot be empty.");
- }
-
- Subscription sub = mSubscriptions.get(parentId);
- if (sub == null) {
- return;
- }
- // Tell the service if necessary.
- try {
- if (callback == null) {
- if (isConnected()) {
- mServiceBinder.removeSubscriptionDeprecated(parentId, mServiceCallbacks);
- mServiceBinder.removeSubscription(parentId, null, mServiceCallbacks);
- }
- } else {
- final List<SubscriptionCallback> callbacks = sub.getCallbacks();
- final List<Bundle> optionsList = sub.getOptionsList();
- for (int i = callbacks.size() - 1; i >= 0; --i) {
- if (callbacks.get(i) == callback) {
- if (isConnected()) {
- mServiceBinder.removeSubscription(
- parentId, callback.mToken, mServiceCallbacks);
- }
- callbacks.remove(i);
- optionsList.remove(i);
- }
- }
- }
- } catch (RemoteException ex) {
- // Process is crashing. We will disconnect, and upon reconnect we will
- // automatically reregister. So nothing to do here.
- Log.d(TAG, "removeSubscription failed with RemoteException parentId=" + parentId);
- }
-
- if (sub.isEmpty() || callback == null) {
- mSubscriptions.remove(parentId);
- }
- }
-
- /**
- * For debugging.
- */
- private static String getStateLabel(int state) {
- switch (state) {
- case CONNECT_STATE_DISCONNECTING:
- return "CONNECT_STATE_DISCONNECTING";
- case CONNECT_STATE_DISCONNECTED:
- return "CONNECT_STATE_DISCONNECTED";
- case CONNECT_STATE_CONNECTING:
- return "CONNECT_STATE_CONNECTING";
- case CONNECT_STATE_CONNECTED:
- return "CONNECT_STATE_CONNECTED";
- case CONNECT_STATE_SUSPENDED:
- return "CONNECT_STATE_SUSPENDED";
- default:
- return "UNKNOWN/" + state;
- }
- }
-
- private final void onServiceConnected(final IMediaBrowserServiceCallbacks callback,
- final String root, final MediaSession.Token session, final Bundle extra) {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- // Check to make sure there hasn't been a disconnect or a different
- // ServiceConnection.
- if (!isCurrent(callback, "onConnect")) {
- return;
- }
- // Don't allow them to call us twice.
- if (mState != CONNECT_STATE_CONNECTING) {
- Log.w(TAG, "onConnect from service while mState="
- + getStateLabel(mState) + "... ignoring");
- return;
- }
- mRootId = root;
- mMediaSessionToken = session;
- mExtras = extra;
- mState = CONNECT_STATE_CONNECTED;
-
- if (DBG) {
- Log.d(TAG, "ServiceCallbacks.onConnect...");
- dump();
- }
- mCallback.onConnected();
-
- // we may receive some subscriptions before we are connected, so re-subscribe
- // everything now
- for (Entry<String, Subscription> subscriptionEntry : mSubscriptions.entrySet()) {
- String id = subscriptionEntry.getKey();
- Subscription sub = subscriptionEntry.getValue();
- List<SubscriptionCallback> callbackList = sub.getCallbacks();
- List<Bundle> optionsList = sub.getOptionsList();
- for (int i = 0; i < callbackList.size(); ++i) {
- try {
- mServiceBinder.addSubscription(id, callbackList.get(i).mToken,
- optionsList.get(i), mServiceCallbacks);
- } catch (RemoteException ex) {
- // Process is crashing. We will disconnect, and upon reconnect we will
- // automatically reregister. So nothing to do here.
- Log.d(TAG, "addSubscription failed with RemoteException parentId="
- + id);
- }
- }
- }
- }
- });
- }
-
- private final void onConnectionFailed(final IMediaBrowserServiceCallbacks callback) {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- Log.e(TAG, "onConnectFailed for " + mServiceComponent);
-
- // Check to make sure there hasn't been a disconnect or a different
- // ServiceConnection.
- if (!isCurrent(callback, "onConnectFailed")) {
- return;
- }
- // Don't allow them to call us twice.
- if (mState != CONNECT_STATE_CONNECTING) {
- Log.w(TAG, "onConnect from service while mState="
- + getStateLabel(mState) + "... ignoring");
- return;
- }
-
- // Clean up
- forceCloseConnection();
-
- // Tell the app.
- mCallback.onConnectionFailed();
- }
- });
- }
-
- private final void onLoadChildren(final IMediaBrowserServiceCallbacks callback,
- final String parentId, final MediaParceledListSlice list, final Bundle options) {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- // Check that there hasn't been a disconnect or a different
- // ServiceConnection.
- if (!isCurrent(callback, "onLoadChildren")) {
- return;
- }
-
- if (DBG) {
- Log.d(TAG, "onLoadChildren for " + mServiceComponent + " id=" + parentId);
- }
-
- // Check that the subscription is still subscribed.
- final Subscription subscription = mSubscriptions.get(parentId);
- if (subscription != null) {
- // Tell the app.
- SubscriptionCallback subscriptionCallback =
- subscription.getCallback(mContext, options);
- if (subscriptionCallback != null) {
- List<MediaItem> data = list == null ? null : list.getList();
- if (options == null) {
- if (data == null) {
- subscriptionCallback.onError(parentId);
- } else {
- subscriptionCallback.onChildrenLoaded(parentId, data);
- }
- } else {
- if (data == null) {
- subscriptionCallback.onError(parentId, options);
- } else {
- subscriptionCallback.onChildrenLoaded(parentId, data, options);
- }
- }
- return;
- }
- }
- if (DBG) {
- Log.d(TAG, "onLoadChildren for id that isn't subscribed id=" + parentId);
- }
- }
- });
- }
-
- /**
- * Return true if {@code callback} is the current ServiceCallbacks. Also logs if it's not.
- */
- private boolean isCurrent(IMediaBrowserServiceCallbacks callback, String funcName) {
- if (mServiceCallbacks != callback || mState == CONNECT_STATE_DISCONNECTING
- || mState == CONNECT_STATE_DISCONNECTED) {
- if (mState != CONNECT_STATE_DISCONNECTING && mState != CONNECT_STATE_DISCONNECTED) {
- Log.i(TAG, funcName + " for " + mServiceComponent + " with mServiceConnection="
- + mServiceCallbacks + " this=" + this);
- }
- return false;
- }
- return true;
- }
-
- private ServiceCallbacks getNewServiceCallbacks() {
- return new ServiceCallbacks(this);
- }
-
- /**
- * Log internal state.
- * @hide
- */
- void dump() {
- Log.d(TAG, "MediaBrowser...");
- Log.d(TAG, " mServiceComponent=" + mServiceComponent);
- Log.d(TAG, " mCallback=" + mCallback);
- Log.d(TAG, " mRootHints=" + mRootHints);
- Log.d(TAG, " mState=" + getStateLabel(mState));
- Log.d(TAG, " mServiceConnection=" + mServiceConnection);
- Log.d(TAG, " mServiceBinder=" + mServiceBinder);
- Log.d(TAG, " mServiceCallbacks=" + mServiceCallbacks);
- Log.d(TAG, " mRootId=" + mRootId);
- Log.d(TAG, " mMediaSessionToken=" + mMediaSessionToken);
- }
-
- /**
- * A class with information on a single media item for use in browsing/searching media.
- * MediaItems are application-dependent, so we cannot guarantee that they contain the
- * right values.
- */
- public static class MediaItem implements Parcelable {
- private final int mFlags;
- private final MediaDescription mDescription;
-
- /** @hide */
- @Retention(RetentionPolicy.SOURCE)
- @IntDef(flag=true, value = { FLAG_BROWSABLE, FLAG_PLAYABLE })
- public @interface Flags { }
-
- /**
- * Flag: Indicates that the item has children of its own.
- */
- public static final int FLAG_BROWSABLE = 1 << 0;
-
- /**
- * Flag: Indicates that the item is playable.
- * <p>
- * The id of this item may be passed to
- * {@link MediaController.TransportControls#playFromMediaId(String, Bundle)}
- * to start playing it.
- * </p>
- */
- public static final int FLAG_PLAYABLE = 1 << 1;
-
- /**
- * Create a new MediaItem for use in browsing media.
- * @param description The description of the media, which must include a
- * media id.
- * @param flags The flags for this item.
- */
- public MediaItem(@NonNull MediaDescription description, @Flags int flags) {
- if (description == null) {
- throw new IllegalArgumentException("description cannot be null");
- }
- if (TextUtils.isEmpty(description.getMediaId())) {
- throw new IllegalArgumentException("description must have a non-empty media id");
- }
- mFlags = flags;
- mDescription = description;
- }
-
- /**
- * Private constructor.
- */
- private MediaItem(Parcel in) {
- mFlags = in.readInt();
- mDescription = MediaDescription.CREATOR.createFromParcel(in);
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- @Override
- public void writeToParcel(Parcel out, int flags) {
- out.writeInt(mFlags);
- mDescription.writeToParcel(out, flags);
- }
-
- @Override
- public String toString() {
- final StringBuilder sb = new StringBuilder("MediaItem{");
- sb.append("mFlags=").append(mFlags);
- sb.append(", mDescription=").append(mDescription);
- sb.append('}');
- return sb.toString();
- }
-
- public static final Parcelable.Creator<MediaItem> CREATOR =
- new Parcelable.Creator<MediaItem>() {
- @Override
- public MediaItem createFromParcel(Parcel in) {
- return new MediaItem(in);
- }
-
- @Override
- public MediaItem[] newArray(int size) {
- return new MediaItem[size];
- }
- };
-
- /**
- * Gets the flags of the item.
- */
- public @Flags int getFlags() {
- return mFlags;
- }
-
- /**
- * Returns whether this item is browsable.
- * @see #FLAG_BROWSABLE
- */
- public boolean isBrowsable() {
- return (mFlags & FLAG_BROWSABLE) != 0;
- }
-
- /**
- * Returns whether this item is playable.
- * @see #FLAG_PLAYABLE
- */
- public boolean isPlayable() {
- return (mFlags & FLAG_PLAYABLE) != 0;
- }
-
- /**
- * Returns the description of the media.
- */
- public @NonNull MediaDescription getDescription() {
- return mDescription;
- }
-
- /**
- * Returns the media id in the {@link MediaDescription} for this item.
- * @see android.media.MediaMetadata#METADATA_KEY_MEDIA_ID
- */
- public @Nullable String getMediaId() {
- return mDescription.getMediaId();
- }
- }
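For reference, a browser service typically builds one of these items with the public MediaDescription.Builder API; a minimal sketch, where the media id and title are hypothetical:

    // Assumes android.media.MediaDescription and android.media.browse.MediaBrowser are imported.
    MediaDescription description = new MediaDescription.Builder()
            .setMediaId("track-42")     // hypothetical id; MediaItem requires a non-empty media id
            .setTitle("Example Track")
            .build();
    MediaBrowser.MediaItem item =
            new MediaBrowser.MediaItem(description, MediaBrowser.MediaItem.FLAG_PLAYABLE);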
-
- /**
- * Callbacks for connection related events.
- */
- public static class ConnectionCallback {
- /**
- * Invoked after {@link MediaBrowser#connect()} when the request has successfully completed.
- */
- public void onConnected() {
- }
-
- /**
- * Invoked when the client is disconnected from the media browser.
- */
- public void onConnectionSuspended() {
- }
-
- /**
- * Invoked when the connection to the media browser failed.
- */
- public void onConnectionFailed() {
- }
- }
-
- /**
- * Callbacks for subscription related events.
- */
- public static abstract class SubscriptionCallback {
- Binder mToken;
-
- public SubscriptionCallback() {
- mToken = new Binder();
- }
-
- /**
- * Called when the list of children is loaded or updated.
- *
- * @param parentId The media id of the parent media item.
- * @param children The children which were loaded.
- */
- public void onChildrenLoaded(@NonNull String parentId, @NonNull List<MediaItem> children) {
- }
-
- /**
- * Called when the list of children is loaded or updated.
- *
- * @param parentId The media id of the parent media item.
- * @param children The children which were loaded.
- * @param options The bundle of service-specific arguments sent to the media
- * browser service. The contents of this bundle may affect the
- * information returned when browsing.
- */
- public void onChildrenLoaded(@NonNull String parentId, @NonNull List<MediaItem> children,
- @NonNull Bundle options) {
- }
-
- /**
- * Called when the id doesn't exist or other errors in subscribing.
- * <p>
- * If this is called, the subscription remains until {@link MediaBrowser#unsubscribe} is
- * called, because some errors may heal themselves.
- * </p>
- *
- * @param parentId The media id of the parent media item whose children could
- * not be loaded.
- */
- public void onError(@NonNull String parentId) {
- }
-
- /**
- * Called when the id doesn't exist or other errors in subscribing.
- * <p>
- * If this is called, the subscription remains until {@link MediaBrowser#unsubscribe} is
- * called, because some errors may heal themselves.
- * </p>
- *
- * @param parentId The media id of the parent media item whose children could
- * not be loaded.
- * @param options The bundle of service-specific arguments sent to the media
- * browser service.
- */
- public void onError(@NonNull String parentId, @NonNull Bundle options) {
- }
- }
-
- /**
- * Callback for receiving the result of {@link #getItem}.
- */
- public static abstract class ItemCallback {
- /**
- * Called when the item has been returned by the connected service.
- *
- * @param item The item that was returned or null if it doesn't exist.
- */
- public void onItemLoaded(MediaItem item) {
- }
-
- /**
- * Called there was an error retrieving it or the connected service doesn't support
- * {@link #getItem}.
- *
- * @param mediaId The media id of the media item which could not be loaded.
- */
- public void onError(@NonNull String mediaId) {
- }
- }
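For context, a connected client would typically request a single item as sketched below; the media id is hypothetical and mediaBrowser is assumed to be an already-connected MediaBrowser:

    mediaBrowser.getItem("track-42", new MediaBrowser.ItemCallback() {
        @Override
        public void onItemLoaded(MediaBrowser.MediaItem item) {
            // item is null when the service has nothing for this id.
        }

        @Override
        public void onError(String mediaId) {
            // Retrieval failed or the service does not support getItem.
        }
    });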
-
- /**
- * ServiceConnection to the other app.
- */
- private class MediaServiceConnection implements ServiceConnection {
- @Override
- public void onServiceConnected(final ComponentName name, final IBinder binder) {
- postOrRun(new Runnable() {
- @Override
- public void run() {
- if (DBG) {
- Log.d(TAG, "MediaServiceConnection.onServiceConnected name=" + name
- + " binder=" + binder);
- dump();
- }
-
- // Make sure we are still the current connection, and that they haven't called
- // disconnect().
- if (!isCurrent("onServiceConnected")) {
- return;
- }
-
- // Save their binder
- mServiceBinder = IMediaBrowserService.Stub.asInterface(binder);
-
- // We make a new mServiceCallbacks each time we connect so that we can drop
- // responses from previous connections.
- mServiceCallbacks = getNewServiceCallbacks();
- mState = CONNECT_STATE_CONNECTING;
-
- // Call connect, which is async. When we get a response from that we will
- // say that we're connected.
- try {
- if (DBG) {
- Log.d(TAG, "ServiceCallbacks.onConnect...");
- dump();
- }
- mServiceBinder.connect(mContext.getPackageName(), mRootHints,
- mServiceCallbacks);
- } catch (RemoteException ex) {
- // Connect failed, which isn't good. But the auto-reconnect on the service
- // will take over and we will come back. We will also get the
- // onServiceDisconnected, which has all the cleanup code. So let that do
- // it.
- Log.w(TAG, "RemoteException during connect for " + mServiceComponent);
- if (DBG) {
- Log.d(TAG, "ServiceCallbacks.onConnect...");
- dump();
- }
- }
- }
- });
- }
-
- @Override
- public void onServiceDisconnected(final ComponentName name) {
- postOrRun(new Runnable() {
- @Override
- public void run() {
- if (DBG) {
- Log.d(TAG, "MediaServiceConnection.onServiceDisconnected name=" + name
- + " this=" + this + " mServiceConnection=" + mServiceConnection);
- dump();
- }
-
- // Make sure we are still the current connection, and that they haven't called
- // disconnect().
- if (!isCurrent("onServiceDisconnected")) {
- return;
- }
-
- // Clear out what we set in onServiceConnected
- mServiceBinder = null;
- mServiceCallbacks = null;
-
- // And tell the app that it's suspended.
- mState = CONNECT_STATE_SUSPENDED;
- mCallback.onConnectionSuspended();
- }
- });
- }
-
- private void postOrRun(Runnable r) {
- if (Thread.currentThread() == mHandler.getLooper().getThread()) {
- r.run();
- } else {
- mHandler.post(r);
- }
- }
-
- /**
- * Return true if this is the current ServiceConnection. Also logs if it's not.
- */
- private boolean isCurrent(String funcName) {
- if (mServiceConnection != this || mState == CONNECT_STATE_DISCONNECTING
- || mState == CONNECT_STATE_DISCONNECTED) {
- if (mState != CONNECT_STATE_DISCONNECTING && mState != CONNECT_STATE_DISCONNECTED) {
- // Check mState, because otherwise this log is noisy.
- Log.i(TAG, funcName + " for " + mServiceComponent + " with mServiceConnection="
- + mServiceConnection + " this=" + this);
- }
- return false;
- }
- return true;
- }
- }
-
- /**
- * Callbacks from the service.
- */
- private static class ServiceCallbacks extends IMediaBrowserServiceCallbacks.Stub {
- private WeakReference<MediaBrowser> mMediaBrowser;
-
- public ServiceCallbacks(MediaBrowser mediaBrowser) {
- mMediaBrowser = new WeakReference<MediaBrowser>(mediaBrowser);
- }
-
- /**
- * The other side has acknowledged our connection. The parameters to this function
- * are the initial data as requested.
- */
- @Override
- public void onConnect(String root, MediaSession.Token session,
- final Bundle extras) {
- MediaBrowser mediaBrowser = mMediaBrowser.get();
- if (mediaBrowser != null) {
- mediaBrowser.onServiceConnected(this, root, session, extras);
- }
- }
-
- /**
- * The other side does not like us. Tell the app via onConnectionFailed.
- */
- @Override
- public void onConnectFailed() {
- MediaBrowser mediaBrowser = mMediaBrowser.get();
- if (mediaBrowser != null) {
- mediaBrowser.onConnectionFailed(this);
- }
- }
-
- @Override
- public void onLoadChildren(String parentId, MediaParceledListSlice list) {
- onLoadChildrenWithOptions(parentId, list, null);
- }
-
- @Override
- public void onLoadChildrenWithOptions(String parentId, MediaParceledListSlice list,
- final Bundle options) {
- MediaBrowser mediaBrowser = mMediaBrowser.get();
- if (mediaBrowser != null) {
- mediaBrowser.onLoadChildren(this, parentId, list, options);
- }
- }
- }
-
- private static class Subscription {
- private final List<SubscriptionCallback> mCallbacks;
- private final List<Bundle> mOptionsList;
-
- public Subscription() {
- mCallbacks = new ArrayList<>();
- mOptionsList = new ArrayList<>();
- }
-
- public boolean isEmpty() {
- return mCallbacks.isEmpty();
- }
-
- public List<Bundle> getOptionsList() {
- return mOptionsList;
- }
-
- public List<SubscriptionCallback> getCallbacks() {
- return mCallbacks;
- }
-
- public SubscriptionCallback getCallback(Context context, Bundle options) {
- if (options != null) {
- options.setClassLoader(context.getClassLoader());
- }
- for (int i = 0; i < mOptionsList.size(); ++i) {
- if (MediaBrowserUtils.areSameOptions(mOptionsList.get(i), options)) {
- return mCallbacks.get(i);
- }
- }
- return null;
- }
-
- public void putCallback(Context context, Bundle options, SubscriptionCallback callback) {
- if (options != null) {
- options.setClassLoader(context.getClassLoader());
- }
- for (int i = 0; i < mOptionsList.size(); ++i) {
- if (MediaBrowserUtils.areSameOptions(mOptionsList.get(i), options)) {
- mCallbacks.set(i, callback);
- return;
- }
- }
- mCallbacks.add(callback);
- mOptionsList.add(options);
- }
- }
-}
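For reference, a minimal sketch of how a client drives the MediaBrowser API removed above; MyMusicService, the field-based wiring, and the null root hints are illustrative assumptions, not part of this change:

    // Assumes android.content.ComponentName, android.content.Context,
    // android.media.browse.MediaBrowser and java.util.List are imported.
    private MediaBrowser mBrowser;

    void startBrowsing(Context context) {
        mBrowser = new MediaBrowser(context,
                new ComponentName(context, MyMusicService.class),
                new MediaBrowser.ConnectionCallback() {
                    @Override
                    public void onConnected() {
                        // Subscribe once connected; subscriptions are re-sent on reconnect.
                        mBrowser.subscribe(mBrowser.getRoot(),
                                new MediaBrowser.SubscriptionCallback() {
                                    @Override
                                    public void onChildrenLoaded(String parentId,
                                            List<MediaBrowser.MediaItem> children) {
                                        // Render the children of parentId.
                                    }
                                });
                    }
                },
                null /* rootHints */);
        mBrowser.connect();
    }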
diff --git a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowserUtils.java b/packages/MediaComponents/apex/java/android/media/browse/MediaBrowserUtils.java
deleted file mode 100644
index 2943e60..0000000
--- a/packages/MediaComponents/apex/java/android/media/browse/MediaBrowserUtils.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.browse;
-
-import android.os.Bundle;
-
-/**
- * @hide
- */
-public class MediaBrowserUtils {
- public static boolean areSameOptions(Bundle options1, Bundle options2) {
- if (options1 == options2) {
- return true;
- } else if (options1 == null) {
- return options2.getInt(MediaBrowser.EXTRA_PAGE, -1) == -1
- && options2.getInt(MediaBrowser.EXTRA_PAGE_SIZE, -1) == -1;
- } else if (options2 == null) {
- return options1.getInt(MediaBrowser.EXTRA_PAGE, -1) == -1
- && options1.getInt(MediaBrowser.EXTRA_PAGE_SIZE, -1) == -1;
- } else {
- return options1.getInt(MediaBrowser.EXTRA_PAGE, -1)
- == options2.getInt(MediaBrowser.EXTRA_PAGE, -1)
- && options1.getInt(MediaBrowser.EXTRA_PAGE_SIZE, -1)
- == options2.getInt(MediaBrowser.EXTRA_PAGE_SIZE, -1);
- }
- }
-
- public static boolean hasDuplicatedItems(Bundle options1, Bundle options2) {
- int page1 = options1 == null ? -1 : options1.getInt(MediaBrowser.EXTRA_PAGE, -1);
- int page2 = options2 == null ? -1 : options2.getInt(MediaBrowser.EXTRA_PAGE, -1);
- int pageSize1 = options1 == null ? -1 : options1.getInt(MediaBrowser.EXTRA_PAGE_SIZE, -1);
- int pageSize2 = options2 == null ? -1 : options2.getInt(MediaBrowser.EXTRA_PAGE_SIZE, -1);
-
- int startIndex1, startIndex2, endIndex1, endIndex2;
- if (page1 == -1 || pageSize1 == -1) {
- startIndex1 = 0;
- endIndex1 = Integer.MAX_VALUE;
- } else {
- startIndex1 = pageSize1 * page1;
- endIndex1 = startIndex1 + pageSize1 - 1;
- }
-
- if (page2 == -1 || pageSize2 == -1) {
- startIndex2 = 0;
- endIndex2 = Integer.MAX_VALUE;
- } else {
- startIndex2 = pageSize2 * page2;
- endIndex2 = startIndex2 + pageSize2 - 1;
- }
-
- if (startIndex1 <= startIndex2 && startIndex2 <= endIndex1) {
- return true;
- } else if (startIndex1 <= endIndex2 && endIndex2 <= endIndex1) {
- return true;
- }
- return false;
- }
-}
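A short worked example of the index arithmetic in hasDuplicatedItems() above, using made-up page and page-size values:

    int page1 = 1, pageSize1 = 10;   // covers indexes 10..19
    int page2 = 2, pageSize2 = 5;    // covers indexes 10..14
    int start1 = pageSize1 * page1, end1 = start1 + pageSize1 - 1;
    int start2 = pageSize2 * page2, end2 = start2 + pageSize2 - 1;
    boolean overlaps = (start1 <= start2 && start2 <= end1)
            || (start1 <= end2 && end2 <= end1);
    // overlaps == true: the two option bundles would load duplicated items.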
diff --git a/packages/MediaComponents/apex/java/android/media/session/IActiveSessionsListener.aidl b/packages/MediaComponents/apex/java/android/media/session/IActiveSessionsListener.aidl
deleted file mode 100644
index 4b9e4bd..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/IActiveSessionsListener.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.media.session.MediaSession;
-
-/**
- * Listens for changes to the list of active sessions.
- * @hide
- */
-oneway interface IActiveSessionsListener {
- void onActiveSessionsChanged(in List<MediaSession.Token> sessions);
-}
diff --git a/packages/MediaComponents/apex/java/android/media/session/ICallback.aidl b/packages/MediaComponents/apex/java/android/media/session/ICallback.aidl
deleted file mode 100644
index 322bffa..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ICallback.aidl
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.app.PendingIntent;
-import android.content.ComponentName;
-import android.media.session.MediaSession;
-import android.view.KeyEvent;
-
-/**
- * @hide
- */
-oneway interface ICallback {
- void onMediaKeyEventDispatchedToMediaSession(in KeyEvent event,
- in MediaSession.Token sessionToken);
- void onMediaKeyEventDispatchedToMediaButtonReceiver(in KeyEvent event,
- in ComponentName mediaButtonReceiver);
-
- void onAddressedPlayerChangedToMediaSession(in MediaSession.Token sessionToken);
- void onAddressedPlayerChangedToMediaButtonReceiver(in ComponentName mediaButtonReceiver);
-}
-
diff --git a/packages/MediaComponents/apex/java/android/media/session/IOnMediaKeyListener.aidl b/packages/MediaComponents/apex/java/android/media/session/IOnMediaKeyListener.aidl
deleted file mode 100644
index aa98ea3..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/IOnMediaKeyListener.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.os.ResultReceiver;
-import android.view.KeyEvent;
-
-/**
- * Listener to handle media key.
- * @hide
- */
-oneway interface IOnMediaKeyListener {
- void onMediaKey(in KeyEvent event, in ResultReceiver result);
-}
-
diff --git a/packages/MediaComponents/apex/java/android/media/session/IOnVolumeKeyLongPressListener.aidl b/packages/MediaComponents/apex/java/android/media/session/IOnVolumeKeyLongPressListener.aidl
deleted file mode 100644
index 07b8347..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/IOnVolumeKeyLongPressListener.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.view.KeyEvent;
-
-/**
- * Listener to handle volume key long-press.
- * @hide
- */
-oneway interface IOnVolumeKeyLongPressListener {
- void onVolumeKeyLongPress(in KeyEvent event);
-}
-
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISession.aidl b/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
deleted file mode 100644
index 14b1c64..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ISession.aidl
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.app.PendingIntent;
-//import android.media.AudioAttributes;
-import android.media.MediaMetadata;
-import android.media.MediaParceledListSlice;
-import android.media.session.ISessionController;
-import android.media.session.PlaybackState;
-import android.media.session.MediaSession;
-import android.os.Bundle;
-import android.os.ResultReceiver;
-
-/**
- * Interface to a MediaSession in the system.
- * @hide
- */
-interface ISession {
- void sendEvent(String event, in Bundle data);
- ISessionController getController();
- void setFlags(int flags);
- void setActive(boolean active);
- void setMediaButtonReceiver(in PendingIntent mbr);
- void setLaunchPendingIntent(in PendingIntent pi);
- void destroy();
-
- // These commands are for the TransportPerformer
- void setMetadata(in MediaMetadata metadata, long duration, String metadataDescription);
- void setPlaybackState(in PlaybackState state);
- void setQueue(in MediaParceledListSlice queue);
- void setQueueTitle(CharSequence title);
- void setExtras(in Bundle extras);
- void setRatingType(int type);
-
- // These commands relate to volume handling
- //TODO(b/119751592): Decide if AudioAttributes should be updated.
- //void setPlaybackToLocal(in AudioAttributes attributes);
- void setPlaybackToRemote(int control, int max);
- void setCurrentVolume(int currentVolume);
-}
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionCallback.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionCallback.aidl
deleted file mode 100644
index 626338d..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionCallback.aidl
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.content.Intent;
-import android.media.Rating;
-import android.media.session.ISessionControllerCallback;
-import android.net.Uri;
-import android.os.Bundle;
-import android.os.ResultReceiver;
-
-/**
- * @hide
- */
-oneway interface ISessionCallback {
- void onCommand(String packageName, int pid, int uid, ISessionControllerCallback caller,
- String command, in Bundle args, in ResultReceiver cb);
- void onMediaButton(String packageName, int pid, int uid, in Intent mediaButtonIntent,
- int sequenceNumber, in ResultReceiver cb);
- void onMediaButtonFromController(String packageName, int pid, int uid,
- ISessionControllerCallback caller, in Intent mediaButtonIntent);
-
- // These callbacks are for the TransportPerformer
- void onPrepare(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onPrepareFromMediaId(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String mediaId, in Bundle extras);
- void onPrepareFromSearch(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String query, in Bundle extras);
- void onPrepareFromUri(String packageName, int pid, int uid, ISessionControllerCallback caller,
- in Uri uri, in Bundle extras);
- void onPlay(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onPlayFromMediaId(String packageName, int pid, int uid, ISessionControllerCallback caller,
- String mediaId, in Bundle extras);
- void onPlayFromSearch(String packageName, int pid, int uid, ISessionControllerCallback caller,
- String query, in Bundle extras);
- void onPlayFromUri(String packageName, int pid, int uid, ISessionControllerCallback caller,
- in Uri uri, in Bundle extras);
- void onSkipToTrack(String packageName, int pid, int uid, ISessionControllerCallback caller,
- long id);
- void onPause(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onStop(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onNext(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onPrevious(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onFastForward(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onRewind(String packageName, int pid, int uid, ISessionControllerCallback caller);
- void onSeekTo(String packageName, int pid, int uid, ISessionControllerCallback caller,
- long pos);
- void onRate(String packageName, int pid, int uid, ISessionControllerCallback caller,
- in Rating rating);
- void onCustomAction(String packageName, int pid, int uid, ISessionControllerCallback caller,
- String action, in Bundle args);
-
- // These callbacks are for volume handling
- void onAdjustVolume(String packageName, int pid, int uid, ISessionControllerCallback caller,
- int direction);
- void onSetVolumeTo(String packageName, int pid, int uid,
- ISessionControllerCallback caller, int value);
-}
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
deleted file mode 100644
index 433b12f..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionController.aidl
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.app.PendingIntent;
-import android.content.Intent;
-import android.media.MediaMetadata;
-import android.media.MediaParceledListSlice;
-import android.media.Rating;
-import android.media.session.ISessionControllerCallback;
-import android.media.session.MediaSession;
-import android.media.session.ParcelableVolumeInfo;
-import android.media.session.PlaybackState;
-import android.net.Uri;
-import android.os.Bundle;
-import android.os.ResultReceiver;
-import android.view.KeyEvent;
-
-import java.util.List;
-
-/**
- * Interface to MediaSessionRecord in the system.
- * @hide
- */
-interface ISessionController {
- void sendCommand(String packageName, ISessionControllerCallback caller,
- String command, in Bundle args, in ResultReceiver cb);
- boolean sendMediaButton(String packageName, ISessionControllerCallback caller,
- boolean asSystemService, in KeyEvent mediaButton);
- void registerCallbackListener(String packageName, ISessionControllerCallback cb);
- void unregisterCallbackListener(ISessionControllerCallback cb);
- boolean isTransportControlEnabled();
- String getPackageName();
- String getTag();
- PendingIntent getLaunchPendingIntent();
- long getFlags();
- ParcelableVolumeInfo getVolumeAttributes();
- void adjustVolume(String packageName, String opPackageName, ISessionControllerCallback caller,
- boolean asSystemService, int direction, int flags);
- void setVolumeTo(String packageName, String opPackageName, ISessionControllerCallback caller,
- int value, int flags);
-
- // These commands are for the TransportControls
- void prepare(String packageName, ISessionControllerCallback caller);
- void prepareFromMediaId(String packageName, ISessionControllerCallback caller,
- String mediaId, in Bundle extras);
- void prepareFromSearch(String packageName, ISessionControllerCallback caller,
- String string, in Bundle extras);
- void prepareFromUri(String packageName, ISessionControllerCallback caller,
- in Uri uri, in Bundle extras);
- void play(String packageName, ISessionControllerCallback caller);
- void playFromMediaId(String packageName, ISessionControllerCallback caller,
- String mediaId, in Bundle extras);
- void playFromSearch(String packageName, ISessionControllerCallback caller,
- String string, in Bundle extras);
- void playFromUri(String packageName, ISessionControllerCallback caller,
- in Uri uri, in Bundle extras);
- void skipToQueueItem(String packageName, ISessionControllerCallback caller, long id);
- void pause(String packageName, ISessionControllerCallback caller);
- void stop(String packageName, ISessionControllerCallback caller);
- void next(String packageName, ISessionControllerCallback caller);
- void previous(String packageName, ISessionControllerCallback caller);
- void fastForward(String packageName, ISessionControllerCallback caller);
- void rewind(String packageName, ISessionControllerCallback caller);
- void seekTo(String packageName, ISessionControllerCallback caller, long pos);
- void rate(String packageName, ISessionControllerCallback caller, in Rating rating);
- void sendCustomAction(String packageName, ISessionControllerCallback caller,
- String action, in Bundle args);
- MediaMetadata getMetadata();
- PlaybackState getPlaybackState();
- MediaParceledListSlice getQueue();
- CharSequence getQueueTitle();
- Bundle getExtras();
- int getRatingType();
-}
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
deleted file mode 100644
index f5cc4f6..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionControllerCallback.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.media.MediaMetadata;
-import android.media.MediaParceledListSlice;
-import android.media.session.ParcelableVolumeInfo;
-import android.media.session.PlaybackState;
-import android.media.session.MediaSession;
-import android.os.Bundle;
-
-/**
- * @hide
- */
-oneway interface ISessionControllerCallback {
- void onEvent(String event, in Bundle extras);
- void onSessionDestroyed();
-
- // These callbacks are for the TransportController
- void onPlaybackStateChanged(in PlaybackState state);
- void onMetadataChanged(in MediaMetadata metadata);
- void onQueueChanged(in MediaParceledListSlice queue);
- void onQueueTitleChanged(CharSequence title);
- void onExtrasChanged(in Bundle extras);
- void onVolumeInfoChanged(in ParcelableVolumeInfo info);
-}
diff --git a/packages/MediaComponents/apex/java/android/media/session/ISessionManager.aidl b/packages/MediaComponents/apex/java/android/media/session/ISessionManager.aidl
deleted file mode 100644
index d6c226f..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ISessionManager.aidl
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.content.ComponentName;
-import android.media.IRemoteVolumeController;
-import android.media.session.IActiveSessionsListener;
-import android.media.session.ICallback;
-import android.media.session.IOnMediaKeyListener;
-import android.media.session.IOnVolumeKeyLongPressListener;
-import android.media.session.ISession;
-import android.media.session.ISessionCallback;
-import android.os.Bundle;
-import android.view.KeyEvent;
-
-/**
- * Interface to the MediaSessionManagerService
- * @hide
- */
-interface ISessionManager {
- ISession createSession(String packageName, in ISessionCallback cb, String tag, int userId);
- List<IBinder> getSessions(in ComponentName compName, int userId);
- void dispatchMediaKeyEvent(String packageName, boolean asSystemService, in KeyEvent keyEvent,
- boolean needWakeLock);
- void dispatchVolumeKeyEvent(String packageName, String opPackageName, boolean asSystemService,
- in KeyEvent keyEvent, int stream, boolean musicOnly);
- void dispatchAdjustVolume(String packageName, String opPackageName, int suggestedStream,
- int delta, int flags);
- void addSessionsListener(in IActiveSessionsListener listener, in ComponentName compName,
- int userId);
- void removeSessionsListener(in IActiveSessionsListener listener);
-
- // This is for the system volume UI only
- void setRemoteVolumeController(in IRemoteVolumeController rvc);
-
- // For PhoneWindowManager to precheck media keys
- boolean isGlobalPriorityActive();
-
- void setCallback(in ICallback callback);
- void setOnVolumeKeyLongPressListener(in IOnVolumeKeyLongPressListener listener);
- void setOnMediaKeyListener(in IOnMediaKeyListener listener);
-
- // MediaSession2
- boolean isTrusted(String controllerPackageName, int controllerPid, int controllerUid);
-}
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaController.java b/packages/MediaComponents/apex/java/android/media/session/MediaController.java
deleted file mode 100644
index 65682a8..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/MediaController.java
+++ /dev/null
@@ -1,1190 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.annotation.NonNull;
-import android.annotation.Nullable;
-import android.annotation.UnsupportedAppUsage;
-import android.app.PendingIntent;
-import android.content.Context;
-import android.media.AudioAttributes;
-import android.media.AudioManager;
-import android.media.MediaMetadata;
-import android.media.MediaParceledListSlice;
-import android.media.Rating;
-import android.media.VolumeProvider;
-import android.net.Uri;
-import android.os.Bundle;
-import android.os.Handler;
-import android.os.Looper;
-import android.os.Message;
-import android.os.RemoteException;
-import android.os.ResultReceiver;
-import android.text.TextUtils;
-import android.util.Log;
-import android.view.KeyEvent;
-
-import java.lang.ref.WeakReference;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Allows an app to interact with an ongoing media session. Media buttons and
- * other commands can be sent to the session. A callback may be registered to
- * receive updates from the session, such as metadata and play state changes.
- * <p>
- * A MediaController can be created through {@link MediaSessionManager} if you
- * hold the "android.permission.MEDIA_CONTENT_CONTROL" permission or are an
- * enabled notification listener, or by getting a {@link MediaSession.Token}
- * directly from the session owner.
- * <p>
- * MediaController objects are thread-safe.
- */
-public final class MediaController {
- private static final String TAG = "MediaController";
-
- private static final int MSG_EVENT = 1;
- private static final int MSG_UPDATE_PLAYBACK_STATE = 2;
- private static final int MSG_UPDATE_METADATA = 3;
- private static final int MSG_UPDATE_VOLUME = 4;
- private static final int MSG_UPDATE_QUEUE = 5;
- private static final int MSG_UPDATE_QUEUE_TITLE = 6;
- private static final int MSG_UPDATE_EXTRAS = 7;
- private static final int MSG_DESTROYED = 8;
-
- private final ISessionController mSessionBinder;
-
- private final MediaSession.Token mToken;
- private final Context mContext;
- private final CallbackStub mCbStub = new CallbackStub(this);
- private final ArrayList<MessageHandler> mCallbacks = new ArrayList<MessageHandler>();
- private final Object mLock = new Object();
-
- private boolean mCbRegistered = false;
- private String mPackageName;
- private String mTag;
-
- private final TransportControls mTransportControls;
-
- /**
- * Call for creating a MediaController directly from a binder. Should only
- * be used by framework code.
- *
- * @hide
- */
- public MediaController(Context context, ISessionController sessionBinder) {
- if (sessionBinder == null) {
- throw new IllegalArgumentException("Session token cannot be null");
- }
- if (context == null) {
- throw new IllegalArgumentException("Context cannot be null");
- }
- mSessionBinder = sessionBinder;
- mTransportControls = new TransportControls();
- mToken = new MediaSession.Token(sessionBinder);
- mContext = context;
- }
-
- /**
- * Create a new MediaController from a session's token.
- *
- * @param context The caller's context.
- * @param token The token for the session.
- */
- public MediaController(@NonNull Context context, @NonNull MediaSession.Token token) {
- this(context, token.getBinder());
- }
-
- /**
- * Get a {@link TransportControls} instance to send transport actions to
- * the associated session.
- *
- * @return A transport controls instance.
- */
- public @NonNull TransportControls getTransportControls() {
- return mTransportControls;
- }
-
- /**
- * Send the specified media button event to the session. Only media keys can
- * be sent by this method; other keys will be ignored.
- *
- * @param keyEvent The media button event to dispatch.
- * @return true if the event was sent to the session, false otherwise.
- */
- public boolean dispatchMediaButtonEvent(@NonNull KeyEvent keyEvent) {
- return dispatchMediaButtonEventInternal(false, keyEvent);
- }
-
- /**
- * Dispatches the media button event as system service to the session.
- * <p>
- * Should only be called by the {@link com.android.internal.policy.PhoneWindow} when the
- * foreground activity didn't consume the key event from the hardware device.
- *
- * @param keyEvent media key event
- * @return {@code true} if the event was sent to the session, {@code false} otherwise
- * @hide
- */
- public boolean dispatchMediaButtonEventAsSystemService(@NonNull KeyEvent keyEvent) {
- return dispatchMediaButtonEventInternal(true, keyEvent);
- }
-
- private boolean dispatchMediaButtonEventInternal(boolean asSystemService,
- @NonNull KeyEvent keyEvent) {
- if (keyEvent == null) {
- throw new IllegalArgumentException("KeyEvent may not be null");
- }
- if (!KeyEvent.isMediaSessionKey(keyEvent.getKeyCode())) {
- return false;
- }
- try {
- return mSessionBinder.sendMediaButton(mContext.getPackageName(), mCbStub,
- asSystemService, keyEvent);
- } catch (RemoteException e) {
- // System is dead. =(
- }
- return false;
- }
-
- /**
- * Dispatches the volume button event as system service to the session.
- * <p>
- * Should only be called by the {@link com.android.internal.policy.PhoneWindow} when the
- * foreground activity didn't consume the key event from the hardware device.
- *
- * @param keyEvent volume key event
- * @hide
- */
- public void dispatchVolumeButtonEventAsSystemService(@NonNull KeyEvent keyEvent) {
- switch (keyEvent.getAction()) {
- case KeyEvent.ACTION_DOWN: {
- int direction = 0;
- switch (keyEvent.getKeyCode()) {
- case KeyEvent.KEYCODE_VOLUME_UP:
- direction = AudioManager.ADJUST_RAISE;
- break;
- case KeyEvent.KEYCODE_VOLUME_DOWN:
- direction = AudioManager.ADJUST_LOWER;
- break;
- case KeyEvent.KEYCODE_VOLUME_MUTE:
- direction = AudioManager.ADJUST_TOGGLE_MUTE;
- break;
- }
- try {
- mSessionBinder.adjustVolume(mContext.getPackageName(),
- mContext.getOpPackageName(), mCbStub, true, direction,
- AudioManager.FLAG_SHOW_UI);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling adjustVolumeBy", e);
- }
- // End the ACTION_DOWN case here; without this break, the ACTION_UP
- // handling below would also run for key-down events.
- break;
- }
-
- case KeyEvent.ACTION_UP: {
- final int flags = AudioManager.FLAG_PLAY_SOUND | AudioManager.FLAG_VIBRATE
- | AudioManager.FLAG_FROM_KEY;
- try {
- mSessionBinder.adjustVolume(mContext.getPackageName(),
- mContext.getOpPackageName(), mCbStub, true, 0, flags);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling adjustVolumeBy", e);
- }
- }
- }
- }
-
- /**
- * Get the current playback state for this session.
- *
- * @return The current PlaybackState or null
- */
- public @Nullable PlaybackState getPlaybackState() {
- try {
- return mSessionBinder.getPlaybackState();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getPlaybackState.", e);
- return null;
- }
- }
-
- /**
- * Get the current metadata for this session.
- *
- * @return The current MediaMetadata or null.
- */
- public @Nullable MediaMetadata getMetadata() {
- try {
- return mSessionBinder.getMetadata();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getMetadata.", e);
- return null;
- }
- }
-
- /**
- * Get the current play queue for this session if one is set. If you only
- * care about the current item, {@link #getMetadata()} should be used.
- *
- * @return The current play queue or null.
- */
- public @Nullable List<MediaSession.QueueItem> getQueue() {
- try {
- MediaParceledListSlice queue = mSessionBinder.getQueue();
- if (queue != null) {
- return queue.getList();
- }
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getQueue.", e);
- }
- return null;
- }
-
- /**
- * Get the queue title for this session.
- */
- public @Nullable CharSequence getQueueTitle() {
- try {
- return mSessionBinder.getQueueTitle();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getQueueTitle", e);
- }
- return null;
- }
-
- /**
- * Get the extras for this session.
- */
- public @Nullable Bundle getExtras() {
- try {
- return mSessionBinder.getExtras();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getExtras", e);
- }
- return null;
- }
-
- /**
- * Get the rating type supported by the session. One of:
- * <ul>
- * <li>{@link Rating#RATING_NONE}</li>
- * <li>{@link Rating#RATING_HEART}</li>
- * <li>{@link Rating#RATING_THUMB_UP_DOWN}</li>
- * <li>{@link Rating#RATING_3_STARS}</li>
- * <li>{@link Rating#RATING_4_STARS}</li>
- * <li>{@link Rating#RATING_5_STARS}</li>
- * <li>{@link Rating#RATING_PERCENTAGE}</li>
- * </ul>
- *
- * @return The supported rating type
- */
- public int getRatingType() {
- try {
- return mSessionBinder.getRatingType();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getRatingType.", e);
- return Rating.RATING_NONE;
- }
- }
-
- /**
- * Get the flags for this session. Flags are defined in {@link MediaSession}.
- *
- * @return The current set of flags for the session.
- */
- public @MediaSession.SessionFlags long getFlags() {
- try {
- return mSessionBinder.getFlags();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getFlags.", e);
- }
- return 0;
- }
-
- /**
- * Get the current playback info for this session.
- *
- * @return The current playback info or null.
- */
- public @Nullable PlaybackInfo getPlaybackInfo() {
- try {
- ParcelableVolumeInfo result = mSessionBinder.getVolumeAttributes();
- return new PlaybackInfo(result.volumeType, result.audioAttrs, result.controlType,
- result.maxVolume, result.currentVolume);
-
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getAudioInfo.", e);
- }
- return null;
- }
-
- /**
- * Get an intent for launching UI associated with this session if one
- * exists.
- *
- * @return A {@link PendingIntent} to launch UI or null.
- */
- public @Nullable PendingIntent getSessionActivity() {
- try {
- return mSessionBinder.getLaunchPendingIntent();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling getPendingIntent.", e);
- }
- return null;
- }
-
- /**
- * Get the token for the session this is connected to.
- *
- * @return The token for the connected session.
- */
- public @NonNull MediaSession.Token getSessionToken() {
- return mToken;
- }
-
- /**
- * Set the volume of the output this session is playing on. The command will
- * be ignored if the session does not support
- * {@link VolumeProvider#VOLUME_CONTROL_ABSOLUTE}. The flags in
- * {@link AudioManager} may be used to affect the handling.
- *
- * @see #getPlaybackInfo()
- * @param value The value to set it to, between 0 and the reported max.
- * @param flags Flags from {@link AudioManager} to include with the volume
- * request.
- */
- public void setVolumeTo(int value, int flags) {
- try {
- mSessionBinder.setVolumeTo(mContext.getPackageName(), mContext.getOpPackageName(),
- mCbStub, value, flags);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling setVolumeTo.", e);
- }
- }
-
- /**
- * Adjust the volume of the output this session is playing on. The direction
- * must be one of {@link AudioManager#ADJUST_LOWER},
- * {@link AudioManager#ADJUST_RAISE}, or {@link AudioManager#ADJUST_SAME}.
- * The command will be ignored if the session does not support
- * {@link VolumeProvider#VOLUME_CONTROL_RELATIVE} or
- * {@link VolumeProvider#VOLUME_CONTROL_ABSOLUTE}. The flags in
- * {@link AudioManager} may be used to affect the handling.
- *
- * @see #getPlaybackInfo()
- * @param direction The direction to adjust the volume in.
- * @param flags Any flags to pass with the command.
- */
- public void adjustVolume(int direction, int flags) {
- try {
- mSessionBinder.adjustVolume(mContext.getPackageName(), mContext.getOpPackageName(),
- mCbStub, false, direction, flags);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling adjustVolumeBy.", e);
- }
- }
-
- /**
- * Registers a callback to receive updates from the Session. Updates will be
- * posted on the caller's thread.
- *
- * @param callback The callback object, must not be null.
- */
- public void registerCallback(@NonNull Callback callback) {
- registerCallback(callback, null);
- }
-
- /**
- * Registers a callback to receive updates from the session. Updates will be
- * posted on the specified handler's thread.
- *
- * @param callback The callback object, must not be null.
- * @param handler The handler to post updates on. If null, the caller's thread
- * will be used.
- */
- public void registerCallback(@NonNull Callback callback, @Nullable Handler handler) {
- if (callback == null) {
- throw new IllegalArgumentException("callback must not be null");
- }
- if (handler == null) {
- handler = new Handler();
- }
- synchronized (mLock) {
- addCallbackLocked(callback, handler);
- }
- }
-
- /**
- * Unregisters the specified callback. If an update has already been posted
- * you may still receive it after calling this method.
- *
- * @param callback The callback to remove.
- */
- public void unregisterCallback(@NonNull Callback callback) {
- if (callback == null) {
- throw new IllegalArgumentException("callback must not be null");
- }
- synchronized (mLock) {
- removeCallbackLocked(callback);
- }
- }
-
- /**
- * Sends a generic command to the session. It is up to the session creator
- * to decide what commands and parameters they will support. As such,
- * commands should only be sent to sessions that the controller owns.
- *
- * @param command The command to send
- * @param args Any parameters to include with the command
- * @param cb The callback to receive the result on
- */
- public void sendCommand(@NonNull String command, @Nullable Bundle args,
- @Nullable ResultReceiver cb) {
- if (TextUtils.isEmpty(command)) {
- throw new IllegalArgumentException("command cannot be null or empty");
- }
- try {
- mSessionBinder.sendCommand(mContext.getPackageName(), mCbStub, command, args, cb);
- } catch (RemoteException e) {
- Log.d(TAG, "Dead object in sendCommand.", e);
- }
- }
-
- /**
- * Get the session owner's package name.
- *
- * @return The package name of the session owner.
- */
- public String getPackageName() {
- if (mPackageName == null) {
- try {
- mPackageName = mSessionBinder.getPackageName();
- } catch (RemoteException e) {
- Log.d(TAG, "Dead object in getPackageName.", e);
- }
- }
- return mPackageName;
- }
-
- /**
- * Get the session's tag for debugging purposes.
- *
- * @return The session's tag.
- * @hide
- */
- public String getTag() {
- if (mTag == null) {
- try {
- mTag = mSessionBinder.getTag();
- } catch (RemoteException e) {
- Log.d(TAG, "Dead object in getTag.", e);
- }
- }
- return mTag;
- }
-
- /*
- * @hide
- */
- ISessionController getSessionBinder() {
- return mSessionBinder;
- }
-
- /**
- * @hide
- */
- @UnsupportedAppUsage
- public boolean controlsSameSession(MediaController other) {
- if (other == null) return false;
- return mSessionBinder.asBinder() == other.getSessionBinder().asBinder();
- }
-
- private void addCallbackLocked(Callback cb, Handler handler) {
- if (getHandlerForCallbackLocked(cb) != null) {
- Log.w(TAG, "Callback is already added, ignoring");
- return;
- }
- MessageHandler holder = new MessageHandler(handler.getLooper(), cb);
- mCallbacks.add(holder);
- holder.mRegistered = true;
-
- if (!mCbRegistered) {
- try {
- mSessionBinder.registerCallbackListener(mContext.getPackageName(), mCbStub);
- mCbRegistered = true;
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in registerCallback", e);
- }
- }
- }
-
- private boolean removeCallbackLocked(Callback cb) {
- boolean success = false;
- for (int i = mCallbacks.size() - 1; i >= 0; i--) {
- MessageHandler handler = mCallbacks.get(i);
- if (cb == handler.mCallback) {
- mCallbacks.remove(i);
- success = true;
- handler.mRegistered = false;
- }
- }
- if (mCbRegistered && mCallbacks.size() == 0) {
- try {
- mSessionBinder.unregisterCallbackListener(mCbStub);
- } catch (RemoteException e) {
- Log.e(TAG, "Dead object in removeCallbackLocked");
- }
- mCbRegistered = false;
- }
- return success;
- }
-
- private MessageHandler getHandlerForCallbackLocked(Callback cb) {
- if (cb == null) {
- throw new IllegalArgumentException("Callback cannot be null");
- }
- for (int i = mCallbacks.size() - 1; i >= 0; i--) {
- MessageHandler handler = mCallbacks.get(i);
- if (cb == handler.mCallback) {
- return handler;
- }
- }
- return null;
- }
-
- private final void postMessage(int what, Object obj, Bundle data) {
- synchronized (mLock) {
- for (int i = mCallbacks.size() - 1; i >= 0; i--) {
- mCallbacks.get(i).post(what, obj, data);
- }
- }
- }
-
- /**
- * Callback for receiving updates from the session. A Callback can be
- * registered using {@link #registerCallback}.
- */
- public static abstract class Callback {
- /**
- * Override to handle the session being destroyed. The session is no
- * longer valid after this call and calls to it will be ignored.
- */
- public void onSessionDestroyed() {
- }
-
- /**
- * Override to handle custom events sent by the session owner without a
- * specified interface. Controllers should only handle these for
- * sessions they own.
- *
- * @param event The event from the session.
- * @param extras Optional parameters for the event, may be null.
- */
- public void onSessionEvent(@NonNull String event, @Nullable Bundle extras) {
- }
-
- /**
- * Override to handle changes in playback state.
- *
- * @param state The new playback state of the session
- */
- public void onPlaybackStateChanged(@Nullable PlaybackState state) {
- }
-
- /**
- * Override to handle changes to the current metadata.
- *
- * @param metadata The current metadata for the session or null if none.
- * @see MediaMetadata
- */
- public void onMetadataChanged(@Nullable MediaMetadata metadata) {
- }
-
- /**
- * Override to handle changes to items in the queue.
- *
- * @param queue A list of items in the current play queue. It should
- * include the currently playing item as well as previous and
- * upcoming items if applicable.
- * @see MediaSession.QueueItem
- */
- public void onQueueChanged(@Nullable List<MediaSession.QueueItem> queue) {
- }
-
- /**
- * Override to handle changes to the queue title.
- *
- * @param title The title that should be displayed along with the play queue such as
- * "Now Playing". May be null if there is no such title.
- */
- public void onQueueTitleChanged(@Nullable CharSequence title) {
- }
-
- /**
- * Override to handle changes to the {@link MediaSession} extras.
- *
- * @param extras The extras that can include other information associated with the
- * {@link MediaSession}.
- */
- public void onExtrasChanged(@Nullable Bundle extras) {
- }
-
- /**
- * Override to handle changes to the audio info.
- *
- * @param info The current audio info for this session.
- */
- public void onAudioInfoChanged(PlaybackInfo info) {
- }
- }
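-
- // Editor's note: a minimal usage sketch (not part of the original file) showing how the
- // Callback above is typically registered on a controller; the context, session token, and
- // UI-update comments are hypothetical placeholders.
- //
- //   MediaController controller = new MediaController(context, sessionToken);
- //   controller.registerCallback(new MediaController.Callback() {
- //       @Override
- //       public void onPlaybackStateChanged(PlaybackState state) {
- //           // Update the transport controls in the UI from the new state.
- //       }
- //       @Override
- //       public void onMetadataChanged(MediaMetadata metadata) {
- //           // Refresh the displayed title and artwork.
- //       }
- //   }, new Handler(Looper.getMainLooper()));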
-
- /**
- * Interface for controlling media playback on a session. This allows an app
- * to send media transport commands to the session.
- */
- public final class TransportControls {
- private static final String TAG = "TransportController";
-
- private TransportControls() {
- }
-
- /**
- * Request that the player prepare its playback; other sessions can continue to play
- * during the preparation of this session. This method can be used to speed up the start
- * of playback. Once the preparation is done, the session will change its playback state
- * to {@link PlaybackState#STATE_PAUSED}. Afterwards, {@link #play} can be called to
- * start playback.
- */
- public void prepare() {
- try {
- mSessionBinder.prepare(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling prepare.", e);
- }
- }
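-
- // Editor's note: a minimal usage sketch (not part of the original file) of the
- // prepare-then-play flow described above; `controller` and the media id are hypothetical
- // placeholders.
- //
- //   MediaController.TransportControls controls = controller.getTransportControls();
- //   controls.prepareFromMediaId("song_42", null); // session should move to STATE_PAUSED
- //   // ... later, once the app is ready to produce audio:
- //   controls.play();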
-
- /**
- * Request that the player prepare playback for a specific media id; other sessions can
- * continue to play during the preparation of this session. This method can be used to
- * speed up the start of playback. Once the preparation is done, the session will change
- * its playback state to {@link PlaybackState#STATE_PAUSED}. Afterwards, {@link #play} can
- * be called to start playback. If the preparation is not needed, {@link #playFromMediaId}
- * can be called directly without this method.
- *
- * @param mediaId The id of the requested media.
- * @param extras Optional extras that can include extra information about the media item
- * to be prepared.
- */
- public void prepareFromMediaId(String mediaId, Bundle extras) {
- if (TextUtils.isEmpty(mediaId)) {
- throw new IllegalArgumentException(
- "You must specify a non-empty String for prepareFromMediaId.");
- }
- try {
- mSessionBinder.prepareFromMediaId(mContext.getPackageName(), mCbStub, mediaId,
- extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling prepare(" + mediaId + ").", e);
- }
- }
-
- /**
- * Request that the player prepare playback for a specific search query. An empty or null
- * query should be treated as a request to prepare any music. Other sessions can continue
- * to play during the preparation of this session. This method can be used to speed up the
- * start of playback. Once the preparation is done, the session will change its playback
- * state to {@link PlaybackState#STATE_PAUSED}. Afterwards, {@link #play} can be called to
- * start playback. If the preparation is not needed, {@link #playFromSearch} can be called
- * directly without this method.
- *
- * @param query The search query.
- * @param extras Optional extras that can include extra information
- * about the query.
- */
- public void prepareFromSearch(String query, Bundle extras) {
- if (query == null) {
- // This is to remain compatible with
- // INTENT_ACTION_MEDIA_PLAY_FROM_SEARCH
- query = "";
- }
- try {
- mSessionBinder.prepareFromSearch(mContext.getPackageName(), mCbStub, query,
- extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling prepare(" + query + ").", e);
- }
- }
-
- /**
- * Request that the player prepare playback for a specific {@link Uri}; other sessions can
- * continue to play during the preparation of this session. This method can be used to
- * speed up the start of playback. Once the preparation is done, the session will change
- * its playback state to {@link PlaybackState#STATE_PAUSED}. Afterwards, {@link #play} can
- * be called to start playback. If the preparation is not needed, {@link #playFromUri} can
- * be called directly without this method.
- *
- * @param uri The URI of the requested media.
- * @param extras Optional extras that can include extra information about the media item
- * to be prepared.
- */
- public void prepareFromUri(Uri uri, Bundle extras) {
- if (uri == null || Uri.EMPTY.equals(uri)) {
- throw new IllegalArgumentException(
- "You must specify a non-empty Uri for prepareFromUri.");
- }
- try {
- mSessionBinder.prepareFromUri(mContext.getPackageName(), mCbStub, uri, extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling prepare(" + uri + ").", e);
- }
- }
-
- /**
- * Request that the player start its playback at its current position.
- */
- public void play() {
- try {
- mSessionBinder.play(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling play.", e);
- }
- }
-
- /**
- * Request that the player start playback for a specific media id.
- *
- * @param mediaId The id of the requested media.
- * @param extras Optional extras that can include extra information about the media item
- * to be played.
- */
- public void playFromMediaId(String mediaId, Bundle extras) {
- if (TextUtils.isEmpty(mediaId)) {
- throw new IllegalArgumentException(
- "You must specify a non-empty String for playFromMediaId.");
- }
- try {
- mSessionBinder.playFromMediaId(mContext.getPackageName(), mCbStub, mediaId,
- extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling play(" + mediaId + ").", e);
- }
- }
-
- /**
- * Request that the player start playback for a specific search query.
- * An empty or null query should be treated as a request to play any
- * music.
- *
- * @param query The search query.
- * @param extras Optional extras that can include extra information
- * about the query.
- */
- public void playFromSearch(String query, Bundle extras) {
- if (query == null) {
- // This is to remain compatible with
- // INTENT_ACTION_MEDIA_PLAY_FROM_SEARCH
- query = "";
- }
- try {
- mSessionBinder.playFromSearch(mContext.getPackageName(), mCbStub, query, extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling play(" + query + ").", e);
- }
- }
-
- /**
- * Request that the player start playback for a specific {@link Uri}.
- *
- * @param uri The URI of the requested media.
- * @param extras Optional extras that can include extra information about the media item
- * to be played.
- */
- public void playFromUri(Uri uri, Bundle extras) {
- if (uri == null || Uri.EMPTY.equals(uri)) {
- throw new IllegalArgumentException(
- "You must specify a non-empty Uri for playFromUri.");
- }
- try {
- mSessionBinder.playFromUri(mContext.getPackageName(), mCbStub, uri, extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling play(" + uri + ").", e);
- }
- }
-
- /**
- * Play an item with a specific id in the play queue. If you specify an
- * id that is not in the play queue, the behavior is undefined.
- */
- public void skipToQueueItem(long id) {
- try {
- mSessionBinder.skipToQueueItem(mContext.getPackageName(), mCbStub, id);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling skipToItem(" + id + ").", e);
- }
- }
-
- /**
- * Request that the player pause its playback and stay at its current
- * position.
- */
- public void pause() {
- try {
- mSessionBinder.pause(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling pause.", e);
- }
- }
-
- /**
- * Request that the player stop its playback; it may clear its state in
- * whatever way is appropriate.
- */
- public void stop() {
- try {
- mSessionBinder.stop(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling stop.", e);
- }
- }
-
- /**
- * Move to a new location in the media stream.
- *
- * @param pos Position to move to, in milliseconds.
- */
- public void seekTo(long pos) {
- try {
- mSessionBinder.seekTo(mContext.getPackageName(), mCbStub, pos);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling seekTo.", e);
- }
- }
-
- /**
- * Start fast forwarding. If playback is already fast forwarding this
- * may increase the rate.
- */
- public void fastForward() {
- try {
- mSessionBinder.fastForward(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling fastForward.", e);
- }
- }
-
- /**
- * Skip to the next item.
- */
- public void skipToNext() {
- try {
- mSessionBinder.next(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling next.", e);
- }
- }
-
- /**
- * Start rewinding. If playback is already rewinding this may increase
- * the rate.
- */
- public void rewind() {
- try {
- mSessionBinder.rewind(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling rewind.", e);
- }
- }
-
- /**
- * Skip to the previous item.
- */
- public void skipToPrevious() {
- try {
- mSessionBinder.previous(mContext.getPackageName(), mCbStub);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling previous.", e);
- }
- }
-
- /**
- * Rate the current content. This will cause the rating to be set for
- * the current user. The Rating type must match the type returned by
- * {@link #getRatingType()}.
- *
- * @param rating The rating to set for the current content
- */
- public void setRating(Rating rating) {
- try {
- mSessionBinder.rate(mContext.getPackageName(), mCbStub, rating);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error calling rate.", e);
- }
- }
-
- /**
- * Send a custom action back for the {@link MediaSession} to perform.
- *
- * @param customAction The action to perform.
- * @param args Optional arguments to supply to the {@link MediaSession} for this
- * custom action.
- */
- public void sendCustomAction(@NonNull PlaybackState.CustomAction customAction,
- @Nullable Bundle args) {
- if (customAction == null) {
- throw new IllegalArgumentException("CustomAction cannot be null.");
- }
- sendCustomAction(customAction.getAction(), args);
- }
-
- /**
- * Send the id and args from a custom action back for the {@link MediaSession} to perform.
- *
- * @see #sendCustomAction(PlaybackState.CustomAction action, Bundle args)
- * @param action The action identifier of the {@link PlaybackState.CustomAction} as
- * specified by the {@link MediaSession}.
- * @param args Optional arguments to supply to the {@link MediaSession} for this
- * custom action.
- */
- public void sendCustomAction(@NonNull String action, @Nullable Bundle args) {
- if (TextUtils.isEmpty(action)) {
- throw new IllegalArgumentException("action cannot be null or empty");
- }
- try {
- mSessionBinder.sendCustomAction(mContext.getPackageName(), mCbStub, action, args);
- } catch (RemoteException e) {
- Log.d(TAG, "Dead object in sendCustomAction.", e);
- }
- }
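-
- // Editor's note: a minimal usage sketch (not part of the original file); the action name
- // and extras key are hypothetical and must match what the session's
- // {@link MediaSession.Callback#onCustomAction} implementation expects.
- //
- //   Bundle args = new Bundle();
- //   args.putBoolean("com.example.EXTRA_ENABLE_SHUFFLE", true);
- //   controller.getTransportControls()
- //           .sendCustomAction("com.example.ACTION_TOGGLE_SHUFFLE", args);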
- }
-
- /**
- * Holds information about the current playback and how audio is handled for
- * this session.
- */
- public static final class PlaybackInfo {
- /**
- * The session uses remote playback.
- */
- public static final int PLAYBACK_TYPE_REMOTE = 2;
- /**
- * The session uses local playback.
- */
- public static final int PLAYBACK_TYPE_LOCAL = 1;
-
- private final int mVolumeType;
- private final int mVolumeControl;
- private final int mMaxVolume;
- private final int mCurrentVolume;
- private final AudioAttributes mAudioAttrs;
-
- /**
- * @hide
- */
- public PlaybackInfo(int type, AudioAttributes attrs, int control, int max, int current) {
- mVolumeType = type;
- mAudioAttrs = attrs;
- mVolumeControl = control;
- mMaxVolume = max;
- mCurrentVolume = current;
- }
-
- /**
- * Get the type of playback which affects volume handling. One of:
- * <ul>
- * <li>{@link #PLAYBACK_TYPE_LOCAL}</li>
- * <li>{@link #PLAYBACK_TYPE_REMOTE}</li>
- * </ul>
- *
- * @return The type of playback this session is using.
- */
- public int getPlaybackType() {
- return mVolumeType;
- }
-
- /**
- * Get the audio attributes for this session. The attributes will affect
- * volume handling for the session. When the volume type is
- * {@link PlaybackInfo#PLAYBACK_TYPE_REMOTE} these may be ignored by the
- * remote volume handler.
- *
- * @return The attributes for this session.
- */
- public AudioAttributes getAudioAttributes() {
- return mAudioAttrs;
- }
-
- /**
- * Get the type of volume control that can be used. One of:
- * <ul>
- * <li>{@link VolumeProvider#VOLUME_CONTROL_ABSOLUTE}</li>
- * <li>{@link VolumeProvider#VOLUME_CONTROL_RELATIVE}</li>
- * <li>{@link VolumeProvider#VOLUME_CONTROL_FIXED}</li>
- * </ul>
- *
- * @return The type of volume control that may be used with this
- * session.
- */
- public int getVolumeControl() {
- return mVolumeControl;
- }
-
- /**
- * Get the maximum volume that may be set for this session.
- *
- * @return The maximum allowed volume where this session is playing.
- */
- public int getMaxVolume() {
- return mMaxVolume;
- }
-
- /**
- * Get the current volume for this session.
- *
- * @return The current volume where this session is playing.
- */
- public int getCurrentVolume() {
- return mCurrentVolume;
- }
- }
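-
- // Editor's note: a minimal usage sketch (not part of the original file) of reading
- // PlaybackInfo to drive a volume UI; `controller` and `volumeSlider` are hypothetical
- // placeholders.
- //
- //   MediaController.PlaybackInfo info = controller.getPlaybackInfo();
- //   if (info.getPlaybackType() == MediaController.PlaybackInfo.PLAYBACK_TYPE_REMOTE) {
- //       volumeSlider.setMax(info.getMaxVolume());
- //       volumeSlider.setProgress(info.getCurrentVolume());
- //   }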
-
- private final static class CallbackStub extends ISessionControllerCallback.Stub {
- private final WeakReference<MediaController> mController;
-
- public CallbackStub(MediaController controller) {
- mController = new WeakReference<MediaController>(controller);
- }
-
- @Override
- public void onSessionDestroyed() {
- MediaController controller = mController.get();
- if (controller != null) {
- controller.postMessage(MSG_DESTROYED, null, null);
- }
- }
-
- @Override
- public void onEvent(String event, Bundle extras) {
- MediaController controller = mController.get();
- if (controller != null) {
- controller.postMessage(MSG_EVENT, event, extras);
- }
- }
-
- @Override
- public void onPlaybackStateChanged(PlaybackState state) {
- MediaController controller = mController.get();
- if (controller != null) {
- controller.postMessage(MSG_UPDATE_PLAYBACK_STATE, state, null);
- }
- }
-
- @Override
- public void onMetadataChanged(MediaMetadata metadata) {
- MediaController controller = mController.get();
- if (controller != null) {
- controller.postMessage(MSG_UPDATE_METADATA, metadata, null);
- }
- }
-
- @Override
- public void onQueueChanged(MediaParceledListSlice parceledQueue) {
- List<MediaSession.QueueItem> queue = parceledQueue == null ? null : parceledQueue
- .getList();
- MediaController controller = mController.get();
- if (controller != null) {
- controller.postMessage(MSG_UPDATE_QUEUE, queue, null);
- }
- }
-
- @Override
- public void onQueueTitleChanged(CharSequence title) {
- MediaController controller = mController.get();
- if (controller != null) {
- controller.postMessage(MSG_UPDATE_QUEUE_TITLE, title, null);
- }
- }
-
- @Override
- public void onExtrasChanged(Bundle extras) {
- MediaController controller = mController.get();
- if (controller != null) {
- controller.postMessage(MSG_UPDATE_EXTRAS, extras, null);
- }
- }
-
- @Override
- public void onVolumeInfoChanged(ParcelableVolumeInfo pvi) {
- MediaController controller = mController.get();
- if (controller != null) {
- PlaybackInfo info = new PlaybackInfo(pvi.volumeType, pvi.audioAttrs,
- pvi.controlType, pvi.maxVolume, pvi.currentVolume);
- controller.postMessage(MSG_UPDATE_VOLUME, info, null);
- }
- }
-
- }
-
- private final static class MessageHandler extends Handler {
- private final MediaController.Callback mCallback;
- private boolean mRegistered = false;
-
- public MessageHandler(Looper looper, MediaController.Callback cb) {
- super(looper);
- mCallback = cb;
- }
-
- @Override
- public void handleMessage(Message msg) {
- if (!mRegistered) {
- return;
- }
- switch (msg.what) {
- case MSG_EVENT:
- mCallback.onSessionEvent((String) msg.obj, msg.getData());
- break;
- case MSG_UPDATE_PLAYBACK_STATE:
- mCallback.onPlaybackStateChanged((PlaybackState) msg.obj);
- break;
- case MSG_UPDATE_METADATA:
- mCallback.onMetadataChanged((MediaMetadata) msg.obj);
- break;
- case MSG_UPDATE_QUEUE:
- mCallback.onQueueChanged((List<MediaSession.QueueItem>) msg.obj);
- break;
- case MSG_UPDATE_QUEUE_TITLE:
- mCallback.onQueueTitleChanged((CharSequence) msg.obj);
- break;
- case MSG_UPDATE_EXTRAS:
- mCallback.onExtrasChanged((Bundle) msg.obj);
- break;
- case MSG_UPDATE_VOLUME:
- mCallback.onAudioInfoChanged((PlaybackInfo) msg.obj);
- break;
- case MSG_DESTROYED:
- mCallback.onSessionDestroyed();
- break;
- }
- }
-
- public void post(int what, Object obj, Bundle data) {
- Message msg = obtainMessage(what, obj);
- msg.setAsynchronous(true);
- msg.setData(data);
- msg.sendToTarget();
- }
- }
-
-}
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaSession.aidl b/packages/MediaComponents/apex/java/android/media/session/MediaSession.aidl
deleted file mode 100644
index f657cef..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/MediaSession.aidl
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-package android.media.session;
-
-parcelable MediaSession.Token;
-parcelable MediaSession.QueueItem;
\ No newline at end of file
diff --git a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java b/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
deleted file mode 100644
index 73e16a6..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/MediaSession.java
+++ /dev/null
@@ -1,1570 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media.session;
-
-import android.annotation.IntDef;
-import android.annotation.NonNull;
-import android.annotation.Nullable;
-import android.annotation.UnsupportedAppUsage;
-import android.app.Activity;
-import android.app.PendingIntent;
-import android.content.Context;
-import android.content.Intent;
-import android.media.AudioAttributes;
-import android.media.MediaDescription;
-import android.media.MediaMetadata;
-import android.media.MediaParceledListSlice;
-import android.media.Rating;
-import android.media.VolumeProvider;
-import android.media.session.MediaSessionManager.RemoteUserInfo;
-import android.net.Uri;
-import android.os.Bundle;
-import android.os.Handler;
-import android.os.Looper;
-import android.os.Message;
-import android.os.Parcel;
-import android.os.Parcelable;
-import android.os.RemoteException;
-import android.os.ResultReceiver;
-import android.os.UserHandle;
-import android.service.media.MediaBrowserService;
-import android.text.TextUtils;
-import android.util.Log;
-import android.util.Pair;
-import android.view.KeyEvent;
-import android.view.ViewConfiguration;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.ref.WeakReference;
-import java.util.List;
-import java.util.Objects;
-
-/**
- * Allows interaction with media controllers, volume keys, media buttons, and
- * transport controls.
- * <p>
- * A MediaSession should be created when an app wants to publish media playback
- * information or handle media keys. In general an app only needs one session
- * for all playback, though multiple sessions can be created to provide
- * finer-grained control of media.
- * <p>
- * Once a session is created the owner of the session may pass its
- * {@link #getSessionToken() session token} to other processes to allow them to
- * create a {@link MediaController} to interact with the session.
- * <p>
- * To receive commands, media keys, and other events a {@link Callback} must be
- * set with {@link #setCallback(Callback)} and {@link #setActive(boolean)
- * setActive(true)} must be called.
- * <p>
- * When an app is finished performing playback it must call {@link #release()}
- * to clean up the session and notify any controllers.
- * <p>
- * MediaSession objects are thread safe.
- */
-public final class MediaSession {
- private static final String TAG = "MediaSession";
-
- /**
- * Set this flag on the session to indicate that it can handle media button
- * events.
- * @deprecated This flag is no longer used. All media sessions are expected to handle media
- * button events now.
- */
- @Deprecated
- public static final int FLAG_HANDLES_MEDIA_BUTTONS = 1 << 0;
-
- /**
- * Set this flag on the session to indicate that it handles transport
- * control commands through its {@link Callback}.
- * @deprecated This flag is no longer used. All media sessions are expected to handle transport
- * controls now.
- */
- @Deprecated
- public static final int FLAG_HANDLES_TRANSPORT_CONTROLS = 1 << 1;
-
- /**
- * System only flag for a session that needs to have priority over all other
- * sessions. This flag ensures this session will receive media button events
- * regardless of the current ordering in the system.
- *
- * @hide
- */
- public static final int FLAG_EXCLUSIVE_GLOBAL_PRIORITY = 1 << 16;
-
- /**
- * @hide
- */
- public static final int INVALID_UID = -1;
-
- /**
- * @hide
- */
- public static final int INVALID_PID = -1;
-
- /** @hide */
- @Retention(RetentionPolicy.SOURCE)
- @IntDef(flag = true, value = {
- FLAG_HANDLES_MEDIA_BUTTONS,
- FLAG_HANDLES_TRANSPORT_CONTROLS,
- FLAG_EXCLUSIVE_GLOBAL_PRIORITY })
- public @interface SessionFlags { }
-
- private final Object mLock = new Object();
- private final int mMaxBitmapSize;
-
- private final MediaSession.Token mSessionToken;
- private final MediaController mController;
- private final ISession mBinder;
- private final CallbackStub mCbStub;
-
- // Do not change the name of mCallback. Support lib accesses this by using reflection.
- @UnsupportedAppUsage
- private CallbackMessageHandler mCallback;
- private VolumeProvider mVolumeProvider;
- private PlaybackState mPlaybackState;
-
- private boolean mActive = false;
-
- /**
- * Creates a new session. The session will automatically be registered with
- * the system but will not be published until {@link #setActive(boolean)
- * setActive(true)} is called. You must call {@link #release()} when
- * finished with the session.
- *
- * @param context The context to use to create the session.
- * @param tag A short name for debugging purposes.
- */
- public MediaSession(@NonNull Context context, @NonNull String tag) {
- this(context, tag, UserHandle.myUserId());
- }
-
- /**
- * Creates a new session as the specified user. To create a session as a
- * user other than your own you must hold the
- * {@link android.Manifest.permission#INTERACT_ACROSS_USERS_FULL}
- * permission.
- *
- * @param context The context to use to create the session.
- * @param tag A short name for debugging purposes.
- * @param userId The user id to create the session as.
- * @hide
- */
- public MediaSession(@NonNull Context context, @NonNull String tag, int userId) {
- if (context == null) {
- throw new IllegalArgumentException("context cannot be null.");
- }
- if (TextUtils.isEmpty(tag)) {
- throw new IllegalArgumentException("tag cannot be null or empty");
- }
- mMaxBitmapSize = context.getResources().getDimensionPixelSize(
- android.R.dimen.config_mediaMetadataBitmapMaxSize);
- mCbStub = new CallbackStub(this);
- MediaSessionManager manager = (MediaSessionManager) context
- .getSystemService(Context.MEDIA_SESSION_SERVICE);
- try {
- //TODO(b/119749862): Resolve hidden API usage. MediaSessioManager#createSession
- //mBinder = manager.createSession(mCbStub, tag, userId);
- mBinder = null; //TODO: remove this.
- mSessionToken = new Token(mBinder.getController());
- mController = new MediaController(context, mSessionToken);
- } catch (RemoteException e) {
- throw new RuntimeException("Remote error creating session.", e);
- }
- }
-
- /**
- * Set the callback to receive updates for the MediaSession. This includes
- * media button events and transport controls. The caller's thread will be
- * used to post updates.
- * <p>
- * Set the callback to null to stop receiving updates.
- *
- * @param callback The callback object
- */
- public void setCallback(@Nullable Callback callback) {
- setCallback(callback, null);
- }
-
- /**
- * Set the callback to receive updates for the MediaSession. This includes
- * media button events and transport controls.
- * <p>
- * Set the callback to null to stop receiving updates.
- *
- * @param callback The callback to receive updates on.
- * @param handler The handler that events should be posted on.
- */
- public void setCallback(@Nullable Callback callback, @Nullable Handler handler) {
- synchronized (mLock) {
- if (mCallback != null) {
- // We're updating the callback, clear the session from the old one.
- mCallback.mCallback.mSession = null;
- mCallback.removeCallbacksAndMessages(null);
- }
- if (callback == null) {
- mCallback = null;
- return;
- }
- if (handler == null) {
- handler = new Handler();
- }
- callback.mSession = this;
- CallbackMessageHandler msgHandler = new CallbackMessageHandler(handler.getLooper(),
- callback);
- mCallback = msgHandler;
- }
- }
-
- /**
- * Set an intent for launching UI for this Session. This can be used as a
- * quick link to an ongoing media screen. The intent should be for an
- * activity that may be started using {@link Activity#startActivity(Intent)}.
- *
- * @param pi The intent to launch to show UI for this Session.
- */
- public void setSessionActivity(@Nullable PendingIntent pi) {
- try {
- mBinder.setLaunchPendingIntent(pi);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Failure in setLaunchPendingIntent.", e);
- }
- }
-
- /**
- * Set a pending intent for your media button receiver to allow restarting
- * playback after the session has been stopped. If your app is started in
- * this way an {@link Intent#ACTION_MEDIA_BUTTON} intent will be sent via
- * the pending intent.
- *
- * @param mbr The {@link PendingIntent} to send the media button event to.
- */
- public void setMediaButtonReceiver(@Nullable PendingIntent mbr) {
- try {
- mBinder.setMediaButtonReceiver(mbr);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Failure in setMediaButtonReceiver.", e);
- }
- }
-
- /**
- * Set any flags for the session.
- *
- * @param flags The flags to set for this session.
- */
- public void setFlags(@SessionFlags int flags) {
- try {
- mBinder.setFlags(flags);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Failure in setFlags.", e);
- }
- }
-
- /**
- * Set the attributes for this session's audio. This will affect the
- * system's volume handling for this session. If
- * {@link #setPlaybackToRemote} was previously called it will stop receiving
- * volume commands and the system will begin sending volume changes to the
- * appropriate stream.
- * <p>
- * By default sessions use attributes for media.
- *
- * @param attributes The {@link AudioAttributes} for this session's audio.
- */
- public void setPlaybackToLocal(AudioAttributes attributes) {
- if (attributes == null) {
- throw new IllegalArgumentException("Attributes cannot be null for local playback.");
- }
- //TODO(b/119751592): Decide if AudioAttributes should be updated.
- /*
- try {
- mBinder.setPlaybackToLocal(attributes);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Failure in setPlaybackToLocal.", e);
- }
- */
- }
-
- /**
- * Configure this session to use remote volume handling. This must be called
- * to receive volume button events, otherwise the system will adjust the
- * appropriate stream volume for this session. If
- * {@link #setPlaybackToLocal} was previously called the system will stop
- * handling volume changes for this session and pass them to the volume
- * provider instead.
- *
- * @param volumeProvider The provider that will handle volume changes. May
- * not be null.
- */
- public void setPlaybackToRemote(@NonNull VolumeProvider volumeProvider) {
- if (volumeProvider == null) {
- throw new IllegalArgumentException("volumeProvider may not be null!");
- }
- synchronized (mLock) {
- mVolumeProvider = volumeProvider;
- }
- volumeProvider.setCallback(new VolumeProvider.Callback() {
- @Override
- public void onVolumeChanged(VolumeProvider volumeProvider) {
- notifyRemoteVolumeChanged(volumeProvider);
- }
- });
-
- try {
- mBinder.setPlaybackToRemote(volumeProvider.getVolumeControl(),
- volumeProvider.getMaxVolume());
- mBinder.setCurrentVolume(volumeProvider.getCurrentVolume());
- } catch (RemoteException e) {
- Log.wtf(TAG, "Failure in setPlaybackToRemote.", e);
- }
- }
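-
- // Editor's note: a minimal usage sketch (not part of the original file) of routing volume
- // keys to a remote playback device, as described above; the volume values and the remote
- // device plumbing are hypothetical.
- //
- //   session.setPlaybackToRemote(new VolumeProvider(
- //           VolumeProvider.VOLUME_CONTROL_ABSOLUTE, /* maxVolume= */ 100,
- //           /* currentVolume= */ 30) {
- //       @Override
- //       public void onSetVolumeTo(int volume) {
- //           // Forward the absolute volume to the remote device, then call setCurrentVolume(volume).
- //       }
- //       @Override
- //       public void onAdjustVolume(int direction) {
- //           // Apply the relative change remotely, then call setCurrentVolume(...) with the result.
- //       }
- //   });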
-
- /**
- * Set if this session is currently active and ready to receive commands. If
- * set to false your session's controller may not be discoverable. You must
- * set the session to active before it can start receiving media button
- * events or transport commands.
- *
- * @param active Whether this session is active or not.
- */
- public void setActive(boolean active) {
- if (mActive == active) {
- return;
- }
- try {
- mBinder.setActive(active);
- mActive = active;
- } catch (RemoteException e) {
- Log.wtf(TAG, "Failure in setActive.", e);
- }
- }
-
- /**
- * Get the current active state of this session.
- *
- * @return True if the session is active, false otherwise.
- */
- public boolean isActive() {
- return mActive;
- }
-
- /**
- * Send a proprietary event to all MediaControllers listening to this
- * Session. It's up to the Controller/Session owner to determine the meaning
- * of any events.
- *
- * @param event The name of the event to send
- * @param extras Any extras included with the event
- */
- public void sendSessionEvent(@NonNull String event, @Nullable Bundle extras) {
- if (TextUtils.isEmpty(event)) {
- throw new IllegalArgumentException("event cannot be null or empty");
- }
- try {
- mBinder.sendEvent(event, extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error sending event", e);
- }
- }
-
- /**
- * This must be called when an app has finished performing playback. If
- * playback is expected to start again shortly the session can be left open,
- * but it must be released if your activity or service is being destroyed.
- */
- public void release() {
- try {
- mBinder.destroy();
- } catch (RemoteException e) {
- Log.wtf(TAG, "Error releasing session: ", e);
- }
- }
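-
- // Editor's note: a minimal lifecycle sketch (not part of the original file) tying together
- // the creation, callback, activation, and release steps documented on this class; the tag
- // and player hooks are hypothetical.
- //
- //   MediaSession session = new MediaSession(context, "MyPlayer");
- //   session.setCallback(new MediaSession.Callback() {
- //       @Override public void onPlay() { /* start or resume the player */ }
- //       @Override public void onPause() { /* pause the player */ }
- //   });
- //   session.setActive(true);
- //   // ... hand session.getSessionToken() to clients that need a MediaController ...
- //   session.release(); // when playback is finished for good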
-
- /**
- * Retrieve a token object that can be used by apps to create a
- * {@link MediaController} for interacting with this session. The owner of
- * the session is responsible for deciding how to distribute these tokens.
- *
- * @return A token that can be used to create a MediaController for this
- * session
- */
- public @NonNull Token getSessionToken() {
- return mSessionToken;
- }
-
- /**
- * Get a controller for this session. This is a convenience method to avoid
- * having to cache your own controller in process.
- *
- * @return A controller for this session.
- */
- public @NonNull MediaController getController() {
- return mController;
- }
-
- /**
- * Update the current playback state.
- *
- * @param state The current state of playback
- */
- public void setPlaybackState(@Nullable PlaybackState state) {
- mPlaybackState = state;
- try {
- mBinder.setPlaybackState(state);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Dead object in setPlaybackState.", e);
- }
- }
-
- /**
- * Update the current metadata. New metadata can be created using
- * {@link android.media.MediaMetadata.Builder}. This operation may take time proportional to
- * the size of the bitmap to replace large bitmaps with a scaled down copy.
- *
- * @param metadata The new metadata
- * @see android.media.MediaMetadata.Builder#putBitmap
- */
- public void setMetadata(@Nullable MediaMetadata metadata) {
- long duration = -1;
- int fields = 0;
- MediaDescription description = null;
- if (metadata != null) {
- metadata = (new MediaMetadata.Builder(metadata, mMaxBitmapSize)).build();
- if (metadata.containsKey(MediaMetadata.METADATA_KEY_DURATION)) {
- duration = metadata.getLong(MediaMetadata.METADATA_KEY_DURATION);
- }
- fields = metadata.size();
- description = metadata.getDescription();
- }
- String metadataDescription = "size=" + fields + ", description=" + description;
-
- try {
- mBinder.setMetadata(metadata, duration, metadataDescription);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Dead object in setMetadata.", e);
- }
- }
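-
- // Editor's note: a minimal usage sketch (not part of the original file) of publishing
- // playback state and metadata together; the title and duration values are hypothetical.
- //
- //   session.setPlaybackState(new PlaybackState.Builder()
- //           .setActions(PlaybackState.ACTION_PLAY_PAUSE | PlaybackState.ACTION_SKIP_TO_NEXT)
- //           .setState(PlaybackState.STATE_PLAYING, /* position= */ 0, /* playbackSpeed= */ 1.0f)
- //           .build());
- //   session.setMetadata(new MediaMetadata.Builder()
- //           .putString(MediaMetadata.METADATA_KEY_TITLE, "Example track")
- //           .putLong(MediaMetadata.METADATA_KEY_DURATION, 215000L)
- //           .build());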
-
- /**
- * Update the list of items in the play queue. It is an ordered list and
- * should contain the current item, and previous or upcoming items if they
- * exist. Specify null if there is no current play queue.
- * <p>
- * The queue should be of reasonable size. If the play queue is unbounded
- * within your app, it is better to send a reasonable amount in a sliding
- * window instead.
- *
- * @param queue A list of items in the play queue.
- */
- public void setQueue(@Nullable List<QueueItem> queue) {
- try {
- mBinder.setQueue(queue == null ? null : new MediaParceledListSlice<QueueItem>(queue));
- } catch (RemoteException e) {
- Log.wtf(TAG, "Dead object in setQueue.", e);
- }
- }
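-
- // Editor's note: a minimal usage sketch (not part of the original file) of publishing a
- // short play queue together with a title (see {@link #setQueueTitle} below); the ids and
- // titles are hypothetical.
- //
- //   List<MediaSession.QueueItem> queue = new ArrayList<>();
- //   queue.add(new MediaSession.QueueItem(new MediaDescription.Builder()
- //           .setMediaId("song_1").setTitle("First track").build(), 1));
- //   queue.add(new MediaSession.QueueItem(new MediaDescription.Builder()
- //           .setMediaId("song_2").setTitle("Second track").build(), 2));
- //   session.setQueue(queue);
- //   session.setQueueTitle("Up next");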
-
- /**
- * Set the title of the play queue. The UI should display this title along
- * with the play queue itself.
- * e.g. "Play Queue", "Now Playing", or an album name.
- *
- * @param title The title of the play queue.
- */
- public void setQueueTitle(@Nullable CharSequence title) {
- try {
- mBinder.setQueueTitle(title);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Dead object in setQueueTitle.", e);
- }
- }
-
- /**
- * Set the style of rating used by this session. Apps trying to set the
- * rating should use this style. Must be one of the following:
- * <ul>
- * <li>{@link Rating#RATING_NONE}</li>
- * <li>{@link Rating#RATING_3_STARS}</li>
- * <li>{@link Rating#RATING_4_STARS}</li>
- * <li>{@link Rating#RATING_5_STARS}</li>
- * <li>{@link Rating#RATING_HEART}</li>
- * <li>{@link Rating#RATING_PERCENTAGE}</li>
- * <li>{@link Rating#RATING_THUMB_UP_DOWN}</li>
- * </ul>
- */
- public void setRatingType(@Rating.Style int type) {
- try {
- mBinder.setRatingType(type);
- } catch (RemoteException e) {
- Log.e(TAG, "Error in setRatingType.", e);
- }
- }
-
- /**
- * Set some extras that can be associated with the {@link MediaSession}. No assumptions should
- * be made as to how a {@link MediaController} will handle these extras.
- * Keys should be fully qualified (e.g. com.example.MY_EXTRA) to avoid conflicts.
- *
- * @param extras The extras associated with the {@link MediaSession}.
- */
- public void setExtras(@Nullable Bundle extras) {
- try {
- mBinder.setExtras(extras);
- } catch (RemoteException e) {
- Log.wtf(TAG, "Dead object in setExtras.", e);
- }
- }
-
- /**
- * Gets the controller information who sent the current request.
- * <p>
- * Note: This is only valid while in a request callback, such as {@link Callback#onPlay}.
- *
- * @throws IllegalStateException If this method is called outside of {@link Callback} methods.
- * @see MediaSessionManager#isTrustedForMediaControl(RemoteUserInfo)
- */
- public final @NonNull RemoteUserInfo getCurrentControllerInfo() {
- if (mCallback == null || mCallback.mCurrentControllerInfo == null) {
- throw new IllegalStateException(
- "This should be called inside of MediaSession.Callback methods");
- }
- return mCallback.mCurrentControllerInfo;
- }
-
- /**
- * Notify the system that the remote volume changed.
- *
- * @param provider The provider that is handling volume changes.
- * @hide
- */
- public void notifyRemoteVolumeChanged(VolumeProvider provider) {
- synchronized (mLock) {
- if (provider == null || provider != mVolumeProvider) {
- Log.w(TAG, "Received update from stale volume provider");
- return;
- }
- }
- try {
- mBinder.setCurrentVolume(provider.getCurrentVolume());
- } catch (RemoteException e) {
- Log.e(TAG, "Error in notifyVolumeChanged", e);
- }
- }
-
- /**
- * Returns the name of the package that sent the last media button, transport control, or
- * command from controllers and the system. This is only valid while in a request callback, such
- * as {@link Callback#onPlay}.
- *
- * @hide
- */
- @UnsupportedAppUsage
- public String getCallingPackage() {
- if (mCallback != null && mCallback.mCurrentControllerInfo != null) {
- return mCallback.mCurrentControllerInfo.getPackageName();
- }
- return null;
- }
-
- private void dispatchPrepare(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_PREPARE, null, null);
- }
-
- private void dispatchPrepareFromMediaId(RemoteUserInfo caller, String mediaId, Bundle extras) {
- postToCallback(caller, CallbackMessageHandler.MSG_PREPARE_MEDIA_ID, mediaId, extras);
- }
-
- private void dispatchPrepareFromSearch(RemoteUserInfo caller, String query, Bundle extras) {
- postToCallback(caller, CallbackMessageHandler.MSG_PREPARE_SEARCH, query, extras);
- }
-
- private void dispatchPrepareFromUri(RemoteUserInfo caller, Uri uri, Bundle extras) {
- postToCallback(caller, CallbackMessageHandler.MSG_PREPARE_URI, uri, extras);
- }
-
- private void dispatchPlay(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_PLAY, null, null);
- }
-
- private void dispatchPlayFromMediaId(RemoteUserInfo caller, String mediaId, Bundle extras) {
- postToCallback(caller, CallbackMessageHandler.MSG_PLAY_MEDIA_ID, mediaId, extras);
- }
-
- private void dispatchPlayFromSearch(RemoteUserInfo caller, String query, Bundle extras) {
- postToCallback(caller, CallbackMessageHandler.MSG_PLAY_SEARCH, query, extras);
- }
-
- private void dispatchPlayFromUri(RemoteUserInfo caller, Uri uri, Bundle extras) {
- postToCallback(caller, CallbackMessageHandler.MSG_PLAY_URI, uri, extras);
- }
-
- private void dispatchSkipToItem(RemoteUserInfo caller, long id) {
- postToCallback(caller, CallbackMessageHandler.MSG_SKIP_TO_ITEM, id, null);
- }
-
- private void dispatchPause(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_PAUSE, null, null);
- }
-
- private void dispatchStop(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_STOP, null, null);
- }
-
- private void dispatchNext(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_NEXT, null, null);
- }
-
- private void dispatchPrevious(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_PREVIOUS, null, null);
- }
-
- private void dispatchFastForward(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_FAST_FORWARD, null, null);
- }
-
- private void dispatchRewind(RemoteUserInfo caller) {
- postToCallback(caller, CallbackMessageHandler.MSG_REWIND, null, null);
- }
-
- private void dispatchSeekTo(RemoteUserInfo caller, long pos) {
- postToCallback(caller, CallbackMessageHandler.MSG_SEEK_TO, pos, null);
- }
-
- private void dispatchRate(RemoteUserInfo caller, Rating rating) {
- postToCallback(caller, CallbackMessageHandler.MSG_RATE, rating, null);
- }
-
- private void dispatchCustomAction(RemoteUserInfo caller, String action, Bundle args) {
- postToCallback(caller, CallbackMessageHandler.MSG_CUSTOM_ACTION, action, args);
- }
-
- private void dispatchMediaButton(RemoteUserInfo caller, Intent mediaButtonIntent) {
- postToCallback(caller, CallbackMessageHandler.MSG_MEDIA_BUTTON, mediaButtonIntent, null);
- }
-
- private void dispatchMediaButtonDelayed(RemoteUserInfo info, Intent mediaButtonIntent,
- long delay) {
- postToCallbackDelayed(info, CallbackMessageHandler.MSG_PLAY_PAUSE_KEY_DOUBLE_TAP_TIMEOUT,
- mediaButtonIntent, null, delay);
- }
-
- private void dispatchAdjustVolume(RemoteUserInfo caller, int direction) {
- postToCallback(caller, CallbackMessageHandler.MSG_ADJUST_VOLUME, direction, null);
- }
-
- private void dispatchSetVolumeTo(RemoteUserInfo caller, int volume) {
- postToCallback(caller, CallbackMessageHandler.MSG_SET_VOLUME, volume, null);
- }
-
- private void dispatchCommand(RemoteUserInfo caller, String command, Bundle args,
- ResultReceiver resultCb) {
- Command cmd = new Command(command, args, resultCb);
- postToCallback(caller, CallbackMessageHandler.MSG_COMMAND, cmd, null);
- }
-
- private void postToCallback(RemoteUserInfo caller, int what, Object obj, Bundle data) {
- postToCallbackDelayed(caller, what, obj, data, 0);
- }
-
- private void postToCallbackDelayed(RemoteUserInfo caller, int what, Object obj, Bundle data,
- long delay) {
- synchronized (mLock) {
- if (mCallback != null) {
- mCallback.post(caller, what, obj, data, delay);
- }
- }
- }
-
- /**
- * Return true if this is considered an active playback state.
- *
- * @hide
- */
- public static boolean isActiveState(int state) {
- switch (state) {
- case PlaybackState.STATE_FAST_FORWARDING:
- case PlaybackState.STATE_REWINDING:
- case PlaybackState.STATE_SKIPPING_TO_PREVIOUS:
- case PlaybackState.STATE_SKIPPING_TO_NEXT:
- case PlaybackState.STATE_BUFFERING:
- case PlaybackState.STATE_CONNECTING:
- case PlaybackState.STATE_PLAYING:
- return true;
- }
- return false;
- }
-
- /**
- * Represents an ongoing session. This may be passed to apps by the session
- * owner to allow them to create a {@link MediaController} to communicate with
- * the session.
- */
- public static final class Token implements Parcelable {
-
- private ISessionController mBinder;
-
- /**
- * @hide
- */
- public Token(ISessionController binder) {
- mBinder = binder;
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- dest.writeStrongBinder(mBinder.asBinder());
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((mBinder == null) ? 0 : mBinder.asBinder().hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj)
- return true;
- if (obj == null)
- return false;
- if (getClass() != obj.getClass())
- return false;
- Token other = (Token) obj;
- if (mBinder == null) {
- if (other.mBinder != null)
- return false;
- } else if (!mBinder.asBinder().equals(other.mBinder.asBinder()))
- return false;
- return true;
- }
-
- ISessionController getBinder() {
- return mBinder;
- }
-
- public static final Parcelable.Creator<Token> CREATOR
- = new Parcelable.Creator<Token>() {
- @Override
- public Token createFromParcel(Parcel in) {
- return new Token(ISessionController.Stub.asInterface(in.readStrongBinder()));
- }
-
- @Override
- public Token[] newArray(int size) {
- return new Token[size];
- }
- };
- }
-
- /**
- * Receives media buttons, transport controls, and commands from controllers
- * and the system. A callback may be set using {@link #setCallback}.
- */
- public abstract static class Callback {
-
- private MediaSession mSession;
- private CallbackMessageHandler mHandler;
- private boolean mMediaPlayPauseKeyPending;
-
- public Callback() {
- }
-
- /**
- * Called when a controller has sent a command to this session.
- * The owner of the session may handle custom commands but is not
- * required to.
- *
- * @param command The command name.
- * @param args Optional parameters for the command, may be null.
- * @param cb A result receiver to which a result may be sent by the command, may be null.
- */
- public void onCommand(@NonNull String command, @Nullable Bundle args,
- @Nullable ResultReceiver cb) {
- }
-
- /**
- * Called when a media button is pressed and this session has the
- * highest priority or a controller sends a media button event to the
- * session. The default behavior will call the relevant method if the
- * action for it was set.
- * <p>
- * The intent will be of type {@link Intent#ACTION_MEDIA_BUTTON} with a
- * KeyEvent in {@link Intent#EXTRA_KEY_EVENT}
- *
- * @param mediaButtonIntent an intent containing the KeyEvent as an
- * extra
- * @return True if the event was handled, false otherwise.
- */
- public boolean onMediaButtonEvent(@NonNull Intent mediaButtonIntent) {
- if (mSession != null && mHandler != null
- && Intent.ACTION_MEDIA_BUTTON.equals(mediaButtonIntent.getAction())) {
- KeyEvent ke = mediaButtonIntent.getParcelableExtra(Intent.EXTRA_KEY_EVENT);
- if (ke != null && ke.getAction() == KeyEvent.ACTION_DOWN) {
- PlaybackState state = mSession.mPlaybackState;
- long validActions = state == null ? 0 : state.getActions();
- switch (ke.getKeyCode()) {
- case KeyEvent.KEYCODE_MEDIA_PLAY_PAUSE:
- case KeyEvent.KEYCODE_HEADSETHOOK:
- if (ke.getRepeatCount() > 0) {
- // Consider long-press as a single tap.
- handleMediaPlayPauseKeySingleTapIfPending();
- } else if (mMediaPlayPauseKeyPending) {
- // Treat a double tap as skip to next.
- mHandler.removeMessages(CallbackMessageHandler
- .MSG_PLAY_PAUSE_KEY_DOUBLE_TAP_TIMEOUT);
- mMediaPlayPauseKeyPending = false;
- if ((validActions & PlaybackState.ACTION_SKIP_TO_NEXT) != 0) {
- onSkipToNext();
- }
- } else {
- mMediaPlayPauseKeyPending = true;
- mSession.dispatchMediaButtonDelayed(
- mSession.getCurrentControllerInfo(),
- mediaButtonIntent, ViewConfiguration.getDoubleTapTimeout());
- }
- return true;
- default:
- // If another key is pressed within double tap timeout, consider the
- // pending play/pause as a single tap to handle media keys in order.
- handleMediaPlayPauseKeySingleTapIfPending();
- break;
- }
-
- switch (ke.getKeyCode()) {
- case KeyEvent.KEYCODE_MEDIA_PLAY:
- if ((validActions & PlaybackState.ACTION_PLAY) != 0) {
- onPlay();
- return true;
- }
- break;
- case KeyEvent.KEYCODE_MEDIA_PAUSE:
- if ((validActions & PlaybackState.ACTION_PAUSE) != 0) {
- onPause();
- return true;
- }
- break;
- case KeyEvent.KEYCODE_MEDIA_NEXT:
- if ((validActions & PlaybackState.ACTION_SKIP_TO_NEXT) != 0) {
- onSkipToNext();
- return true;
- }
- break;
- case KeyEvent.KEYCODE_MEDIA_PREVIOUS:
- if ((validActions & PlaybackState.ACTION_SKIP_TO_PREVIOUS) != 0) {
- onSkipToPrevious();
- return true;
- }
- break;
- case KeyEvent.KEYCODE_MEDIA_STOP:
- if ((validActions & PlaybackState.ACTION_STOP) != 0) {
- onStop();
- return true;
- }
- break;
- case KeyEvent.KEYCODE_MEDIA_FAST_FORWARD:
- if ((validActions & PlaybackState.ACTION_FAST_FORWARD) != 0) {
- onFastForward();
- return true;
- }
- break;
- case KeyEvent.KEYCODE_MEDIA_REWIND:
- if ((validActions & PlaybackState.ACTION_REWIND) != 0) {
- onRewind();
- return true;
- }
- break;
- }
- }
- }
- return false;
- }
-
- private void handleMediaPlayPauseKeySingleTapIfPending() {
- if (!mMediaPlayPauseKeyPending) {
- return;
- }
- mMediaPlayPauseKeyPending = false;
- mHandler.removeMessages(CallbackMessageHandler.MSG_PLAY_PAUSE_KEY_DOUBLE_TAP_TIMEOUT);
- PlaybackState state = mSession.mPlaybackState;
- long validActions = state == null ? 0 : state.getActions();
- boolean isPlaying = state != null
- && state.getState() == PlaybackState.STATE_PLAYING;
- boolean canPlay = (validActions & (PlaybackState.ACTION_PLAY_PAUSE
- | PlaybackState.ACTION_PLAY)) != 0;
- boolean canPause = (validActions & (PlaybackState.ACTION_PLAY_PAUSE
- | PlaybackState.ACTION_PAUSE)) != 0;
- if (isPlaying && canPause) {
- onPause();
- } else if (!isPlaying && canPlay) {
- onPlay();
- }
- }
-
- /**
- * Override to handle requests to prepare playback. During the preparation, a session should
- * not hold audio focus in order to allow other sessions to play seamlessly. The state of
- * playback should be updated to {@link PlaybackState#STATE_PAUSED} after the preparation is
- * done.
- */
- public void onPrepare() {
- }
-
- /**
- * Override to handle requests to prepare for playing a specific mediaId that was provided
- * by your app's {@link MediaBrowserService}. During the preparation, a session should not
- * hold audio focus in order to allow other sessions to play seamlessly. The state of playback
- * should be updated to {@link PlaybackState#STATE_PAUSED} after the preparation is done.
- * The playback of the prepared content should start in the implementation of
- * {@link #onPlay}. Override {@link #onPlayFromMediaId} to handle requests for starting
- * playback without preparation.
- */
- public void onPrepareFromMediaId(String mediaId, Bundle extras) {
- }
-
- /**
- * Override to handle requests to prepare playback from a search query. An empty query
- * indicates that the app may prepare any music. The implementation should attempt to make a
- * smart choice about what to play. During the preparation, a session should not hold audio
- * focus in order to allow other sessions to play seamlessly. The state of playback should be
- * updated to {@link PlaybackState#STATE_PAUSED} after the preparation is done. The playback
- * of the prepared content should start in the implementation of {@link #onPlay}. Override
- * {@link #onPlayFromSearch} to handle requests for starting playback without preparation.
- */
- public void onPrepareFromSearch(String query, Bundle extras) {
- }
-
- /**
- * Override to handle requests to prepare a specific media item represented by a URI.
- * During the preparation, a session should not hold audio focus in order to allow
- * other sessions to play seamlessly. The state of playback should be updated to
- * {@link PlaybackState#STATE_PAUSED} after the preparation is done.
- * The playback of the prepared content should start in the implementation of
- * {@link #onPlay}. Override {@link #onPlayFromUri} to handle requests
- * for starting playback without preparation.
- */
- public void onPrepareFromUri(Uri uri, Bundle extras) {
- }
-
- /**
- * Override to handle requests to begin playback.
- */
- public void onPlay() {
- }
-
- /**
- * Override to handle requests to begin playback from a search query. An
- * empty query indicates that the app may play any music. The
- * implementation should attempt to make a smart choice about what to
- * play.
- */
- public void onPlayFromSearch(String query, Bundle extras) {
- }
-
- /**
- * Override to handle requests to play a specific mediaId that was
- * provided by your app's {@link MediaBrowserService}.
- */
- public void onPlayFromMediaId(String mediaId, Bundle extras) {
- }
-
- /**
- * Override to handle requests to play a specific media item represented by a URI.
- */
- public void onPlayFromUri(Uri uri, Bundle extras) {
- }
-
- /**
- * Override to handle requests to play an item with a given id from the
- * play queue.
- */
- public void onSkipToQueueItem(long id) {
- }
-
- /**
- * Override to handle requests to pause playback.
- */
- public void onPause() {
- }
-
- /**
- * Override to handle requests to skip to the next media item.
- */
- public void onSkipToNext() {
- }
-
- /**
- * Override to handle requests to skip to the previous media item.
- */
- public void onSkipToPrevious() {
- }
-
- /**
- * Override to handle requests to fast forward.
- */
- public void onFastForward() {
- }
-
- /**
- * Override to handle requests to rewind.
- */
- public void onRewind() {
- }
-
- /**
- * Override to handle requests to stop playback.
- */
- public void onStop() {
- }
-
- /**
- * Override to handle requests to seek to a specific position in ms.
- *
- * @param pos New position to move to, in milliseconds.
- */
- public void onSeekTo(long pos) {
- }
-
- /**
- * Override to handle the item being rated.
- *
- * @param rating The rating sent by the controller.
- */
- public void onSetRating(@NonNull Rating rating) {
- }
-
- /**
- * Called when a {@link MediaController} wants a {@link PlaybackState.CustomAction} to be
- * performed.
- *
- * @param action The action that was originally sent in the
- * {@link PlaybackState.CustomAction}.
- * @param extras Optional extras specified by the {@link MediaController}.
- */
- public void onCustomAction(@NonNull String action, @Nullable Bundle extras) {
- }
- }
-
- /**
- * @hide
- */
- public static class CallbackStub extends ISessionCallback.Stub {
- private WeakReference<MediaSession> mMediaSession;
-
- public CallbackStub(MediaSession session) {
- mMediaSession = new WeakReference<>(session);
- }
-
- private static RemoteUserInfo createRemoteUserInfo(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- return new RemoteUserInfo(packageName, pid, uid,
- caller != null ? caller.asBinder() : null);
- }
-
- @Override
- public void onCommand(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String command, Bundle args, ResultReceiver cb) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchCommand(createRemoteUserInfo(packageName, pid, uid, caller),
- command, args, cb);
- }
- }
-
- @Override
- public void onMediaButton(String packageName, int pid, int uid, Intent mediaButtonIntent,
- int sequenceNumber, ResultReceiver cb) {
- MediaSession session = mMediaSession.get();
- try {
- if (session != null) {
- session.dispatchMediaButton(createRemoteUserInfo(packageName, pid, uid, null),
- mediaButtonIntent);
- }
- } finally {
- if (cb != null) {
- cb.send(sequenceNumber, null);
- }
- }
- }
-
- @Override
- public void onMediaButtonFromController(String packageName, int pid, int uid,
- ISessionControllerCallback caller, Intent mediaButtonIntent) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchMediaButton(createRemoteUserInfo(packageName, pid, uid, caller),
- mediaButtonIntent);
- }
- }
-
- @Override
- public void onPrepare(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPrepare(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onPrepareFromMediaId(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String mediaId,
- Bundle extras) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPrepareFromMediaId(
- createRemoteUserInfo(packageName, pid, uid, caller), mediaId, extras);
- }
- }
-
- @Override
- public void onPrepareFromSearch(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String query,
- Bundle extras) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPrepareFromSearch(
- createRemoteUserInfo(packageName, pid, uid, caller), query, extras);
- }
- }
-
- @Override
- public void onPrepareFromUri(String packageName, int pid, int uid,
- ISessionControllerCallback caller, Uri uri, Bundle extras) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPrepareFromUri(createRemoteUserInfo(packageName, pid, uid, caller),
- uri, extras);
- }
- }
-
- @Override
- public void onPlay(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPlay(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onPlayFromMediaId(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String mediaId,
- Bundle extras) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPlayFromMediaId(createRemoteUserInfo(packageName, pid, uid, caller),
- mediaId, extras);
- }
- }
-
- @Override
- public void onPlayFromSearch(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String query,
- Bundle extras) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPlayFromSearch(createRemoteUserInfo(packageName, pid, uid, caller),
- query, extras);
- }
- }
-
- @Override
- public void onPlayFromUri(String packageName, int pid, int uid,
- ISessionControllerCallback caller, Uri uri, Bundle extras) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPlayFromUri(createRemoteUserInfo(packageName, pid, uid, caller),
- uri, extras);
- }
- }
-
- @Override
- public void onSkipToTrack(String packageName, int pid, int uid,
- ISessionControllerCallback caller, long id) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchSkipToItem(createRemoteUserInfo(packageName, pid, uid, caller), id);
- }
- }
-
- @Override
- public void onPause(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPause(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onStop(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchStop(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onNext(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchNext(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onPrevious(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchPrevious(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onFastForward(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchFastForward(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onRewind(String packageName, int pid, int uid,
- ISessionControllerCallback caller) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchRewind(createRemoteUserInfo(packageName, pid, uid, caller));
- }
- }
-
- @Override
- public void onSeekTo(String packageName, int pid, int uid,
- ISessionControllerCallback caller, long pos) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchSeekTo(createRemoteUserInfo(packageName, pid, uid, caller), pos);
- }
- }
-
- @Override
- public void onRate(String packageName, int pid, int uid, ISessionControllerCallback caller,
- Rating rating) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchRate(createRemoteUserInfo(packageName, pid, uid, caller), rating);
- }
- }
-
- @Override
- public void onCustomAction(String packageName, int pid, int uid,
- ISessionControllerCallback caller, String action, Bundle args) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchCustomAction(createRemoteUserInfo(packageName, pid, uid, caller),
- action, args);
- }
- }
-
- @Override
- public void onAdjustVolume(String packageName, int pid, int uid,
- ISessionControllerCallback caller, int direction) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchAdjustVolume(createRemoteUserInfo(packageName, pid, uid, caller),
- direction);
- }
- }
-
- @Override
- public void onSetVolumeTo(String packageName, int pid, int uid,
- ISessionControllerCallback caller, int value) {
- MediaSession session = mMediaSession.get();
- if (session != null) {
- session.dispatchSetVolumeTo(createRemoteUserInfo(packageName, pid, uid, caller),
- value);
- }
- }
- }
-
- /**
- * A single item that is part of the play queue. It contains a description
- * of the item and its id in the queue.
- */
- public static final class QueueItem implements Parcelable {
- /**
- * This id is reserved. No items can be explicitly assigned this id.
- */
- public static final int UNKNOWN_ID = -1;
-
- private final MediaDescription mDescription;
- @UnsupportedAppUsage
- private final long mId;
-
- /**
- * Create a new {@link MediaSession.QueueItem}.
- *
- * @param description The {@link MediaDescription} for this item.
- * @param id An identifier for this item. It must be unique within the
- * play queue and cannot be {@link #UNKNOWN_ID}.
- */
- public QueueItem(MediaDescription description, long id) {
- if (description == null) {
- throw new IllegalArgumentException("Description cannot be null.");
- }
- if (id == UNKNOWN_ID) {
- throw new IllegalArgumentException("Id cannot be QueueItem.UNKNOWN_ID");
- }
- mDescription = description;
- mId = id;
- }
-
- private QueueItem(Parcel in) {
- mDescription = MediaDescription.CREATOR.createFromParcel(in);
- mId = in.readLong();
- }
-
- /**
- * Get the description for this item.
- */
- public MediaDescription getDescription() {
- return mDescription;
- }
-
- /**
- * Get the queue id for this item.
- */
- public long getQueueId() {
- return mId;
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- mDescription.writeToParcel(dest, flags);
- dest.writeLong(mId);
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- public static final Creator<MediaSession.QueueItem> CREATOR =
- new Creator<MediaSession.QueueItem>() {
-
- @Override
- public MediaSession.QueueItem createFromParcel(Parcel p) {
- return new MediaSession.QueueItem(p);
- }
-
- @Override
- public MediaSession.QueueItem[] newArray(int size) {
- return new MediaSession.QueueItem[size];
- }
- };
-
- @Override
- public String toString() {
- return "MediaSession.QueueItem {" +
- "Description=" + mDescription +
- ", Id=" + mId + " }";
- }
-
- @Override
- public boolean equals(Object o) {
- if (o == null) {
- return false;
- }
-
- if (!(o instanceof QueueItem)) {
- return false;
- }
-
- final QueueItem item = (QueueItem) o;
- if (mId != item.mId) {
- return false;
- }
-
- if (!Objects.equals(mDescription, item.mDescription)) {
- return false;
- }
-
- return true;
- }
- }
-
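For reference, a minimal sketch of how the QueueItem API above is normally used to populate a play queue; the media id, title, and queue id are illustrative placeholders, not part of this change.

    import android.media.MediaDescription;
    import android.media.session.MediaSession;
    import java.util.ArrayList;
    import java.util.List;

    final class QueueItemSketch {
        // Builds a one-item queue suitable for MediaSession.setQueue().
        static List<MediaSession.QueueItem> buildQueue() {
            MediaDescription description = new MediaDescription.Builder()
                    .setMediaId("track-1")        // placeholder media id
                    .setTitle("First track")      // placeholder title
                    .build();
            // The queue id must be unique within the queue and must not be UNKNOWN_ID.
            MediaSession.QueueItem item = new MediaSession.QueueItem(description, 1L);
            List<MediaSession.QueueItem> queue = new ArrayList<>();
            queue.add(item);
            return queue;
        }
    }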
- private static final class Command {
- public final String command;
- public final Bundle extras;
- public final ResultReceiver stub;
-
- public Command(String command, Bundle extras, ResultReceiver stub) {
- this.command = command;
- this.extras = extras;
- this.stub = stub;
- }
- }
-
- private class CallbackMessageHandler extends Handler {
- private static final int MSG_COMMAND = 1;
- private static final int MSG_MEDIA_BUTTON = 2;
- private static final int MSG_PREPARE = 3;
- private static final int MSG_PREPARE_MEDIA_ID = 4;
- private static final int MSG_PREPARE_SEARCH = 5;
- private static final int MSG_PREPARE_URI = 6;
- private static final int MSG_PLAY = 7;
- private static final int MSG_PLAY_MEDIA_ID = 8;
- private static final int MSG_PLAY_SEARCH = 9;
- private static final int MSG_PLAY_URI = 10;
- private static final int MSG_SKIP_TO_ITEM = 11;
- private static final int MSG_PAUSE = 12;
- private static final int MSG_STOP = 13;
- private static final int MSG_NEXT = 14;
- private static final int MSG_PREVIOUS = 15;
- private static final int MSG_FAST_FORWARD = 16;
- private static final int MSG_REWIND = 17;
- private static final int MSG_SEEK_TO = 18;
- private static final int MSG_RATE = 19;
- private static final int MSG_CUSTOM_ACTION = 20;
- private static final int MSG_ADJUST_VOLUME = 21;
- private static final int MSG_SET_VOLUME = 22;
- private static final int MSG_PLAY_PAUSE_KEY_DOUBLE_TAP_TIMEOUT = 23;
-
- private MediaSession.Callback mCallback;
- private RemoteUserInfo mCurrentControllerInfo;
-
- public CallbackMessageHandler(Looper looper, MediaSession.Callback callback) {
- super(looper);
- mCallback = callback;
- mCallback.mHandler = this;
- }
-
- public void post(RemoteUserInfo caller, int what, Object obj, Bundle data, long delayMs) {
- Pair<RemoteUserInfo, Object> objWithCaller = Pair.create(caller, obj);
- Message msg = obtainMessage(what, objWithCaller);
- msg.setAsynchronous(true);
- msg.setData(data);
- if (delayMs > 0) {
- sendMessageDelayed(msg, delayMs);
- } else {
- sendMessage(msg);
- }
- }
-
- @Override
- public void handleMessage(Message msg) {
- mCurrentControllerInfo = ((Pair<RemoteUserInfo, Object>) msg.obj).first;
-
- VolumeProvider vp;
- Object obj = ((Pair<RemoteUserInfo, Object>) msg.obj).second;
-
- switch (msg.what) {
- case MSG_COMMAND:
- Command cmd = (Command) obj;
- mCallback.onCommand(cmd.command, cmd.extras, cmd.stub);
- break;
- case MSG_MEDIA_BUTTON:
- mCallback.onMediaButtonEvent((Intent) obj);
- break;
- case MSG_PREPARE:
- mCallback.onPrepare();
- break;
- case MSG_PREPARE_MEDIA_ID:
- mCallback.onPrepareFromMediaId((String) obj, msg.getData());
- break;
- case MSG_PREPARE_SEARCH:
- mCallback.onPrepareFromSearch((String) obj, msg.getData());
- break;
- case MSG_PREPARE_URI:
- mCallback.onPrepareFromUri((Uri) obj, msg.getData());
- break;
- case MSG_PLAY:
- mCallback.onPlay();
- break;
- case MSG_PLAY_MEDIA_ID:
- mCallback.onPlayFromMediaId((String) obj, msg.getData());
- break;
- case MSG_PLAY_SEARCH:
- mCallback.onPlayFromSearch((String) obj, msg.getData());
- break;
- case MSG_PLAY_URI:
- mCallback.onPlayFromUri((Uri) obj, msg.getData());
- break;
- case MSG_SKIP_TO_ITEM:
- mCallback.onSkipToQueueItem((Long) obj);
- break;
- case MSG_PAUSE:
- mCallback.onPause();
- break;
- case MSG_STOP:
- mCallback.onStop();
- break;
- case MSG_NEXT:
- mCallback.onSkipToNext();
- break;
- case MSG_PREVIOUS:
- mCallback.onSkipToPrevious();
- break;
- case MSG_FAST_FORWARD:
- mCallback.onFastForward();
- break;
- case MSG_REWIND:
- mCallback.onRewind();
- break;
- case MSG_SEEK_TO:
- mCallback.onSeekTo((Long) obj);
- break;
- case MSG_RATE:
- mCallback.onSetRating((Rating) obj);
- break;
- case MSG_CUSTOM_ACTION:
- mCallback.onCustomAction((String) obj, msg.getData());
- break;
- case MSG_ADJUST_VOLUME:
- synchronized (mLock) {
- vp = mVolumeProvider;
- }
- if (vp != null) {
- vp.onAdjustVolume((int) obj);
- }
- break;
- case MSG_SET_VOLUME:
- synchronized (mLock) {
- vp = mVolumeProvider;
- }
- if (vp != null) {
- vp.onSetVolumeTo((int) obj);
- }
- break;
- case MSG_PLAY_PAUSE_KEY_DOUBLE_TAP_TIMEOUT:
- mCallback.handleMediaPlayPauseKeySingleTapIfPending();
- break;
- }
- mCurrentControllerInfo = null;
- }
- }
-}
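The CallbackMessageHandler removed above pairs the caller's RemoteUserInfo with the payload and posts an asynchronous message; a condensed sketch of that pattern (delay handling omitted) follows.

    import android.media.session.MediaSessionManager.RemoteUserInfo;
    import android.os.Bundle;
    import android.os.Handler;
    import android.os.Looper;
    import android.os.Message;
    import android.util.Pair;

    final class DispatchSketch extends Handler {
        DispatchSketch(Looper looper) {
            super(looper);
        }

        // Caller info rides along with the payload so the current controller
        // can be reported while the callback runs.
        void post(RemoteUserInfo caller, int what, Object payload, Bundle data) {
            Message msg = obtainMessage(what, Pair.create(caller, payload));
            msg.setAsynchronous(true);
            msg.setData(data);
            sendMessage(msg);
        }

        @Override
        @SuppressWarnings("unchecked")
        public void handleMessage(Message msg) {
            Pair<RemoteUserInfo, Object> pair = (Pair<RemoteUserInfo, Object>) msg.obj;
            RemoteUserInfo caller = pair.first;   // who issued the request
            Object payload = pair.second;         // command-specific argument
            // ...switch on msg.what and invoke the matching Callback method...
        }
    }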
diff --git a/packages/MediaComponents/apex/java/android/media/session/ParcelableVolumeInfo.aidl b/packages/MediaComponents/apex/java/android/media/session/ParcelableVolumeInfo.aidl
deleted file mode 100644
index c4250f0..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ParcelableVolumeInfo.aidl
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-package android.media.session;
-
-parcelable ParcelableVolumeInfo;
diff --git a/packages/MediaComponents/apex/java/android/media/session/ParcelableVolumeInfo.java b/packages/MediaComponents/apex/java/android/media/session/ParcelableVolumeInfo.java
deleted file mode 100644
index f59c975..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/ParcelableVolumeInfo.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Copyright 2014, The Android Open Source Project
- **
- ** Licensed under the Apache License, Version 2.0 (the "License");
- ** you may not use this file except in compliance with the License.
- ** You may obtain a copy of the License at
- **
- ** http://www.apache.org/licenses/LICENSE-2.0
- **
- ** Unless required by applicable law or agreed to in writing, software
- ** distributed under the License is distributed on an "AS IS" BASIS,
- ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- ** See the License for the specific language governing permissions and
- ** limitations under the License.
- */
-
-package android.media.session;
-
-import android.media.AudioAttributes;
-import android.os.Parcel;
-import android.os.Parcelable;
-
-/**
- * Convenience class for passing information about the audio configuration of a
- * session. The public implementation is {@link MediaController.PlaybackInfo}.
- *
- * @hide
- */
-public class ParcelableVolumeInfo implements Parcelable {
- public int volumeType;
- public AudioAttributes audioAttrs;
- public int controlType;
- public int maxVolume;
- public int currentVolume;
-
- public ParcelableVolumeInfo(int volumeType, AudioAttributes audioAttrs, int controlType,
- int maxVolume,
- int currentVolume) {
- this.volumeType = volumeType;
- this.audioAttrs = audioAttrs;
- this.controlType = controlType;
- this.maxVolume = maxVolume;
- this.currentVolume = currentVolume;
- }
-
- public ParcelableVolumeInfo(Parcel from) {
- volumeType = from.readInt();
- controlType = from.readInt();
- maxVolume = from.readInt();
- currentVolume = from.readInt();
- audioAttrs = AudioAttributes.CREATOR.createFromParcel(from);
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- dest.writeInt(volumeType);
- dest.writeInt(controlType);
- dest.writeInt(maxVolume);
- dest.writeInt(currentVolume);
- audioAttrs.writeToParcel(dest, flags);
- }
-
-
- public static final Parcelable.Creator<ParcelableVolumeInfo> CREATOR
- = new Parcelable.Creator<ParcelableVolumeInfo>() {
- @Override
- public ParcelableVolumeInfo createFromParcel(Parcel in) {
- return new ParcelableVolumeInfo(in);
- }
-
- @Override
- public ParcelableVolumeInfo[] newArray(int size) {
- return new ParcelableVolumeInfo[size];
- }
- };
-}
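As context for the removed class, a small sketch of a marshal/unmarshal round trip; note that writeToParcel() and the Parcel constructor above use the same field order (volume type, control type, max, current, then attributes). Field values are arbitrary examples.

    import android.media.AudioAttributes;
    import android.media.session.ParcelableVolumeInfo;
    import android.os.Parcel;

    final class VolumeInfoRoundTrip {
        static ParcelableVolumeInfo roundTrip() {
            ParcelableVolumeInfo info = new ParcelableVolumeInfo(
                    /* volumeType */ 1,
                    new AudioAttributes.Builder()
                            .setUsage(AudioAttributes.USAGE_MEDIA)
                            .build(),
                    /* controlType */ 2,
                    /* maxVolume */ 15,
                    /* currentVolume */ 7);
            Parcel parcel = Parcel.obtain();
            try {
                info.writeToParcel(parcel, 0);
                parcel.setDataPosition(0);   // rewind before reading back
                return ParcelableVolumeInfo.CREATOR.createFromParcel(parcel);
            } finally {
                parcel.recycle();
            }
        }
    }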
diff --git a/packages/MediaComponents/apex/java/android/media/session/PlaybackState.aidl b/packages/MediaComponents/apex/java/android/media/session/PlaybackState.aidl
deleted file mode 100644
index 0876ebd..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/PlaybackState.aidl
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright 2014, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-package android.media.session;
-
-parcelable PlaybackState;
diff --git a/packages/MediaComponents/apex/java/android/media/session/PlaybackState.java b/packages/MediaComponents/apex/java/android/media/session/PlaybackState.java
deleted file mode 100644
index ed4f9af..0000000
--- a/packages/MediaComponents/apex/java/android/media/session/PlaybackState.java
+++ /dev/null
@@ -1,1081 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media.session;
-
-import android.annotation.DrawableRes;
-import android.annotation.IntDef;
-import android.annotation.LongDef;
-import android.annotation.Nullable;
-import android.media.RemoteControlClient;
-import android.os.Bundle;
-import android.os.Parcel;
-import android.os.Parcelable;
-import android.os.SystemClock;
-import android.text.TextUtils;
-import java.util.ArrayList;
-import java.util.List;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-
-/**
- * Playback state for a {@link MediaSession}. This includes a state like
- * {@link PlaybackState#STATE_PLAYING}, the current playback position,
- * and the current control capabilities.
- */
-public final class PlaybackState implements Parcelable {
- private static final String TAG = "PlaybackState";
-
- /**
- * @hide
- */
- @LongDef(flag=true, value={ACTION_STOP, ACTION_PAUSE, ACTION_PLAY, ACTION_REWIND,
- ACTION_SKIP_TO_PREVIOUS, ACTION_SKIP_TO_NEXT, ACTION_FAST_FORWARD, ACTION_SET_RATING,
- ACTION_SEEK_TO, ACTION_PLAY_PAUSE, ACTION_PLAY_FROM_MEDIA_ID, ACTION_PLAY_FROM_SEARCH,
- ACTION_SKIP_TO_QUEUE_ITEM, ACTION_PLAY_FROM_URI, ACTION_PREPARE,
- ACTION_PREPARE_FROM_MEDIA_ID, ACTION_PREPARE_FROM_SEARCH, ACTION_PREPARE_FROM_URI})
- @Retention(RetentionPolicy.SOURCE)
- public @interface Actions {}
-
- /**
- * Indicates this session supports the stop command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_STOP = 1 << 0;
-
- /**
- * Indicates this session supports the pause command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PAUSE = 1 << 1;
-
- /**
- * Indicates this session supports the play command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PLAY = 1 << 2;
-
- /**
- * Indicates this session supports the rewind command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_REWIND = 1 << 3;
-
- /**
- * Indicates this session supports the previous command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_SKIP_TO_PREVIOUS = 1 << 4;
-
- /**
- * Indicates this session supports the next command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_SKIP_TO_NEXT = 1 << 5;
-
- /**
- * Indicates this session supports the fast forward command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_FAST_FORWARD = 1 << 6;
-
- /**
- * Indicates this session supports the set rating command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_SET_RATING = 1 << 7;
-
- /**
- * Indicates this session supports the seek to command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_SEEK_TO = 1 << 8;
-
- /**
- * Indicates this session supports the play/pause toggle command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PLAY_PAUSE = 1 << 9;
-
- /**
- * Indicates this session supports the play from media id command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PLAY_FROM_MEDIA_ID = 1 << 10;
-
- /**
- * Indicates this session supports the play from search command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PLAY_FROM_SEARCH = 1 << 11;
-
- /**
- * Indicates this session supports the skip to queue item command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_SKIP_TO_QUEUE_ITEM = 1 << 12;
-
- /**
- * Indicates this session supports the play from URI command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PLAY_FROM_URI = 1 << 13;
-
- /**
- * Indicates this session supports the prepare command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PREPARE = 1 << 14;
-
- /**
- * Indicates this session supports the prepare from media id command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PREPARE_FROM_MEDIA_ID = 1 << 15;
-
- /**
- * Indicates this session supports the prepare from search command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PREPARE_FROM_SEARCH = 1 << 16;
-
- /**
- * Indicates this session supports the prepare from URI command.
- *
- * @see Builder#setActions(long)
- */
- public static final long ACTION_PREPARE_FROM_URI = 1 << 17;
-
- /**
- * @hide
- */
- @IntDef({STATE_NONE, STATE_STOPPED, STATE_PAUSED, STATE_PLAYING, STATE_FAST_FORWARDING,
- STATE_REWINDING, STATE_BUFFERING, STATE_ERROR, STATE_CONNECTING,
- STATE_SKIPPING_TO_PREVIOUS, STATE_SKIPPING_TO_NEXT, STATE_SKIPPING_TO_QUEUE_ITEM})
- @Retention(RetentionPolicy.SOURCE)
- public @interface State {}
-
- /**
- * This is the default playback state and indicates that no media has been
- * added yet, or the performer has been reset and has no content to play.
- *
- * @see Builder#setState(int, long, float)
- * @see Builder#setState(int, long, float, long)
- */
- public final static int STATE_NONE = 0;
-
- /**
- * State indicating this item is currently stopped.
- *
- * @see Builder#setState
- */
- public final static int STATE_STOPPED = 1;
-
- /**
- * State indicating this item is currently paused.
- *
- * @see Builder#setState
- */
- public final static int STATE_PAUSED = 2;
-
- /**
- * State indicating this item is currently playing.
- *
- * @see Builder#setState
- */
- public final static int STATE_PLAYING = 3;
-
- /**
- * State indicating this item is currently fast forwarding.
- *
- * @see Builder#setState
- */
- public final static int STATE_FAST_FORWARDING = 4;
-
- /**
- * State indicating this item is currently rewinding.
- *
- * @see Builder#setState
- */
- public final static int STATE_REWINDING = 5;
-
- /**
- * State indicating this item is currently buffering and will begin playing
- * when enough data has buffered.
- *
- * @see Builder#setState
- */
- public final static int STATE_BUFFERING = 6;
-
- /**
- * State indicating this item is currently in an error state. The error
- * message should also be set when entering this state.
- *
- * @see Builder#setState
- */
- public final static int STATE_ERROR = 7;
-
- /**
- * State indicating the class doing playback is currently connecting to a
- * new destination. Depending on the implementation you may return to the previous
- * state when the connection finishes or enter {@link #STATE_NONE}.
- * If the connection fails, {@link #STATE_ERROR} should be used.
- *
- * @see Builder#setState
- */
- public final static int STATE_CONNECTING = 8;
-
- /**
- * State indicating the player is currently skipping to the previous item.
- *
- * @see Builder#setState
- */
- public final static int STATE_SKIPPING_TO_PREVIOUS = 9;
-
- /**
- * State indicating the player is currently skipping to the next item.
- *
- * @see Builder#setState
- */
- public final static int STATE_SKIPPING_TO_NEXT = 10;
-
- /**
- * State indicating the player is currently skipping to a specific item in
- * the queue.
- *
- * @see Builder#setState
- */
- public final static int STATE_SKIPPING_TO_QUEUE_ITEM = 11;
-
- /**
- * Use this value for the position to indicate the position is not known.
- */
- public final static long PLAYBACK_POSITION_UNKNOWN = -1;
-
- private final int mState;
- private final long mPosition;
- private final long mBufferedPosition;
- private final float mSpeed;
- private final long mActions;
- private List<PlaybackState.CustomAction> mCustomActions;
- private final CharSequence mErrorMessage;
- private final long mUpdateTime;
- private final long mActiveItemId;
- private final Bundle mExtras;
-
- private PlaybackState(int state, long position, long updateTime, float speed,
- long bufferedPosition, long transportControls,
- List<PlaybackState.CustomAction> customActions, long activeItemId,
- CharSequence error, Bundle extras) {
- mState = state;
- mPosition = position;
- mSpeed = speed;
- mUpdateTime = updateTime;
- mBufferedPosition = bufferedPosition;
- mActions = transportControls;
- mCustomActions = new ArrayList<>(customActions);
- mActiveItemId = activeItemId;
- mErrorMessage = error;
- mExtras = extras;
- }
-
- private PlaybackState(Parcel in) {
- mState = in.readInt();
- mPosition = in.readLong();
- mSpeed = in.readFloat();
- mUpdateTime = in.readLong();
- mBufferedPosition = in.readLong();
- mActions = in.readLong();
- mCustomActions = in.createTypedArrayList(CustomAction.CREATOR);
- mActiveItemId = in.readLong();
- mErrorMessage = TextUtils.CHAR_SEQUENCE_CREATOR.createFromParcel(in);
- mExtras = in.readBundle();
- }
-
- @Override
- public String toString() {
- StringBuilder bob = new StringBuilder("PlaybackState {");
- bob.append("state=").append(mState);
- bob.append(", position=").append(mPosition);
- bob.append(", buffered position=").append(mBufferedPosition);
- bob.append(", speed=").append(mSpeed);
- bob.append(", updated=").append(mUpdateTime);
- bob.append(", actions=").append(mActions);
- bob.append(", custom actions=").append(mCustomActions);
- bob.append(", active item id=").append(mActiveItemId);
- bob.append(", error=").append(mErrorMessage);
- bob.append("}");
- return bob.toString();
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- dest.writeInt(mState);
- dest.writeLong(mPosition);
- dest.writeFloat(mSpeed);
- dest.writeLong(mUpdateTime);
- dest.writeLong(mBufferedPosition);
- dest.writeLong(mActions);
- dest.writeTypedList(mCustomActions);
- dest.writeLong(mActiveItemId);
- TextUtils.writeToParcel(mErrorMessage, dest, 0);
- dest.writeBundle(mExtras);
- }
-
- /**
- * Get the current state of playback. One of the following:
- * <ul>
- * <li> {@link PlaybackState#STATE_NONE}</li>
- * <li> {@link PlaybackState#STATE_STOPPED}</li>
- * <li> {@link PlaybackState#STATE_PLAYING}</li>
- * <li> {@link PlaybackState#STATE_PAUSED}</li>
- * <li> {@link PlaybackState#STATE_FAST_FORWARDING}</li>
- * <li> {@link PlaybackState#STATE_REWINDING}</li>
- * <li> {@link PlaybackState#STATE_BUFFERING}</li>
- * <li> {@link PlaybackState#STATE_ERROR}</li>
- * <li> {@link PlaybackState#STATE_CONNECTING}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_PREVIOUS}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_NEXT}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_QUEUE_ITEM}</li>
- * </ul>
- */
- @State
- public int getState() {
- return mState;
- }
-
- /**
- * Get the current playback position in ms.
- */
- public long getPosition() {
- return mPosition;
- }
-
- /**
- * Get the current buffered position in ms. This is the farthest playback
- * point that can be reached from the current position using only buffered
- * content.
- */
- public long getBufferedPosition() {
- return mBufferedPosition;
- }
-
- /**
- * Get the current playback speed as a multiple of normal playback. This
- * should be negative when rewinding. A value of 1 means normal playback and
- * 0 means paused.
- *
- * @return The current speed of playback.
- */
- public float getPlaybackSpeed() {
- return mSpeed;
- }
-
- /**
- * Get the current actions available on this session. This should use a
- * bitmask of the available actions.
- * <ul>
- * <li> {@link PlaybackState#ACTION_SKIP_TO_PREVIOUS}</li>
- * <li> {@link PlaybackState#ACTION_REWIND}</li>
- * <li> {@link PlaybackState#ACTION_PLAY}</li>
- * <li> {@link PlaybackState#ACTION_PAUSE}</li>
- * <li> {@link PlaybackState#ACTION_STOP}</li>
- * <li> {@link PlaybackState#ACTION_FAST_FORWARD}</li>
- * <li> {@link PlaybackState#ACTION_SKIP_TO_NEXT}</li>
- * <li> {@link PlaybackState#ACTION_SEEK_TO}</li>
- * <li> {@link PlaybackState#ACTION_SET_RATING}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_PAUSE}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_FROM_MEDIA_ID}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_FROM_SEARCH}</li>
- * <li> {@link PlaybackState#ACTION_SKIP_TO_QUEUE_ITEM}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_FROM_URI}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE_FROM_MEDIA_ID}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE_FROM_SEARCH}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE_FROM_URI}</li>
- * </ul>
- */
- @Actions
- public long getActions() {
- return mActions;
- }
-
- /**
- * Get the list of custom actions.
- */
- public List<PlaybackState.CustomAction> getCustomActions() {
- return mCustomActions;
- }
-
- /**
- * Get a user readable error message. This should be set when the state is
- * {@link PlaybackState#STATE_ERROR}.
- */
- public CharSequence getErrorMessage() {
- return mErrorMessage;
- }
-
- /**
- * Get the elapsed real time at which position was last updated. If the
- * position has never been set, this will return 0.
- *
- * @return The last time the position was updated.
- */
- public long getLastPositionUpdateTime() {
- return mUpdateTime;
- }
-
- /**
- * Get the id of the currently active item in the queue. If there is no
- * queue or a queue is not supported by the session this will be
- * {@link MediaSession.QueueItem#UNKNOWN_ID}.
- *
- * @return The id of the currently active item in the queue or
- * {@link MediaSession.QueueItem#UNKNOWN_ID}.
- */
- public long getActiveQueueItemId() {
- return mActiveItemId;
- }
-
- /**
- * Get any custom extras that were set on this playback state.
- *
- * @return The extras for this state or null.
- */
- public @Nullable Bundle getExtras() {
- return mExtras;
- }
-
- /**
- * Get the {@link PlaybackState} state for the given
- * {@link RemoteControlClient} state.
- *
- * @param rccState The state used by {@link RemoteControlClient}.
- * @return The equivalent state used by {@link PlaybackState}.
- * @hide
- */
- public static int getStateFromRccState(int rccState) {
- switch (rccState) {
- case RemoteControlClient.PLAYSTATE_BUFFERING:
- return STATE_BUFFERING;
- case RemoteControlClient.PLAYSTATE_ERROR:
- return STATE_ERROR;
- case RemoteControlClient.PLAYSTATE_FAST_FORWARDING:
- return STATE_FAST_FORWARDING;
- //RemoteControlClient.PLAYSTATE_NONE is hidden
- case 0: //RemoteControlClient.PLAYSTATE_NONE:
- return STATE_NONE;
- case RemoteControlClient.PLAYSTATE_PAUSED:
- return STATE_PAUSED;
- case RemoteControlClient.PLAYSTATE_PLAYING:
- return STATE_PLAYING;
- case RemoteControlClient.PLAYSTATE_REWINDING:
- return STATE_REWINDING;
- case RemoteControlClient.PLAYSTATE_SKIPPING_BACKWARDS:
- return STATE_SKIPPING_TO_PREVIOUS;
- case RemoteControlClient.PLAYSTATE_SKIPPING_FORWARDS:
- return STATE_SKIPPING_TO_NEXT;
- case RemoteControlClient.PLAYSTATE_STOPPED:
- return STATE_STOPPED;
- default:
- return -1;
- }
- }
-
- /**
- * Get the {@link RemoteControlClient} state for the given
- * {@link PlaybackState} state.
- *
- * @param state The state used by {@link PlaybackState}.
- * @return The equivalent state used by {@link RemoteControlClient}.
- * @hide
- */
- public static int getRccStateFromState(int state) {
- switch (state) {
- case STATE_BUFFERING:
- return RemoteControlClient.PLAYSTATE_BUFFERING;
- case STATE_ERROR:
- return RemoteControlClient.PLAYSTATE_ERROR;
- case STATE_FAST_FORWARDING:
- return RemoteControlClient.PLAYSTATE_FAST_FORWARDING;
- case STATE_NONE:
- //RemoteControlClient.PLAYSTATE_NONE is hidden
- return 0; //RemoteControlClient.PLAYSTATE_NONE;
- case STATE_PAUSED:
- return RemoteControlClient.PLAYSTATE_PAUSED;
- case STATE_PLAYING:
- return RemoteControlClient.PLAYSTATE_PLAYING;
- case STATE_REWINDING:
- return RemoteControlClient.PLAYSTATE_REWINDING;
- case STATE_SKIPPING_TO_PREVIOUS:
- return RemoteControlClient.PLAYSTATE_SKIPPING_BACKWARDS;
- case STATE_SKIPPING_TO_NEXT:
- return RemoteControlClient.PLAYSTATE_SKIPPING_FORWARDS;
- case STATE_STOPPED:
- return RemoteControlClient.PLAYSTATE_STOPPED;
- default:
- return -1;
- }
- }
-
- /**
- * @hide
- */
- public static long getActionsFromRccControlFlags(int rccFlags) {
- long actions = 0;
- long flag = 1;
- while (flag <= rccFlags) {
- if ((flag & rccFlags) != 0) {
- actions |= getActionForRccFlag((int) flag);
- }
- flag = flag << 1;
- }
- return actions;
- }
-
- /**
- * @hide
- */
- public static int getRccControlFlagsFromActions(long actions) {
- int rccFlags = 0;
- long action = 1;
- while (action <= actions && action < Integer.MAX_VALUE) {
- if ((action & actions) != 0) {
- rccFlags |= getRccFlagForAction(action);
- }
- action = action << 1;
- }
- return rccFlags;
- }
-
- private static long getActionForRccFlag(int flag) {
- switch (flag) {
- case RemoteControlClient.FLAG_KEY_MEDIA_PREVIOUS:
- return ACTION_SKIP_TO_PREVIOUS;
- case RemoteControlClient.FLAG_KEY_MEDIA_REWIND:
- return ACTION_REWIND;
- case RemoteControlClient.FLAG_KEY_MEDIA_PLAY:
- return ACTION_PLAY;
- case RemoteControlClient.FLAG_KEY_MEDIA_PLAY_PAUSE:
- return ACTION_PLAY_PAUSE;
- case RemoteControlClient.FLAG_KEY_MEDIA_PAUSE:
- return ACTION_PAUSE;
- case RemoteControlClient.FLAG_KEY_MEDIA_STOP:
- return ACTION_STOP;
- case RemoteControlClient.FLAG_KEY_MEDIA_FAST_FORWARD:
- return ACTION_FAST_FORWARD;
- case RemoteControlClient.FLAG_KEY_MEDIA_NEXT:
- return ACTION_SKIP_TO_NEXT;
- case RemoteControlClient.FLAG_KEY_MEDIA_POSITION_UPDATE:
- return ACTION_SEEK_TO;
- case RemoteControlClient.FLAG_KEY_MEDIA_RATING:
- return ACTION_SET_RATING;
- }
- return 0;
- }
-
- private static int getRccFlagForAction(long action) {
- // We only care about the lower set of actions that can map to rcc
- // flags.
- int testAction = action < Integer.MAX_VALUE ? (int) action : 0;
- switch (testAction) {
- case (int) ACTION_SKIP_TO_PREVIOUS:
- return RemoteControlClient.FLAG_KEY_MEDIA_PREVIOUS;
- case (int) ACTION_REWIND:
- return RemoteControlClient.FLAG_KEY_MEDIA_REWIND;
- case (int) ACTION_PLAY:
- return RemoteControlClient.FLAG_KEY_MEDIA_PLAY;
- case (int) ACTION_PLAY_PAUSE:
- return RemoteControlClient.FLAG_KEY_MEDIA_PLAY_PAUSE;
- case (int) ACTION_PAUSE:
- return RemoteControlClient.FLAG_KEY_MEDIA_PAUSE;
- case (int) ACTION_STOP:
- return RemoteControlClient.FLAG_KEY_MEDIA_STOP;
- case (int) ACTION_FAST_FORWARD:
- return RemoteControlClient.FLAG_KEY_MEDIA_FAST_FORWARD;
- case (int) ACTION_SKIP_TO_NEXT:
- return RemoteControlClient.FLAG_KEY_MEDIA_NEXT;
- case (int) ACTION_SEEK_TO:
- return RemoteControlClient.FLAG_KEY_MEDIA_POSITION_UPDATE;
- case (int) ACTION_SET_RATING:
- return RemoteControlClient.FLAG_KEY_MEDIA_RATING;
- }
- return 0;
- }
-
- public static final Parcelable.Creator<PlaybackState> CREATOR =
- new Parcelable.Creator<PlaybackState>() {
- @Override
- public PlaybackState createFromParcel(Parcel in) {
- return new PlaybackState(in);
- }
-
- @Override
- public PlaybackState[] newArray(int size) {
- return new PlaybackState[size];
- }
- };
-
- /**
- * {@link PlaybackState.CustomAction CustomActions} can be used to extend the capabilities of
- * the standard transport controls by exposing app specific actions to
- * {@link MediaController MediaControllers}.
- */
- public static final class CustomAction implements Parcelable {
- private final String mAction;
- private final CharSequence mName;
- private final int mIcon;
- private final Bundle mExtras;
-
- /**
- * Use {@link PlaybackState.CustomAction.Builder#build()}.
- */
- private CustomAction(String action, CharSequence name, int icon, Bundle extras) {
- mAction = action;
- mName = name;
- mIcon = icon;
- mExtras = extras;
- }
-
- private CustomAction(Parcel in) {
- mAction = in.readString();
- mName = TextUtils.CHAR_SEQUENCE_CREATOR.createFromParcel(in);
- mIcon = in.readInt();
- mExtras = in.readBundle();
- }
-
- @Override
- public void writeToParcel(Parcel dest, int flags) {
- dest.writeString(mAction);
- TextUtils.writeToParcel(mName, dest, flags);
- dest.writeInt(mIcon);
- dest.writeBundle(mExtras);
- }
-
- @Override
- public int describeContents() {
- return 0;
- }
-
- public static final Parcelable.Creator<PlaybackState.CustomAction> CREATOR
- = new Parcelable.Creator<PlaybackState.CustomAction>() {
-
- @Override
- public PlaybackState.CustomAction createFromParcel(Parcel p) {
- return new PlaybackState.CustomAction(p);
- }
-
- @Override
- public PlaybackState.CustomAction[] newArray(int size) {
- return new PlaybackState.CustomAction[size];
- }
- };
-
- /**
- * Returns the action of the {@link CustomAction}.
- *
- * @return The action of the {@link CustomAction}.
- */
- public String getAction() {
- return mAction;
- }
-
- /**
- * Returns the display name of this action. e.g. "Favorite"
- *
- * @return The display name of this {@link CustomAction}.
- */
- public CharSequence getName() {
- return mName;
- }
-
- /**
- * Returns the resource id of the icon in the {@link MediaSession MediaSession's} package.
- *
- * @return The resource id of the icon in the {@link MediaSession MediaSession's} package.
- */
- public int getIcon() {
- return mIcon;
- }
-
- /**
- * Returns extras which provide additional application-specific information about the
- * action, or null if none. These arguments are meant to be consumed by a
- * {@link MediaController} if it knows how to handle them.
- *
- * @return Optional arguments for the {@link CustomAction}.
- */
- public Bundle getExtras() {
- return mExtras;
- }
-
- @Override
- public String toString() {
- return "Action:" +
- "mName='" + mName +
- ", mIcon=" + mIcon +
- ", mExtras=" + mExtras;
- }
-
- /**
- * Builder for {@link CustomAction} objects.
- */
- public static final class Builder {
- private final String mAction;
- private final CharSequence mName;
- private final int mIcon;
- private Bundle mExtras;
-
- /**
- * Creates a {@link CustomAction} builder with the id, name, and icon set.
- *
- * @param action The action of the {@link CustomAction}.
- * @param name The display name of the {@link CustomAction}. This name will be displayed
- * along side the action if the UI supports it.
- * @param icon The icon resource id of the {@link CustomAction}. This resource id
- * must be in the same package as the {@link MediaSession}. It will be
- * displayed with the custom action if the UI supports it.
- */
- public Builder(String action, CharSequence name, @DrawableRes int icon) {
- if (TextUtils.isEmpty(action)) {
- throw new IllegalArgumentException(
- "You must specify an action to build a CustomAction.");
- }
- if (TextUtils.isEmpty(name)) {
- throw new IllegalArgumentException(
- "You must specify a name to build a CustomAction.");
- }
- if (icon == 0) {
- throw new IllegalArgumentException(
- "You must specify an icon resource id to build a CustomAction.");
- }
- mAction = action;
- mName = name;
- mIcon = icon;
- }
-
- /**
- * Set optional extras for the {@link CustomAction}. These extras are meant to be
- * consumed by a {@link MediaController} if it knows how to handle them.
- * Keys should be fully qualified (e.g. "com.example.MY_ARG") to avoid collisions.
- *
- * @param extras Optional extras for the {@link CustomAction}.
- * @return this.
- */
- public Builder setExtras(Bundle extras) {
- mExtras = extras;
- return this;
- }
-
- /**
- * Build and return the {@link CustomAction} instance with the specified values.
- *
- * @return A new {@link CustomAction} instance.
- */
- public CustomAction build() {
- return new CustomAction(mAction, mName, mIcon, mExtras);
- }
- }
- }
-
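A brief sketch of building a custom action with the Builder removed above; the action id, label, and extras key are hypothetical placeholders.

    import android.media.session.PlaybackState;
    import android.os.Bundle;

    final class CustomActionSketch {
        static PlaybackState.CustomAction thumbsUp(int iconResId) {
            Bundle extras = new Bundle();
            extras.putBoolean("com.example.EXTRA_TOGGLED", true);   // hypothetical key
            return new PlaybackState.CustomAction.Builder(
                            "com.example.action.THUMBS_UP",   // hypothetical action id
                            "Thumbs up",                      // label shown by controllers
                            iconResId)                        // must resolve in the session's package
                    .setExtras(extras)
                    .build();
        }
    }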
- /**
- * Builder for {@link PlaybackState} objects.
- */
- public static final class Builder {
- private final List<PlaybackState.CustomAction> mCustomActions = new ArrayList<>();
-
- private int mState;
- private long mPosition;
- private long mBufferedPosition;
- private float mSpeed;
- private long mActions;
- private CharSequence mErrorMessage;
- private long mUpdateTime;
- private long mActiveItemId = MediaSession.QueueItem.UNKNOWN_ID;
- private Bundle mExtras;
-
- /**
- * Creates an initially empty state builder.
- */
- public Builder() {
- }
-
- /**
- * Creates a builder with the same initial values as those in the from
- * state.
- *
- * @param from The state to use for initializing the builder.
- */
- public Builder(PlaybackState from) {
- if (from == null) {
- return;
- }
- mState = from.mState;
- mPosition = from.mPosition;
- mBufferedPosition = from.mBufferedPosition;
- mSpeed = from.mSpeed;
- mActions = from.mActions;
- if (from.mCustomActions != null) {
- mCustomActions.addAll(from.mCustomActions);
- }
- mErrorMessage = from.mErrorMessage;
- mUpdateTime = from.mUpdateTime;
- mActiveItemId = from.mActiveItemId;
- mExtras = from.mExtras;
- }
-
- /**
- * Set the current state of playback.
- * <p>
- * The position must be in ms and indicates the current playback
- * position within the item. If the position is unknown use
- * {@link #PLAYBACK_POSITION_UNKNOWN}. When not using an unknown
- * position the time at which the position was updated must be provided.
- * It is okay to use {@link SystemClock#elapsedRealtime()} if the
- * current position was just retrieved.
- * <p>
- * The speed is a multiple of normal playback and should be 0 when
- * paused and negative when rewinding. Normal playback speed is 1.0.
- * <p>
- * The state must be one of the following:
- * <ul>
- * <li> {@link PlaybackState#STATE_NONE}</li>
- * <li> {@link PlaybackState#STATE_STOPPED}</li>
- * <li> {@link PlaybackState#STATE_PLAYING}</li>
- * <li> {@link PlaybackState#STATE_PAUSED}</li>
- * <li> {@link PlaybackState#STATE_FAST_FORWARDING}</li>
- * <li> {@link PlaybackState#STATE_REWINDING}</li>
- * <li> {@link PlaybackState#STATE_BUFFERING}</li>
- * <li> {@link PlaybackState#STATE_ERROR}</li>
- * <li> {@link PlaybackState#STATE_CONNECTING}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_PREVIOUS}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_NEXT}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_QUEUE_ITEM}</li>
- * </ul>
- *
- * @param state The current state of playback.
- * @param position The position in the current item in ms.
- * @param playbackSpeed The current speed of playback as a multiple of
- * normal playback.
- * @param updateTime The time in the {@link SystemClock#elapsedRealtime}
- * timebase that the position was updated at.
- * @return this
- */
- public Builder setState(@State int state, long position, float playbackSpeed,
- long updateTime) {
- mState = state;
- mPosition = position;
- mUpdateTime = updateTime;
- mSpeed = playbackSpeed;
- return this;
- }
-
- /**
- * Set the current state of playback.
- * <p>
- * The position must be in ms and indicates the current playback
- * position within the item. If the position is unknown use
- * {@link #PLAYBACK_POSITION_UNKNOWN}. The update time will be set to
- * the current {@link SystemClock#elapsedRealtime()}.
- * <p>
- * The speed is a multiple of normal playback and should be 0 when
- * paused and negative when rewinding. Normal playback speed is 1.0.
- * <p>
- * The state must be one of the following:
- * <ul>
- * <li> {@link PlaybackState#STATE_NONE}</li>
- * <li> {@link PlaybackState#STATE_STOPPED}</li>
- * <li> {@link PlaybackState#STATE_PLAYING}</li>
- * <li> {@link PlaybackState#STATE_PAUSED}</li>
- * <li> {@link PlaybackState#STATE_FAST_FORWARDING}</li>
- * <li> {@link PlaybackState#STATE_REWINDING}</li>
- * <li> {@link PlaybackState#STATE_BUFFERING}</li>
- * <li> {@link PlaybackState#STATE_ERROR}</li>
- * <li> {@link PlaybackState#STATE_CONNECTING}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_PREVIOUS}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_NEXT}</li>
- * <li> {@link PlaybackState#STATE_SKIPPING_TO_QUEUE_ITEM}</li>
- * </ul>
- *
- * @param state The current state of playback.
- * @param position The position in the current item in ms.
- * @param playbackSpeed The current speed of playback as a multiple of
- * normal playback.
- * @return this
- */
- public Builder setState(@State int state, long position, float playbackSpeed) {
- return setState(state, position, playbackSpeed, SystemClock.elapsedRealtime());
- }
-
- /**
- * Set the current actions available on this session. This should use a
- * bitmask of possible actions.
- * <ul>
- * <li> {@link PlaybackState#ACTION_SKIP_TO_PREVIOUS}</li>
- * <li> {@link PlaybackState#ACTION_REWIND}</li>
- * <li> {@link PlaybackState#ACTION_PLAY}</li>
- * <li> {@link PlaybackState#ACTION_PAUSE}</li>
- * <li> {@link PlaybackState#ACTION_STOP}</li>
- * <li> {@link PlaybackState#ACTION_FAST_FORWARD}</li>
- * <li> {@link PlaybackState#ACTION_SKIP_TO_NEXT}</li>
- * <li> {@link PlaybackState#ACTION_SEEK_TO}</li>
- * <li> {@link PlaybackState#ACTION_SET_RATING}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_PAUSE}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_FROM_MEDIA_ID}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_FROM_SEARCH}</li>
- * <li> {@link PlaybackState#ACTION_SKIP_TO_QUEUE_ITEM}</li>
- * <li> {@link PlaybackState#ACTION_PLAY_FROM_URI}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE_FROM_MEDIA_ID}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE_FROM_SEARCH}</li>
- * <li> {@link PlaybackState#ACTION_PREPARE_FROM_URI}</li>
- * </ul>
- *
- * @param actions The set of actions allowed.
- * @return this
- */
- public Builder setActions(@Actions long actions) {
- mActions = actions;
- return this;
- }
-
- /**
- * Add a custom action to the playback state. Actions can be used to
- * expose additional functionality to {@link MediaController
- * MediaControllers} beyond what is offered by the standard transport
- * controls.
- * <p>
- * e.g. start a radio station based on the current item or skip ahead by
- * 30 seconds.
- *
- * @param action An identifier for this action. It can be sent back to
- * the {@link MediaSession} through
- * {@link MediaController.TransportControls#sendCustomAction(String, Bundle)}.
- * @param name The display name for the action. If text is shown with
- * the action or used for accessibility, this is what should
- * be used.
- * @param icon The resource id of the icon that should be displayed
- * for the action. The resource should be in the package of
- * the {@link MediaSession}.
- * @return this
- */
- public Builder addCustomAction(String action, String name, int icon) {
- return addCustomAction(new PlaybackState.CustomAction(action, name, icon, null));
- }
-
- /**
- * Add a custom action to the playback state. Actions can be used to expose additional
- * functionality to {@link MediaController MediaControllers} beyond what is offered by the
- * standard transport controls.
- * <p>
- * An example of an action would be to start a radio station based on the current item
- * or to skip ahead by 30 seconds.
- *
- * @param customAction The custom action to add to the {@link PlaybackState}.
- * @return this
- */
- public Builder addCustomAction(PlaybackState.CustomAction customAction) {
- if (customAction == null) {
- throw new IllegalArgumentException(
- "You may not add a null CustomAction to PlaybackState.");
- }
- mCustomActions.add(customAction);
- return this;
- }
-
- /**
- * Set the current buffered position in ms. This is the farthest
- * playback point that can be reached from the current position using
- * only buffered content.
- *
- * @param bufferedPosition The position in ms that playback is buffered
- * to.
- * @return this
- */
- public Builder setBufferedPosition(long bufferedPosition) {
- mBufferedPosition = bufferedPosition;
- return this;
- }
-
- /**
- * Set the active item in the play queue by specifying its id. The
- * default value is {@link MediaSession.QueueItem#UNKNOWN_ID}
- *
- * @param id The id of the active item.
- * @return this
- */
- public Builder setActiveQueueItemId(long id) {
- mActiveItemId = id;
- return this;
- }
-
- /**
- * Set a user readable error message. This should be set when the state
- * is {@link PlaybackState#STATE_ERROR}.
- *
- * @param error The error message for display to the user.
- * @return this
- */
- public Builder setErrorMessage(CharSequence error) {
- mErrorMessage = error;
- return this;
- }
-
- /**
- * Set any custom extras to be included with the playback state.
- *
- * @param extras The extras to include.
- * @return this
- */
- public Builder setExtras(Bundle extras) {
- mExtras = extras;
- return this;
- }
-
- /**
- * Build and return the {@link PlaybackState} instance with these
- * values.
- *
- * @return A new state instance.
- */
- public PlaybackState build() {
- return new PlaybackState(mState, mPosition, mUpdateTime, mSpeed, mBufferedPosition,
- mActions, mCustomActions, mActiveItemId, mErrorMessage, mExtras);
- }
- }
-}
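For reference, a minimal sketch of assembling a PlaybackState with the Builder removed above; the position, speed, and action set are arbitrary examples.

    import android.media.session.PlaybackState;

    final class PlaybackStateSketch {
        static PlaybackState playingAt(long positionMs) {
            return new PlaybackState.Builder()
                    .setActions(PlaybackState.ACTION_PLAY_PAUSE
                            | PlaybackState.ACTION_SEEK_TO
                            | PlaybackState.ACTION_SKIP_TO_NEXT)
                    // This overload stamps the update time with SystemClock.elapsedRealtime().
                    .setState(PlaybackState.STATE_PLAYING, positionMs, 1.0f)
                    .setBufferedPosition(positionMs + 30_000)   // e.g. 30 s of buffered content
                    .build();
        }
    }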
diff --git a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserService.aidl b/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserService.aidl
deleted file mode 100644
index 84f41f6..0000000
--- a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserService.aidl
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-
-package android.service.media;
-
-import android.content.res.Configuration;
-import android.service.media.IMediaBrowserServiceCallbacks;
-import android.net.Uri;
-import android.os.Bundle;
-import android.os.ResultReceiver;
-
-/**
- * The Media API allows clients to browse through the hierarchy of a user’s media collection,
- * play back a specific media entry, and interact with the now playing queue.
- * @hide
- */
-oneway interface IMediaBrowserService {
- void connect(String pkg, in Bundle rootHints, IMediaBrowserServiceCallbacks callbacks);
- void disconnect(IMediaBrowserServiceCallbacks callbacks);
-
- void addSubscriptionDeprecated(String uri, IMediaBrowserServiceCallbacks callbacks);
- void removeSubscriptionDeprecated(String uri, IMediaBrowserServiceCallbacks callbacks);
-
- void getMediaItem(String uri, in ResultReceiver cb, IMediaBrowserServiceCallbacks callbacks);
- void addSubscription(String uri, in IBinder token, in Bundle options,
- IMediaBrowserServiceCallbacks callbacks);
- void removeSubscription(String uri, in IBinder token, IMediaBrowserServiceCallbacks callbacks);
-}
diff --git a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl b/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
deleted file mode 100644
index 8dc480d..0000000
--- a/packages/MediaComponents/apex/java/android/service/media/IMediaBrowserServiceCallbacks.aidl
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-
-package android.service.media;
-
-import android.graphics.Bitmap;
-import android.media.MediaParceledListSlice;
-import android.media.session.MediaSession;
-import android.os.Bundle;
-
-/**
- * The Media API allows clients to browse through the hierarchy of a user’s media collection,
- * play back a specific media entry, and interact with the now playing queue.
- * @hide
- */
-oneway interface IMediaBrowserServiceCallbacks {
- /**
- * Invoked when the connection has been established.
- * @param root The root media id for browsing.
- * @param session The {@link MediaSession.Token media session token} that can be used to control
- * the playback of the media app.
- * @param extra Extras returned by the media service.
- */
- void onConnect(String root, in MediaSession.Token session, in Bundle extras);
- void onConnectFailed();
- void onLoadChildren(String mediaId, in MediaParceledListSlice list);
- void onLoadChildrenWithOptions(String mediaId, in MediaParceledListSlice list,
- in Bundle options);
-}
diff --git a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java b/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
deleted file mode 100644
index a66ec35..0000000
--- a/packages/MediaComponents/apex/java/android/service/media/MediaBrowserService.java
+++ /dev/null
@@ -1,857 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.service.media;
-
-import android.annotation.IntDef;
-import android.annotation.NonNull;
-import android.annotation.Nullable;
-import android.annotation.SdkConstant;
-import android.annotation.SdkConstant.SdkConstantType;
-import android.annotation.UnsupportedAppUsage;
-import android.app.Service;
-import android.content.Intent;
-import android.content.pm.PackageManager;
-import android.media.MediaParceledListSlice;
-import android.media.browse.MediaBrowser;
-import android.media.browse.MediaBrowserUtils;
-import android.media.session.MediaSession;
-import android.os.Binder;
-import android.os.Bundle;
-import android.os.Handler;
-import android.media.session.MediaSessionManager;
-import android.media.session.MediaSessionManager.RemoteUserInfo;
-import android.os.IBinder;
-import android.os.RemoteException;
-import android.os.ResultReceiver;
-import android.service.media.IMediaBrowserService;
-import android.service.media.IMediaBrowserServiceCallbacks;
-import android.text.TextUtils;
-import android.util.ArrayMap;
-import android.util.Log;
-import android.util.Pair;
-
-import java.io.FileDescriptor;
-import java.io.PrintWriter;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-
-/**
- * Base class for media browser services.
- * <p>
- * Media browser services enable applications to browse media content provided by an application
- * and ask the application to start playing it. They may also be used to control content that
- * is already playing by way of a {@link MediaSession}.
- * </p>
- *
- * To extend this class, you must declare the service in your manifest file with
- * an intent filter with the {@link #SERVICE_INTERFACE} action.
- *
- * For example:
- * </p><pre>
- * <service android:name=".MyMediaBrowserService"
- * android:label="@string/service_name" >
- * <intent-filter>
- * <action android:name="android.media.browse.MediaBrowserService" />
- * </intent-filter>
- * </service>
- * </pre>
- *
- */
-public abstract class MediaBrowserService extends Service {
- private static final String TAG = "MediaBrowserService";
- private static final boolean DBG = false;
-
- /**
- * The {@link Intent} that must be declared as handled by the service.
- */
- @SdkConstant(SdkConstantType.SERVICE_ACTION)
- public static final String SERVICE_INTERFACE = "android.media.browse.MediaBrowserService";
-
- /**
- * A key for passing the MediaItem to the ResultReceiver in getItem.
- * @hide
- */
- @UnsupportedAppUsage
- public static final String KEY_MEDIA_ITEM = "media_item";
-
- private static final int RESULT_FLAG_OPTION_NOT_HANDLED = 1 << 0;
- private static final int RESULT_FLAG_ON_LOAD_ITEM_NOT_IMPLEMENTED = 1 << 1;
-
- private static final int RESULT_ERROR = -1;
- private static final int RESULT_OK = 0;
-
- /** @hide */
- @Retention(RetentionPolicy.SOURCE)
- @IntDef(flag=true, value = { RESULT_FLAG_OPTION_NOT_HANDLED,
- RESULT_FLAG_ON_LOAD_ITEM_NOT_IMPLEMENTED })
- private @interface ResultFlags { }
-
- private final ArrayMap<IBinder, ConnectionRecord> mConnections = new ArrayMap<>();
- private ConnectionRecord mCurConnection;
- private final Handler mHandler = new Handler();
- private ServiceBinder mBinder;
- MediaSession.Token mSession;
-
- /**
- * All the info about a connection.
- */
- private class ConnectionRecord implements IBinder.DeathRecipient {
- String pkg;
- int uid;
- int pid;
- Bundle rootHints;
- IMediaBrowserServiceCallbacks callbacks;
- BrowserRoot root;
- HashMap<String, List<Pair<IBinder, Bundle>>> subscriptions = new HashMap<>();
-
- @Override
- public void binderDied() {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- mConnections.remove(callbacks.asBinder());
- }
- });
- }
- }
-
- /**
- * Completion handler for asynchronous callback methods in {@link MediaBrowserService}.
- * <p>
- * Each of the methods that takes one of these to send the result must call
- * {@link #sendResult} to respond to the caller with the given results. If those
- * functions return without calling {@link #sendResult}, they must instead call
- * {@link #detach} before returning, and then may call {@link #sendResult} when
- * they are done. If more than one of those methods is called, an exception will
- * be thrown.
- *
- * @see #onLoadChildren
- * @see #onLoadItem
- */
- public class Result<T> {
- private Object mDebug;
- private boolean mDetachCalled;
- private boolean mSendResultCalled;
- @UnsupportedAppUsage
- private int mFlags;
-
- Result(Object debug) {
- mDebug = debug;
- }
-
- /**
- * Send the result back to the caller.
- */
- public void sendResult(T result) {
- if (mSendResultCalled) {
- throw new IllegalStateException("sendResult() called twice for: " + mDebug);
- }
- mSendResultCalled = true;
- onResultSent(result, mFlags);
- }
-
- /**
- * Detach this message from the current thread and allow the {@link #sendResult}
- * call to happen later.
- */
- public void detach() {
- if (mDetachCalled) {
- throw new IllegalStateException("detach() called when detach() had already"
- + " been called for: " + mDebug);
- }
- if (mSendResultCalled) {
- throw new IllegalStateException("detach() called when sendResult() had already"
- + " been called for: " + mDebug);
- }
- mDetachCalled = true;
- }
-
- boolean isDone() {
- return mDetachCalled || mSendResultCalled;
- }
-
- void setFlags(@ResultFlags int flags) {
- mFlags = flags;
- }
-
- /**
- * Called when the result is sent, after assertions about not being called twice
- * have happened.
- */
- void onResultSent(T result, @ResultFlags int flags) {
- }
- }
-
- private class ServiceBinder extends IMediaBrowserService.Stub {
- @Override
- public void connect(final String pkg, final Bundle rootHints,
- final IMediaBrowserServiceCallbacks callbacks) {
-
- final int pid = Binder.getCallingPid();
- final int uid = Binder.getCallingUid();
- if (!isValidPackage(pkg, uid)) {
- throw new IllegalArgumentException("Package/uid mismatch: uid=" + uid
- + " package=" + pkg);
- }
-
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- final IBinder b = callbacks.asBinder();
-
- // Clear out the old subscriptions. We are getting new ones.
- mConnections.remove(b);
-
- final ConnectionRecord connection = new ConnectionRecord();
- connection.pkg = pkg;
- connection.pid = pid;
- connection.uid = uid;
- connection.rootHints = rootHints;
- connection.callbacks = callbacks;
-
- mCurConnection = connection;
- connection.root = MediaBrowserService.this.onGetRoot(pkg, uid, rootHints);
- mCurConnection = null;
-
- // If they didn't return something, don't allow this client.
- if (connection.root == null) {
- Log.i(TAG, "No root for client " + pkg + " from service "
- + getClass().getName());
- try {
- callbacks.onConnectFailed();
- } catch (RemoteException ex) {
- Log.w(TAG, "Calling onConnectFailed() failed. Ignoring. "
- + "pkg=" + pkg);
- }
- } else {
- try {
- mConnections.put(b, connection);
- b.linkToDeath(connection, 0);
- if (mSession != null) {
- callbacks.onConnect(connection.root.getRootId(),
- mSession, connection.root.getExtras());
- }
- } catch (RemoteException ex) {
- Log.w(TAG, "Calling onConnect() failed. Dropping client. "
- + "pkg=" + pkg);
- mConnections.remove(b);
- }
- }
- }
- });
- }
-
- @Override
- public void disconnect(final IMediaBrowserServiceCallbacks callbacks) {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- final IBinder b = callbacks.asBinder();
-
- // Clear out the old subscriptions. We are getting new ones.
- final ConnectionRecord old = mConnections.remove(b);
- if (old != null) {
- // TODO
- old.callbacks.asBinder().unlinkToDeath(old, 0);
- }
- }
- });
- }
-
- @Override
- public void addSubscriptionDeprecated(String id, IMediaBrowserServiceCallbacks callbacks) {
- // do-nothing
- }
-
- @Override
- public void addSubscription(final String id, final IBinder token, final Bundle options,
- final IMediaBrowserServiceCallbacks callbacks) {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- final IBinder b = callbacks.asBinder();
-
- // Get the record for the connection
- final ConnectionRecord connection = mConnections.get(b);
- if (connection == null) {
- Log.w(TAG, "addSubscription for callback that isn't registered id="
- + id);
- return;
- }
-
- MediaBrowserService.this.addSubscription(id, connection, token, options);
- }
- });
- }
-
- @Override
- public void removeSubscriptionDeprecated(String id, IMediaBrowserServiceCallbacks callbacks) {
- // do-nothing
- }
-
- @Override
- public void removeSubscription(final String id, final IBinder token,
- final IMediaBrowserServiceCallbacks callbacks) {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- final IBinder b = callbacks.asBinder();
-
- ConnectionRecord connection = mConnections.get(b);
- if (connection == null) {
- Log.w(TAG, "removeSubscription for callback that isn't registered id="
- + id);
- return;
- }
- if (!MediaBrowserService.this.removeSubscription(id, connection, token)) {
- Log.w(TAG, "removeSubscription called for " + id
- + " which is not subscribed");
- }
- }
- });
- }
-
- @Override
- public void getMediaItem(final String mediaId, final ResultReceiver receiver,
- final IMediaBrowserServiceCallbacks callbacks) {
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- final IBinder b = callbacks.asBinder();
- ConnectionRecord connection = mConnections.get(b);
- if (connection == null) {
- Log.w(TAG, "getMediaItem for callback that isn't registered id=" + mediaId);
- return;
- }
- performLoadItem(mediaId, connection, receiver);
- }
- });
- }
- }
-
- @Override
- public void onCreate() {
- super.onCreate();
- mBinder = new ServiceBinder();
- }
-
- @Override
- public IBinder onBind(Intent intent) {
- if (SERVICE_INTERFACE.equals(intent.getAction())) {
- return mBinder;
- }
- return null;
- }
-
- @Override
- public void dump(FileDescriptor fd, PrintWriter writer, String[] args) {
- }
-
- /**
- * Called to get the root information for browsing by a particular client.
- * <p>
- * The implementation should verify that the client package has permission
- * to access browse media information before returning the root id; it
- * should return null if the client is not allowed to access this
- * information.
- * </p>
- *
- * @param clientPackageName The package name of the application which is
- * requesting access to browse media.
- * @param clientUid The uid of the application which is requesting access to
- * browse media.
- * @param rootHints An optional bundle of service-specific arguments to send
- * to the media browser service when connecting and retrieving the
- * root id for browsing, or null if none. The contents of this
- * bundle may affect the information returned when browsing.
- * @return The {@link BrowserRoot} for accessing this app's content or null.
- * @see BrowserRoot#EXTRA_RECENT
- * @see BrowserRoot#EXTRA_OFFLINE
- * @see BrowserRoot#EXTRA_SUGGESTED
- */
- public abstract @Nullable BrowserRoot onGetRoot(@NonNull String clientPackageName,
- int clientUid, @Nullable Bundle rootHints);
-
- /**
- * Called to get information about the children of a media item.
- * <p>
- * Implementations must call {@link Result#sendResult result.sendResult}
- * with the list of children. If loading the children will be an expensive
- * operation that should be performed on another thread,
- * {@link Result#detach result.detach} may be called before returning from
- * this function, and then {@link Result#sendResult result.sendResult}
- * called when the loading is complete.
- * </p><p>
- * In case the media item does not have any children, call {@link Result#sendResult}
- * with an empty list. When the given {@code parentId} is invalid, implementations must
- * call {@link Result#sendResult result.sendResult} with {@code null}, which will invoke
- * {@link MediaBrowser.SubscriptionCallback#onError}.
- * </p>
- *
- * @param parentId The id of the parent media item whose children are to be
- * queried.
- * @param result The Result to send the list of children to.
- */
- public abstract void onLoadChildren(@NonNull String parentId,
- @NonNull Result<List<MediaBrowser.MediaItem>> result);
-
- /**
- * Called to get information about the children of a media item.
- * <p>
- * Implementations must call {@link Result#sendResult result.sendResult}
- * with the list of children. If loading the children will be an expensive
- * operation that should be performed on another thread,
- * {@link Result#detach result.detach} may be called before returning from
- * this function, and then {@link Result#sendResult result.sendResult}
- * called when the loading is complete.
- * </p><p>
- * In case the media item does not have any children, call {@link Result#sendResult}
- * with an empty list. When the given {@code parentId} is invalid, implementations must
- * call {@link Result#sendResult result.sendResult} with {@code null}, which will invoke
- * {@link MediaBrowser.SubscriptionCallback#onError}.
- * </p>
- *
- * @param parentId The id of the parent media item whose children are to be
- * queried.
- * @param result The Result to send the list of children to.
- * @param options The bundle of service-specific arguments sent from the media
- * browser. The information returned through the result should be
- * affected by the contents of this bundle.
- */
- public void onLoadChildren(@NonNull String parentId,
- @NonNull Result<List<MediaBrowser.MediaItem>> result, @NonNull Bundle options) {
- // To support backward compatibility, when the implementation of MediaBrowserService doesn't
- // override onLoadChildren() with options, onLoadChildren() without options will be used
- // instead, and the options will be applied in the implementation of result.onResultSent().
- result.setFlags(RESULT_FLAG_OPTION_NOT_HANDLED);
- onLoadChildren(parentId, result);
- }
-
- /**
- * Called to get information about a specific media item.
- * <p>
- * Implementations must call {@link Result#sendResult result.sendResult}. If
- * loading the item will be an expensive operation {@link Result#detach
- * result.detach} may be called before returning from this function, and
- * then {@link Result#sendResult result.sendResult} called when the item has
- * been loaded.
- * </p><p>
- * When the given {@code itemId} is invalid, implementations must call
- * {@link Result#sendResult result.sendResult} with {@code null}.
- * </p><p>
- * The default implementation will invoke {@link MediaBrowser.ItemCallback#onError}.
- * </p>
- *
- * @param itemId The id for the specific
- * {@link android.media.browse.MediaBrowser.MediaItem}.
- * @param result The Result to send the item to.
- */
- public void onLoadItem(String itemId, Result<MediaBrowser.MediaItem> result) {
- result.setFlags(RESULT_FLAG_ON_LOAD_ITEM_NOT_IMPLEMENTED);
- result.sendResult(null);
- }
-
- /**
- * Call to set the media session.
- * <p>
- * This should be called as soon as possible during the service's startup.
- * It may only be called once.
- *
- * @param token The token for the service's {@link MediaSession}.
- */
- public void setSessionToken(final MediaSession.Token token) {
- if (token == null) {
- throw new IllegalArgumentException("Session token may not be null.");
- }
- if (mSession != null) {
- throw new IllegalStateException("The session token has already been set.");
- }
- mSession = token;
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- Iterator<ConnectionRecord> iter = mConnections.values().iterator();
- while (iter.hasNext()){
- ConnectionRecord connection = iter.next();
- try {
- connection.callbacks.onConnect(connection.root.getRootId(), token,
- connection.root.getExtras());
- } catch (RemoteException e) {
- Log.w(TAG, "Connection for " + connection.pkg + " is no longer valid.");
- iter.remove();
- }
- }
- }
- });
- }
-
- /**
- * Gets the session token, or null if it has not yet been created
- * or if it has been destroyed.
- */
- public @Nullable MediaSession.Token getSessionToken() {
- return mSession;
- }
-
- /**
- * Gets the root hints sent from the currently connected {@link MediaBrowser}.
- * The root hints are service-specific arguments included in an optional bundle sent to the
- * media browser service when connecting and retrieving the root id for browsing, or null if
- * none. The contents of this bundle may affect the information returned when browsing.
- *
- * @throws IllegalStateException If this method is called outside of {@link #onGetRoot} or
- * {@link #onLoadChildren} or {@link #onLoadItem}.
- * @see MediaBrowserService.BrowserRoot#EXTRA_RECENT
- * @see MediaBrowserService.BrowserRoot#EXTRA_OFFLINE
- * @see MediaBrowserService.BrowserRoot#EXTRA_SUGGESTED
- */
- public final Bundle getBrowserRootHints() {
- if (mCurConnection == null) {
- throw new IllegalStateException("This should be called inside of onGetRoot or"
- + " onLoadChildren or onLoadItem methods");
- }
- return mCurConnection.rootHints == null ? null : new Bundle(mCurConnection.rootHints);
- }
-
- /**
- * Gets information about the browser that sent the current request.
- *
- * @throws IllegalStateException If this method is called outside of {@link #onGetRoot} or
- * {@link #onLoadChildren} or {@link #onLoadItem}.
- * @see MediaSessionManager#isTrustedForMediaControl(RemoteUserInfo)
- */
- public final RemoteUserInfo getCurrentBrowserInfo() {
- if (mCurConnection == null) {
- throw new IllegalStateException("This should be called inside of onGetRoot or"
- + " onLoadChildren or onLoadItem methods");
- }
- return new RemoteUserInfo(mCurConnection.pkg, mCurConnection.pid, mCurConnection.uid,
- mCurConnection.callbacks.asBinder());
- }
-
- /**
- * Notifies all connected media browsers that the children of
- * the specified parent id have changed in some way.
- * This will cause browsers to fetch subscribed content again.
- *
- * @param parentId The id of the parent media item whose
- * children changed.
- */
- public void notifyChildrenChanged(@NonNull String parentId) {
- notifyChildrenChangedInternal(parentId, null);
- }
-
- /**
- * Notifies all connected media browsers that the children of
- * the specified parent id have changed in some way.
- * This will cause browsers to fetch subscribed content again.
- *
- * @param parentId The id of the parent media item whose
- * children changed.
- * @param options The bundle of service-specific arguments to send
- * to the media browser. The contents of this bundle may
- * contain the information about the change.
- */
- public void notifyChildrenChanged(@NonNull String parentId, @NonNull Bundle options) {
- if (options == null) {
- throw new IllegalArgumentException("options cannot be null in notifyChildrenChanged");
- }
- notifyChildrenChangedInternal(parentId, options);
- }
-
- private void notifyChildrenChangedInternal(final String parentId, final Bundle options) {
- if (parentId == null) {
- throw new IllegalArgumentException("parentId cannot be null in notifyChildrenChanged");
- }
- mHandler.post(new Runnable() {
- @Override
- public void run() {
- for (IBinder binder : mConnections.keySet()) {
- ConnectionRecord connection = mConnections.get(binder);
- List<Pair<IBinder, Bundle>> callbackList =
- connection.subscriptions.get(parentId);
- if (callbackList != null) {
- for (Pair<IBinder, Bundle> callback : callbackList) {
- if (MediaBrowserUtils.hasDuplicatedItems(options, callback.second)) {
- performLoadChildren(parentId, connection, callback.second);
- }
- }
- }
- }
- }
- });
- }
-
- /**
- * Return whether the given package is one of the ones that is owned by the uid.
- */
- private boolean isValidPackage(String pkg, int uid) {
- if (pkg == null) {
- return false;
- }
- final PackageManager pm = getPackageManager();
- final String[] packages = pm.getPackagesForUid(uid);
- final int N = packages.length;
- for (int i=0; i<N; i++) {
- if (packages[i].equals(pkg)) {
- return true;
- }
- }
- return false;
- }
-
- /**
- * Save the subscription and if it is a new subscription send the results.
- */
- private void addSubscription(String id, ConnectionRecord connection, IBinder token,
- Bundle options) {
- // Save the subscription
- List<Pair<IBinder, Bundle>> callbackList = connection.subscriptions.get(id);
- if (callbackList == null) {
- callbackList = new ArrayList<>();
- }
- for (Pair<IBinder, Bundle> callback : callbackList) {
- if (token == callback.first
- && MediaBrowserUtils.areSameOptions(options, callback.second)) {
- return;
- }
- }
- callbackList.add(new Pair<>(token, options));
- connection.subscriptions.put(id, callbackList);
- // send the results
- performLoadChildren(id, connection, options);
- }
-
- /**
- * Remove the subscription.
- */
- private boolean removeSubscription(String id, ConnectionRecord connection, IBinder token) {
- if (token == null) {
- return connection.subscriptions.remove(id) != null;
- }
- boolean removed = false;
- List<Pair<IBinder, Bundle>> callbackList = connection.subscriptions.get(id);
- if (callbackList != null) {
- Iterator<Pair<IBinder, Bundle>> iter = callbackList.iterator();
- while (iter.hasNext()){
- if (token == iter.next().first) {
- removed = true;
- iter.remove();
- }
- }
- if (callbackList.size() == 0) {
- connection.subscriptions.remove(id);
- }
- }
- return removed;
- }
-
- /**
- * Call onLoadChildren and then send the results back to the connection.
- * <p>
- * Callers must make sure that this connection is still connected.
- */
- private void performLoadChildren(final String parentId, final ConnectionRecord connection,
- final Bundle options) {
- final Result<List<MediaBrowser.MediaItem>> result
- = new Result<List<MediaBrowser.MediaItem>>(parentId) {
- @Override
- void onResultSent(List<MediaBrowser.MediaItem> list, @ResultFlags int flag) {
- if (mConnections.get(connection.callbacks.asBinder()) != connection) {
- if (DBG) {
- Log.d(TAG, "Not sending onLoadChildren result for connection that has"
- + " been disconnected. pkg=" + connection.pkg + " id=" + parentId);
- }
- return;
- }
-
- List<MediaBrowser.MediaItem> filteredList =
- (flag & RESULT_FLAG_OPTION_NOT_HANDLED) != 0
- ? applyOptions(list, options) : list;
- final MediaParceledListSlice<MediaBrowser.MediaItem> pls =
- filteredList == null ? null : new MediaParceledListSlice<>(filteredList);
- try {
- connection.callbacks.onLoadChildrenWithOptions(parentId, pls, options);
- } catch (RemoteException ex) {
- // The other side is in the process of crashing.
- Log.w(TAG, "Calling onLoadChildren() failed for id=" + parentId
- + " package=" + connection.pkg);
- }
- }
- };
-
- mCurConnection = connection;
- if (options == null) {
- onLoadChildren(parentId, result);
- } else {
- onLoadChildren(parentId, result, options);
- }
- mCurConnection = null;
-
- if (!result.isDone()) {
- throw new IllegalStateException("onLoadChildren must call detach() or sendResult()"
- + " before returning for package=" + connection.pkg + " id=" + parentId);
- }
- }
-
- private List<MediaBrowser.MediaItem> applyOptions(List<MediaBrowser.MediaItem> list,
- final Bundle options) {
- if (list == null) {
- return null;
- }
- int page = options.getInt(MediaBrowser.EXTRA_PAGE, -1);
- int pageSize = options.getInt(MediaBrowser.EXTRA_PAGE_SIZE, -1);
- if (page == -1 && pageSize == -1) {
- return list;
- }
- int fromIndex = pageSize * page;
- int toIndex = fromIndex + pageSize;
- if (page < 0 || pageSize < 1 || fromIndex >= list.size()) {
- return Collections.EMPTY_LIST;
- }
- if (toIndex > list.size()) {
- toIndex = list.size();
- }
- return list.subList(fromIndex, toIndex);
- }
-
- private void performLoadItem(String itemId, final ConnectionRecord connection,
- final ResultReceiver receiver) {
- final Result<MediaBrowser.MediaItem> result =
- new Result<MediaBrowser.MediaItem>(itemId) {
- @Override
- void onResultSent(MediaBrowser.MediaItem item, @ResultFlags int flag) {
- if (mConnections.get(connection.callbacks.asBinder()) != connection) {
- if (DBG) {
- Log.d(TAG, "Not sending onLoadItem result for connection that has"
- + " been disconnected. pkg=" + connection.pkg + " id=" + itemId);
- }
- return;
- }
- if ((flag & RESULT_FLAG_ON_LOAD_ITEM_NOT_IMPLEMENTED) != 0) {
- receiver.send(RESULT_ERROR, null);
- return;
- }
- Bundle bundle = new Bundle();
- bundle.putParcelable(KEY_MEDIA_ITEM, item);
- receiver.send(RESULT_OK, bundle);
- }
- };
-
- mCurConnection = connection;
- onLoadItem(itemId, result);
- mCurConnection = null;
-
- if (!result.isDone()) {
- throw new IllegalStateException("onLoadItem must call detach() or sendResult()"
- + " before returning for id=" + itemId);
- }
- }
-
- /**
- * Contains information that the browser service needs to send to the client
- * when first connected.
- */
- public static final class BrowserRoot {
- /**
- * The lookup key for a boolean that indicates whether the browser service should return a
- * browser root for recently played media items.
- *
- * <p>When creating a media browser for a given media browser service, this key can be
- * supplied as a root hint for retrieving media items that are recently played.
- * If the media browser service can provide such media items, the implementation must return
- * the key in the root hint when {@link #onGetRoot(String, int, Bundle)} is called back.
- *
- * <p>The root hint may contain multiple keys.
- *
- * @see #EXTRA_OFFLINE
- * @see #EXTRA_SUGGESTED
- */
- public static final String EXTRA_RECENT = "android.service.media.extra.RECENT";
-
- /**
- * The lookup key for a boolean that indicates whether the browser service should return a
- * browser root for offline media items.
- *
- * <p>When creating a media browser for a given media browser service, this key can be
- * supplied as a root hint for retrieving media items that can be played without an
- * internet connection.
- * If the media browser service can provide such media items, the implementation must return
- * the key in the root hint when {@link #onGetRoot(String, int, Bundle)} is called back.
- *
- * <p>The root hint may contain multiple keys.
- *
- * @see #EXTRA_RECENT
- * @see #EXTRA_SUGGESTED
- */
- public static final String EXTRA_OFFLINE = "android.service.media.extra.OFFLINE";
-
- /**
- * The lookup key for a boolean that indicates whether the browser service should return a
- * browser root for suggested media items.
- *
- * <p>When creating a media browser for a given media browser service, this key can be
- * supplied as a root hint for retrieving the media items suggested by the media browser
- * service. The list of media items passed in {@link android.media.browse.MediaBrowser.SubscriptionCallback#onChildrenLoaded(String, List)}
- * is considered ordered by relevance, first being the top suggestion.
- * If the media browser service can provide such media items, the implementation must return
- * the key in the root hint when {@link #onGetRoot(String, int, Bundle)} is called back.
- *
- * <p>The root hint may contain multiple keys.
- *
- * @see #EXTRA_RECENT
- * @see #EXTRA_OFFLINE
- */
- public static final String EXTRA_SUGGESTED = "android.service.media.extra.SUGGESTED";
-
- final private String mRootId;
- final private Bundle mExtras;
-
- /**
- * Constructs a browser root.
- * @param rootId The root id for browsing.
- * @param extras Any extras about the browser service.
- */
- public BrowserRoot(@NonNull String rootId, @Nullable Bundle extras) {
- if (rootId == null) {
- throw new IllegalArgumentException("The root id in BrowserRoot cannot be null. " +
- "Use null for BrowserRoot instead.");
- }
- mRootId = rootId;
- mExtras = extras;
- }
-
- /**
- * Gets the root id for browsing.
- */
- public String getRootId() {
- return mRootId;
- }
-
- /**
- * Gets any extras about the browser service.
- */
- public Bundle getExtras() {
- return mExtras;
- }
- }
-}
diff --git a/packages/OWNERS b/packages/OWNERS
deleted file mode 100644
index 3b9fd2b..0000000
--- a/packages/OWNERS
+++ /dev/null
@@ -1,9 +0,0 @@
-akersten@google.com
-dwkang@google.com
-hdmoon@google.com
-insun@google.com
-jaewan@google.com
-jinpark@google.com
-marcone@google.com
-sungsoo@google.com
-wjia@google.com
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index c0aa477..91b7587 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -38,7 +38,8 @@
libpowermanager \
libmediautils \
libmemunreachable \
- libmedia_helper
+ libmedia_helper \
+ libvibrator
LOCAL_STATIC_LIBRARIES := \
libcpustats \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0d6ef46..4033247 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -27,6 +27,7 @@
#include <sys/time.h>
#include <sys/resource.h>
+#include <android/os/IExternalVibratorService.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
@@ -122,6 +123,21 @@
}
}
+// Keep a strong reference to external vibrator service
+static sp<os::IExternalVibratorService> sExternalVibratorService;
+
+static sp<os::IExternalVibratorService> getExternalVibratorService() {
+ if (sExternalVibratorService == 0) {
+ sp <IBinder> binder = defaultServiceManager()->getService(
+ String16("external_vibrator_service"));
+ if (binder != 0) {
+ sExternalVibratorService =
+ interface_cast<os::IExternalVibratorService>(binder);
+ }
+ }
+ return sExternalVibratorService;
+}
+
// ----------------------------------------------------------------------------
std::string formatToString(audio_format_t format) {
@@ -276,13 +292,16 @@
fullConfig.sample_rate = config->sample_rate;
fullConfig.channel_mask = config->channel_mask;
fullConfig.format = config->format;
+ std::vector<audio_io_handle_t> secondaryOutputs;
ret = AudioSystem::getOutputForAttr(attr, &io,
actualSessionId,
&streamType, client.clientPid, client.clientUid,
&fullConfig,
(audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ |
AUDIO_OUTPUT_FLAG_DIRECT),
- deviceId, &portId);
+ deviceId, &portId, &secondaryOutputs);
+ ALOGW_IF(!secondaryOutputs.empty(),
+ "%s does not support secondary outputs, ignoring them", __func__);
} else {
ret = AudioSystem::getInputForAttr(attr, &io,
actualSessionId,
@@ -318,6 +337,27 @@
return ret;
}
+/* static */
+int AudioFlinger::onExternalVibrationStart(const sp<os::ExternalVibration>& externalVibration) {
+ sp<os::IExternalVibratorService> evs = getExternalVibratorService();
+ if (evs != 0) {
+ int32_t ret;
+ binder::Status status = evs->onExternalVibrationStart(*externalVibration, &ret);
+ if (status.isOk()) {
+ return ret;
+ }
+ }
+ return AudioMixer::HAPTIC_SCALE_MUTE;
+}
+
+/* static */
+void AudioFlinger::onExternalVibrationStop(const sp<os::ExternalVibration>& externalVibration) {
+ sp<os::IExternalVibratorService> evs = getExternalVibratorService();
+ if (evs != 0) {
+ evs->onExternalVibrationStop(*externalVibration);
+ }
+}
+
static const char * const audio_interfaces[] = {
AUDIO_HARDWARE_MODULE_ID_PRIMARY,
AUDIO_HARDWARE_MODULE_ID_A2DP,
@@ -641,6 +681,7 @@
status_t lStatus;
audio_stream_type_t streamType;
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ std::vector<audio_io_handle_t> secondaryOutputs;
bool updatePid = (input.clientInfo.clientPid == -1);
const uid_t callingUid = IPCThreadState::self()->getCallingUid();
@@ -675,7 +716,7 @@
lStatus = AudioSystem::getOutputForAttr(&input.attr, &output.outputId, sessionId, &streamType,
clientPid, clientUid, &input.config, input.flags,
- &output.selectedDeviceId, &portId);
+ &output.selectedDeviceId, &portId, &secondaryOutputs);
if (lStatus != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
ALOGE("createTrack() getOutputForAttr() return error %d or invalid output handle", lStatus);
@@ -748,6 +789,59 @@
output.afLatencyMs = thread->latency();
output.portId = portId;
+ if (lStatus == NO_ERROR) {
+ // Connect secondary outputs. Failure on a secondary output must not impede the primary.
+ // Any secondary output setup failure will lead to a desync between the AP and AF until
+ // the track is destroyed.
+ TeePatches teePatches;
+ for (audio_io_handle_t secondaryOutput : secondaryOutputs) {
+ PlaybackThread *secondaryThread = checkPlaybackThread_l(secondaryOutput);
+ if (secondaryThread == NULL) {
+ ALOGE("no playback thread found for secondary output %d", output.outputId);
+ continue;
+ }
+
+ size_t frameCount = std::lcm(thread->frameCount(), secondaryThread->frameCount());
+
+ using namespace std::chrono_literals;
+ auto inChannelMask = audio_channel_mask_out_to_in(input.config.channel_mask);
+ sp<RecordThread::PatchRecord> patchRecord = new RecordThread::PatchRecord(nullptr /* thread */,
+ output.sampleRate,
+ inChannelMask,
+ input.config.format,
+ frameCount,
+ NULL /* buffer */,
+ (size_t)0 /* bufferSize */,
+ AUDIO_INPUT_FLAG_DIRECT,
+ 0ns /* timeout */);
+ status_t status = patchRecord->initCheck();
+ if (status != NO_ERROR) {
+ ALOGE("Secondary output patchRecord init failed: %d", status);
+ continue;
+ }
+ sp<PlaybackThread::PatchTrack> patchTrack = new PlaybackThread::PatchTrack(secondaryThread,
+ streamType,
+ output.sampleRate,
+ input.config.channel_mask,
+ input.config.format,
+ frameCount,
+ patchRecord->buffer(),
+ patchRecord->bufferSize(),
+ output.flags,
+ 0ns /* timeout */);
+ status = patchTrack->initCheck();
+ if (status != NO_ERROR) {
+ ALOGE("Secondary output patchTrack init failed: %d", status);
+ continue;
+ }
+ teePatches.push_back({patchRecord, patchTrack});
+ secondaryThread->addPatchTrack(patchTrack);
+ patchTrack->setPeerProxy(patchRecord.get());
+ patchRecord->setPeerProxy(patchTrack.get());
+ }
+ track->setTeePatches(std::move(teePatches));
+ }
+
// move effect chain to this output thread if an effect on same session was waiting
// for a track to be created
if (lStatus == NO_ERROR && effectThread != NULL) {
@@ -897,6 +991,40 @@
return NO_ERROR;
}
+status_t AudioFlinger::setMasterBalance(float balance)
+{
+ status_t ret = initCheck();
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+
+ // check calling permissions
+ if (!settingsAllowed()) {
+ return PERMISSION_DENIED;
+ }
+
+ // check range
+ if (isnan(balance) || fabs(balance) > 1.f) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+
+ // short cut.
+ if (mMasterBalance == balance) return NO_ERROR;
+
+ mMasterBalance = balance;
+
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ if (mPlaybackThreads.valueAt(i)->isDuplicating()) {
+ continue;
+ }
+ mPlaybackThreads.valueAt(i)->setMasterBalance(balance);
+ }
+
+ return NO_ERROR;
+}
+
status_t AudioFlinger::setMode(audio_mode_t mode)
{
status_t ret = initCheck();
@@ -1036,6 +1164,13 @@
return masterVolume_l();
}
+status_t AudioFlinger::getMasterBalance(float *balance) const
+{
+ Mutex::Autolock _l(mLock);
+ *balance = getMasterBalance_l();
+ return NO_ERROR; // if called through binder, may return a transactional error
+}
+
bool AudioFlinger::masterMute() const
{
Mutex::Autolock _l(mLock);
@@ -1047,6 +1182,11 @@
return mMasterVolume;
}
+float AudioFlinger::getMasterBalance_l() const
+{
+ return mMasterBalance;
+}
+
bool AudioFlinger::masterMute_l() const
{
return mMasterMute;
@@ -3137,9 +3277,13 @@
}
// look for the thread where the specified audio session is present
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
- if (mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId) != 0) {
+ uint32_t sessionType = mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId);
+ if (sessionType != 0) {
io = mPlaybackThreads.keyAt(i);
- break;
+ // thread with same effect session is preferable
+ if ((sessionType & ThreadBase::EFFECT_SESSION) != 0) {
+ break;
+ }
}
}
if (io == AUDIO_IO_HANDLE_NONE) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index c1169d2..1441e15 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -21,8 +21,11 @@
#include "Configuration.h"
#include <atomic>
#include <mutex>
+#include <chrono>
#include <deque>
#include <map>
+#include <numeric>
+#include <optional>
#include <set>
#include <string>
#include <vector>
@@ -30,6 +33,7 @@
#include <sys/types.h>
#include <limits.h>
+#include <android/os/BnExternalVibrationController.h>
#include <android-base/macros.h>
#include <cutils/atomic.h>
@@ -84,6 +88,8 @@
#include <private/media/AudioEffectShared.h>
#include <private/media/AudioTrackShared.h>
+#include <vibrator/ExternalVibration.h>
+
#include "android/media/BnAudioRecord.h"
namespace android {
@@ -137,6 +143,10 @@
virtual float masterVolume() const;
virtual bool masterMute() const;
+ // Balance value must be between -1.f (left only) and 1.f (right only), inclusive.
+ status_t setMasterBalance(float balance) override;
+ status_t getMasterBalance(float *balance) const override;
+
virtual status_t setStreamVolume(audio_stream_type_t stream, float value,
audio_io_handle_t output);
virtual status_t setStreamMute(audio_stream_type_t stream, bool muted);
@@ -284,6 +294,9 @@
const sp<MmapStreamCallback>& callback,
sp<MmapStreamInterface>& interface,
audio_port_handle_t *handle);
+
+ static int onExternalVibrationStart(const sp<os::ExternalVibration>& externalVibration);
+ static void onExternalVibrationStop(const sp<os::ExternalVibration>& externalVibration);
private:
// FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
static const size_t kLogMemorySize = 400 * 1024;
@@ -516,6 +529,9 @@
class EffectChain;
struct AudioStreamIn;
+ struct TeePatch;
+ using TeePatches = std::vector<TeePatch>;
+
struct stream_type_t {
stream_type_t()
@@ -715,6 +731,11 @@
audioHwDev(dev), stream(in), flags(flags) {}
};
+ struct TeePatch {
+ sp<RecordThread::PatchRecord> patchRecord;
+ sp<PlaybackThread::PatchTrack> patchTrack;
+ };
+
// for mAudioSessionRefs only
struct AudioSessionRef {
AudioSessionRef(audio_session_t sessionid, pid_t pid) :
@@ -776,6 +797,7 @@
// member variables below are protected by mLock
float mMasterVolume;
bool mMasterMute;
+ float mMasterBalance = 0.f;
// end of variables protected by mLock
DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> > mRecordThreads;
@@ -793,6 +815,7 @@
Vector<AudioSessionRef*> mAudioSessionRefs;
float masterVolume_l() const;
+ float getMasterBalance_l() const;
bool masterMute_l() const;
audio_module_handle_t loadHwModule_l(const char *name);
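
For callers, the new pair of methods behaves like the existing master volume accessors. The snippet below is a usage sketch only; it assumes the usual AudioSystem pass-through wrappers (AudioSystem::setMasterBalance / AudioSystem::getMasterBalance) are exposed for these IAudioFlinger entry points, and it needs the same settings permission the service checks.

#include <algorithm>
#include <media/AudioSystem.h>

// Shift the stereo image slightly to the right, clamped to the valid range.
void nudgeBalanceRight() {
    float balance = 0.f;
    if (android::AudioSystem::getMasterBalance(&balance) == android::NO_ERROR) {
        balance = std::min(balance + 0.1f, 1.f);  // keep within [-1.f, 1.f]
        android::AudioSystem::setMasterBalance(balance);
    }
}
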
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index 1d4b3fe..a60a5f2 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -66,8 +66,9 @@
// Maintain a 64-bit render position using the 32-bit result from the HAL.
// This delta calculation relies on the arithmetic overflow behavior
// of integers. For example (100 - 0xFFFFFFF0) = 116.
- uint32_t truncatedPosition = (uint32_t)mRenderPosition;
- int32_t deltaHalPosition = (int32_t)(halPosition - truncatedPosition);
+ const uint32_t truncatedPosition = (uint32_t)mRenderPosition;
+ int32_t deltaHalPosition; // initialization not needed, overwritten by __builtin_sub_overflow()
+ (void) __builtin_sub_overflow(halPosition, truncatedPosition, &deltaHalPosition);
if (deltaHalPosition > 0) {
mRenderPosition += deltaHalPosition;
}
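
The wraparound-safe delta used above can be read in isolation as a generic pattern for extending a wrapping 32-bit HAL counter into a monotonic 64-bit position; a minimal sketch (function name illustrative):

#include <cstdint>

// halPosition is the HAL's wrapping 32-bit frame counter; position64 is the
// maintained 64-bit render position. The subtraction wraps modulo 2^32, so
// e.g. halPosition = 100 just after a wrap past 0xFFFFFFF0 yields delta 116.
static void extendRenderPosition(uint64_t& position64, uint32_t halPosition) {
    const uint32_t truncated = static_cast<uint32_t>(position64);
    int32_t delta;
    (void) __builtin_sub_overflow(halPosition, truncated, &delta);  // GCC/Clang builtin
    if (delta > 0) {
        position64 += delta;
    }
}
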
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 6ab6369..8455e54 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1136,7 +1136,8 @@
// if controller flag is set (Note that controller == TRUE => EFFECT_FLAG_VOLUME_CTRL set)
if (isProcessEnabled() &&
((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
- (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
+ (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND ||
+ (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_MONITOR)) {
uint32_t volume[2];
uint32_t *pVolume = NULL;
uint32_t size = sizeof(volume);
@@ -1331,6 +1332,7 @@
case EFFECT_FLAG_VOLUME_NONE: s.append("none"); break;
case EFFECT_FLAG_VOLUME_CTRL: s.append("implements control"); break;
case EFFECT_FLAG_VOLUME_IND: s.append("requires indication"); break;
+ case EFFECT_FLAG_VOLUME_MONITOR: s.append("monitors volume"); break;
default: s.append("unknown/reserved"); break;
}
s.append(", ");
@@ -2277,7 +2279,7 @@
}
// then indicate volume to all other effects in chain.
// Pass altered volume to effects before volume controller
- // and requested volume to effects after controller
+ // and requested volume to effects after controller or with volume monitor flag
uint32_t lVol = newLeft;
uint32_t rVol = newRight;
@@ -2290,7 +2292,12 @@
lVol = *left;
rVol = *right;
}
- mEffects[i]->setVolume(&lVol, &rVol, false);
+ // Pass requested volume directly if this is volume monitor module
+ if (mEffects[i]->isVolumeMonitor()) {
+ mEffects[i]->setVolume(left, right, false);
+ } else {
+ mEffects[i]->setVolume(&lVol, &rVol, false);
+ }
}
*left = newLeft;
*right = newRight;
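
The volume handling above keys off the EFFECT_FLAG_VOLUME_MASK field of the effect descriptor, with the new EFFECT_FLAG_VOLUME_MONITOR value joining CTRL and IND. A small sketch of reading that field, mirroring the dump strings above; it assumes EFFECT_FLAG_VOLUME_MONITOR is defined alongside the existing volume flags, and uses the customary effect-descriptor header:

#include <hardware/audio_effect.h>

// Map the volume field of effect_descriptor_t::flags to a description.
static const char* volumeFlagsToString(uint32_t flags) {
    switch (flags & EFFECT_FLAG_VOLUME_MASK) {
        case EFFECT_FLAG_VOLUME_NONE:    return "none";
        case EFFECT_FLAG_VOLUME_CTRL:    return "implements control";
        case EFFECT_FLAG_VOLUME_IND:     return "requires indication";
        case EFFECT_FLAG_VOLUME_MONITOR: return "monitors volume";
        default:                         return "unknown/reserved";
    }
}
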
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 15a26ea..58ce351 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -134,6 +134,9 @@
bool isVolumeControl() const
{ return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
== EFFECT_FLAG_VOLUME_CTRL; }
+ bool isVolumeMonitor() const
+ { return (mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK)
+ == EFFECT_FLAG_VOLUME_MONITOR; }
status_t setOffloaded(bool offloaded, audio_io_handle_t io);
bool isOffloaded() const;
void addEffectToHal_l();
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index f328577..c5b9953 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -60,7 +60,6 @@
mSinkChannelCount(FCC_2),
mMixerBuffer(NULL),
mMixerBufferSize(0),
- mMixerBufferFormat(AUDIO_FORMAT_PCM_16_BIT),
mMixerBufferState(UNDEFINED),
mFormat(Format_Invalid),
mSampleRate(0),
@@ -140,6 +139,75 @@
}
}
+void FastMixer::updateMixerTrack(int index, Reason reason) {
+ const FastMixerState * const current = (const FastMixerState *) mCurrent;
+ const FastTrack * const fastTrack = &current->mFastTracks[index];
+
+ // check and update generation
+ if (reason == REASON_MODIFY && mGenerations[index] == fastTrack->mGeneration) {
+ return; // no change on an already configured track.
+ }
+ mGenerations[index] = fastTrack->mGeneration;
+
+ // mMixer == nullptr on configuration failure (check done after generation update).
+ if (mMixer == nullptr) {
+ return;
+ }
+
+ switch (reason) {
+ case REASON_REMOVE:
+ mMixer->destroy(index);
+ break;
+ case REASON_ADD: {
+ const status_t status = mMixer->create(
+ index, fastTrack->mChannelMask, fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
+ LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
+ "%s: cannot create fast track index"
+ " %d, mask %#x, format %#x in AudioMixer",
+ __func__, index, fastTrack->mChannelMask, fastTrack->mFormat);
+ }
+ [[fallthrough]]; // now fallthrough to update the newly created track.
+ case REASON_MODIFY:
+ mMixer->setBufferProvider(index, fastTrack->mBufferProvider);
+
+ float vlf, vrf;
+ if (fastTrack->mVolumeProvider != nullptr) {
+ const gain_minifloat_packed_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
+ vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
+ vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
+ } else {
+ vlf = vrf = AudioMixer::UNITY_GAIN_FLOAT;
+ }
+
+ // set volume to avoid ramp whenever the track is updated (or created).
+ // Note: this does not distinguish between starting fresh and
+ // resuming from a paused state.
+ mMixer->setParameter(index, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf);
+ mMixer->setParameter(index, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf);
+
+ mMixer->setParameter(index, AudioMixer::RESAMPLE, AudioMixer::REMOVE, nullptr);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+ (void *)mMixerBuffer);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::MIXER_FORMAT,
+ (void *)(uintptr_t)mMixerBufferFormat);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::FORMAT,
+ (void *)(uintptr_t)fastTrack->mFormat);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+ (void *)(uintptr_t)fastTrack->mChannelMask);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+ (void *)(uintptr_t)mSinkChannelMask);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
+ (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
+ (void *)(uintptr_t)fastTrack->mHapticIntensity);
+
+ mMixer->enable(index);
+ break;
+ default:
+ LOG_ALWAYS_FATAL("%s: invalid update reason %d", __func__, reason);
+ }
+}
+
void FastMixer::onStateChange()
{
const FastMixerState * const current = (const FastMixerState *) mCurrent;
@@ -161,6 +229,7 @@
mOutputSink = current->mOutputSink;
mOutputSinkGen = current->mOutputSinkGen;
mSinkChannelMask = current->mSinkChannelMask;
+ mBalance.setChannelMask(mSinkChannelMask);
if (mOutputSink == NULL) {
mFormat = Format_Invalid;
mSampleRate = 0;
@@ -191,10 +260,6 @@
free(mSinkBuffer);
mSinkBuffer = NULL;
if (frameCount > 0 && mSampleRate > 0) {
- // The mixer produces either 16 bit PCM or float output, select
- // float output if the HAL supports higher than 16 bit precision.
- mMixerBufferFormat = mFormat.mFormat == AUDIO_FORMAT_PCM_16_BIT ?
- AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT;
// FIXME new may block for unbounded time at internal mutex of the heap
// implementation; it would be better to have normal mixer allocate for us
// to avoid blocking here and to prevent possible priority inversion
@@ -244,21 +309,16 @@
// check for change in active track set
const unsigned currentTrackMask = current->mTrackMask;
dumpState->mTrackMask = currentTrackMask;
+ dumpState->mNumTracks = popcount(currentTrackMask);
if (current->mFastTracksGen != mFastTracksGen) {
- ALOG_ASSERT(mMixerBuffer != NULL);
// process removed tracks first to avoid running out of track names
unsigned removedTracks = previousTrackMask & ~currentTrackMask;
while (removedTracks != 0) {
int i = __builtin_ctz(removedTracks);
removedTracks &= ~(1 << i);
- const FastTrack* fastTrack = &current->mFastTracks[i];
- ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
- if (mMixer != NULL) {
- mMixer->destroy(i);
- }
+ updateMixerTrack(i, REASON_REMOVE);
// don't reset track dump state, since other side is ignoring it
- mGenerations[i] = fastTrack->mGeneration;
}
// now process added tracks
@@ -266,38 +326,7 @@
while (addedTracks != 0) {
int i = __builtin_ctz(addedTracks);
addedTracks &= ~(1 << i);
- const FastTrack* fastTrack = &current->mFastTracks[i];
- AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
- if (mMixer != NULL) {
- const int name = i; // for clarity, choose name as fast track index.
- status_t status = mMixer->create(
- name,
- fastTrack->mChannelMask,
- fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
- LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
- "%s: cannot create track name"
- " %d, mask %#x, format %#x, sessionId %d in AudioMixer",
- __func__, name,
- fastTrack->mChannelMask, fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
- mMixer->setBufferProvider(name, bufferProvider);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
- (void *)mMixerBuffer);
- // newly allocated track names default to full scale volume
- mMixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
- (void *)(uintptr_t)fastTrack->mFormat);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
- (void *)(uintptr_t)fastTrack->mChannelMask);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
- (void *)(uintptr_t)mSinkChannelMask);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
- (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
- mMixer->enable(name);
- }
- mGenerations[i] = fastTrack->mGeneration;
+ updateMixerTrack(i, REASON_ADD);
}
// finally process (potentially) modified tracks; these use the same slot
@@ -306,42 +335,10 @@
while (modifiedTracks != 0) {
int i = __builtin_ctz(modifiedTracks);
modifiedTracks &= ~(1 << i);
- const FastTrack* fastTrack = &current->mFastTracks[i];
- if (fastTrack->mGeneration != mGenerations[i]) {
- // this track was actually modified
- AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
- ALOG_ASSERT(bufferProvider != NULL);
- if (mMixer != NULL) {
- const int name = i;
- mMixer->setBufferProvider(name, bufferProvider);
- if (fastTrack->mVolumeProvider == NULL) {
- float f = AudioMixer::UNITY_GAIN_FLOAT;
- mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
- mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
- }
- mMixer->setParameter(name, AudioMixer::RESAMPLE,
- AudioMixer::REMOVE, NULL);
- mMixer->setParameter(
- name,
- AudioMixer::TRACK,
- AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
- (void *)(uintptr_t)fastTrack->mFormat);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
- (void *)(uintptr_t)fastTrack->mChannelMask);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
- (void *)(uintptr_t)mSinkChannelMask);
- mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
- (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
- // already enabled
- }
- mGenerations[i] = fastTrack->mGeneration;
- }
+ updateMixerTrack(i, REASON_MODIFY);
}
mFastTracksGen = current->mFastTracksGen;
-
- dumpState->mNumTracks = popcount(currentTrackMask);
}
}
@@ -408,8 +405,8 @@
float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
float vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
- mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf);
- mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf);
+ mMixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &vlf);
+ mMixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &vrf);
}
// FIXME The current implementation of framesReady() for fast tracks
// takes a tryLock, which can block
@@ -471,6 +468,12 @@
mono_blend(mMixerBuffer, mMixerBufferFormat, Format_channelCount(mFormat), frameCount,
true /*limit*/);
}
+
+ // Balance must take effect after mono conversion.
+ // mBalance detects zero balance within the class for speed (not needed here).
+ mBalance.setBalance(mMasterBalance.load());
+ mBalance.process((float *)mMixerBuffer, frameCount);
+
// prepare the buffer used to write to sink
void *buffer = mSinkBuffer != NULL ? mSinkBuffer : mMixerBuffer;
if (mFormat.mFormat != mMixerBufferFormat) { // sink format not the same as mixer format
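
The ordering comment above (balance applied only after mono conversion) is the key point; conceptually the balance stage just scales each channel of the float mixer buffer. The sketch below is illustrative only and assumes plain interleaved stereo; audio_utils::Balance itself handles arbitrary channel masks and caches its per-channel volumes.

#include <cstddef>

// Apply left/right gains to an interleaved stereo float buffer.
static void applyStereoBalance(float* buffer, size_t frameCount,
                               float leftGain, float rightGain) {
    for (size_t i = 0; i < frameCount; ++i) {
        buffer[2 * i]     *= leftGain;   // left sample of frame i
        buffer[2 * i + 1] *= rightGain;  // right sample of frame i
    }
}
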
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 1d332e0..97ab635 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -18,6 +18,7 @@
#define ANDROID_AUDIO_FAST_MIXER_H
#include <atomic>
+#include <audio_utils/Balance.h>
#include "FastThread.h"
#include "StateQueue.h"
#include "FastMixerState.h"
@@ -41,6 +42,8 @@
FastMixerStateQueue* sq();
virtual void setMasterMono(bool mono) { mMasterMono.store(mono); /* memory_order_seq_cst */ }
+ virtual void setMasterBalance(float balance) { mMasterBalance.store(balance); }
+ virtual float getMasterBalance() const { return mMasterBalance.load(); }
virtual void setBoottimeOffset(int64_t boottimeOffset) {
mBoottimeOffset.store(boottimeOffset); /* memory_order_seq_cst */
}
@@ -56,6 +59,14 @@
virtual void onStateChange();
virtual void onWork();
+ enum Reason {
+ REASON_REMOVE,
+ REASON_ADD,
+ REASON_MODIFY,
+ };
+ // called when the fast track at the given index has been removed, added, or modified
+ void updateMixerTrack(int index, Reason reason);
+
// FIXME these former local variables need comments
static const FastMixerState sInitial;
@@ -74,7 +85,7 @@
audio_channel_mask_t mSinkChannelMask;
void* mMixerBuffer; // mixer output buffer.
size_t mMixerBufferSize;
- audio_format_t mMixerBufferFormat; // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT).
+ static constexpr audio_format_t mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT;
uint32_t mAudioChannelCount; // audio channel count, excludes haptic channels.
@@ -89,8 +100,11 @@
ExtendedTimestamp mTimestamp;
int64_t mNativeFramesWrittenButNotPresented;
+ audio_utils::Balance mBalance;
+
// accessed without lock between multiple threads.
std::atomic_bool mMasterMono;
+ std::atomic<float> mMasterBalance{};
std::atomic_int_fast64_t mBoottimeOffset;
const audio_io_handle_t mThreadIoHandle; // parent thread id for debugging purposes
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 9d2a733..396c797 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -19,6 +19,7 @@
#include <audio_utils/minifloat.h>
#include <system/audio.h>
+#include <media/AudioMixer.h>
#include <media/ExtendedAudioBufferProvider.h>
#include <media/nbaio/NBAIO.h>
#include <media/nblog/NBLog.h>
@@ -48,6 +49,8 @@
audio_format_t mFormat; // track format
int mGeneration; // increment when any field is assigned
bool mHapticPlaybackEnabled = false; // haptic playback is enabled or not
+ AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_MUTE; // intensity of
+ // haptic data
};
// Represents a single state of the fast mixer
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 3381e4d..676a575 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -211,8 +211,8 @@
((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
((patch->sinks[0].ext.device.hw_module != srcModule) ||
!audioHwDevice->supportsAudioPatches()))) {
- audio_devices_t outputDevice = AUDIO_DEVICE_NONE;
- String8 outputDeviceAddress;
+ audio_devices_t outputDevice = patch->sinks[0].ext.device.type;
+ String8 outputDeviceAddress = String8(patch->sinks[0].ext.device.address);
if (patch->num_sources == 2) {
if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
(patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
@@ -234,8 +234,6 @@
reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
} else {
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
- audio_devices_t device = patch->sinks[0].ext.device.type;
- String8 address = String8(patch->sinks[0].ext.device.address);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
@@ -254,8 +252,8 @@
patch->sinks[0].ext.device.hw_module,
&output,
&config,
- device,
- address,
+ outputDevice,
+ outputDeviceAddress,
flags);
ALOGV("mAudioFlinger.openOutput_l() returned %p", thread.get());
if (thread == 0) {
@@ -263,8 +261,6 @@
goto exit;
}
newPatch.mPlayback.setThread(reinterpret_cast<PlaybackThread*>(thread.get()));
- outputDevice = device;
- outputDeviceAddress = address;
}
audio_devices_t device = patch->sources[0].ext.device.type;
String8 address = String8(patch->sources[0].ext.device.address);
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 2d9bd8e..612855f 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -81,13 +81,16 @@
class Endpoint {
public:
Endpoint() = default;
- Endpoint(Endpoint&& other) { *this = std::move(other); }
- Endpoint& operator=(Endpoint&& other) {
+ Endpoint(const Endpoint&) = delete;
+ Endpoint& operator=(const Endpoint&) = delete;
+ Endpoint(Endpoint&& other) noexcept { swap(other); }
+ Endpoint& operator=(Endpoint&& other) noexcept {
+ swap(other);
+ return *this;
+ }
+ ~Endpoint() {
ALOGE_IF(mHandle != AUDIO_PATCH_HANDLE_NONE,
"A non empty Patch Endpoint leaked, handle %d", mHandle);
- *this = other;
- other.mHandle = AUDIO_PATCH_HANDLE_NONE;
- return *this;
}
status_t checkTrack(TrackType *trackOrNull) const {
@@ -127,10 +130,19 @@
}
void stopTrack() { if (mTrack) mTrack->stop(); }
- private:
- Endpoint(const Endpoint&) = default;
- Endpoint& operator=(const Endpoint&) = default;
+ void swap(Endpoint &other) noexcept {
+ using std::swap;
+ swap(mThread, other.mThread);
+ swap(mCloseThread, other.mCloseThread);
+ swap(mHandle, other.mHandle);
+ swap(mTrack, other.mTrack);
+ }
+ friend void swap(Endpoint &a, Endpoint &b) noexcept {
+ a.swap(b);
+ }
+
+ private:
sp<ThreadType> mThread;
bool mCloseThread = true;
audio_patch_handle_t mHandle = AUDIO_PATCH_HANDLE_NONE;
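
The Endpoint rework above follows a standard recipe for a move-only type: delete copy, implement move as swap, and give members defaults that represent the empty state so a moved-from object destructs harmlessly. In isolation, with an invented Handle type standing in for Endpoint:

#include <utility>

class Handle {
  public:
    Handle() = default;
    Handle(const Handle&) = delete;
    Handle& operator=(const Handle&) = delete;
    Handle(Handle&& other) noexcept { swap(other); }
    Handle& operator=(Handle&& other) noexcept {
        swap(other);            // our old state ends up in 'other' and is
        return *this;           // reclaimed when 'other' is destroyed
    }
    ~Handle() = default;        // Endpoint additionally warns here if still armed

    void swap(Handle& other) noexcept {
        using std::swap;
        swap(mValue, other.mValue);
    }
    friend void swap(Handle& a, Handle& b) noexcept { a.swap(b); }

  private:
    int mValue = -1;            // -1 is the empty/default state
};
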
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index d9f570d..357370e 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -43,9 +43,8 @@
void appendDumpHeader(String8& result);
void appendDump(String8& result, bool active);
- virtual status_t start(AudioSystem::sync_event_t event =
- AudioSystem::SYNC_EVENT_NONE,
- audio_session_t triggerSession = AUDIO_SESSION_NONE);
+ virtual status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ audio_session_t triggerSession = AUDIO_SESSION_NONE);
virtual void stop();
void pause();
@@ -119,6 +118,18 @@
void setHapticPlaybackEnabled(bool hapticPlaybackEnabled) {
mHapticPlaybackEnabled = hapticPlaybackEnabled;
}
+ /** Return at what intensity to play haptics, used in mixer. */
+ AudioMixer::haptic_intensity_t getHapticIntensity() const { return mHapticIntensity; }
+ /** Set intensity of haptic playback, should be set after querying vibrator service. */
+ void setHapticIntensity(AudioMixer::haptic_intensity_t hapticIntensity) {
+ if (AudioMixer::isValidHapticIntensity(hapticIntensity)) {
+ mHapticIntensity = hapticIntensity;
+ setHapticPlaybackEnabled(mHapticIntensity != AudioMixer::HAPTIC_SCALE_MUTE);
+ }
+ }
+ sp<os::ExternalVibration> getExternalVibration() const { return mExternalVibration; }
+
+ void setTeePatches(TeePatches teePatches);
protected:
// for numerous
@@ -130,8 +141,8 @@
DISALLOW_COPY_AND_ASSIGN(Track);
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
- // releaseBuffer() not overridden
+ status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) override;
+ void releaseBuffer(AudioBufferProvider::Buffer* buffer) override;
// ExtendedAudioBufferProvider interface
virtual size_t framesReady() const;
@@ -197,8 +208,28 @@
sp<media::VolumeHandler> mVolumeHandler; // handles multiple VolumeShaper configs and operations
bool mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
+ // intensity to play haptic data
+ AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_MUTE;
+ class AudioVibrationController : public os::BnExternalVibrationController {
+ public:
+ explicit AudioVibrationController(Track* track) : mTrack(track) {}
+ binder::Status mute(/*out*/ bool *ret) override;
+ binder::Status unmute(/*out*/ bool *ret) override;
+ private:
+ Track* const mTrack;
+ };
+ sp<AudioVibrationController> mAudioVibrationController;
+ sp<os::ExternalVibration> mExternalVibration;
private:
+ void interceptBuffer(const AudioBufferProvider::Buffer& buffer);
+ /** Write the source data in the buffer provider. @return written frame count. */
+ size_t writeFrames(AudioBufferProvider* dest, const void* src, size_t frameCount);
+ template <class F>
+ void forEachTeePatchTrack(F f) {
+ for (auto& tp : mTeePatches) { f(tp.patchTrack); }
+ };
+
// The following fields are only for fast tracks, and should be in a subclass
int mFastIndex; // index within FastMixerState::mFastTracks[];
// either mFastIndex == -1 if not isFastTrack()
@@ -218,6 +249,7 @@
audio_output_flags_t mFlags;
// If the last track change was notified to the client with readAndClearHasChanged
std::atomic_flag mChangeNotified = ATOMIC_FLAG_INIT;
+ TeePatches mTeePatches;
}; // end of Track
@@ -297,7 +329,7 @@
}; // end of OutputTrack
// playback track, used by PatchPanel
-class PatchTrack : public Track, public PatchProxyBufferProvider {
+class PatchTrack : public Track, public PatchTrackBase {
public:
PatchTrack(PlaybackThread *playbackThread,
@@ -308,7 +340,8 @@
size_t frameCount,
void *buffer,
size_t bufferSize,
- audio_output_flags_t flags);
+ audio_output_flags_t flags,
+ const Timeout& timeout = {});
virtual ~PatchTrack();
virtual status_t start(AudioSystem::sync_event_t event =
@@ -324,12 +357,7 @@
const struct timespec *timeOut = NULL);
virtual void releaseBuffer(Proxy::Buffer* buffer);
- void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
-
private:
void restartIfDisabled();
- sp<ClientProxy> mProxy;
- PatchProxyBufferProvider* mPeerProxy;
- struct timespec mPeerTimeout;
}; // end of PatchTrack
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 32af7d5..ab4af33 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -113,7 +113,7 @@
};
// playback track, used by PatchPanel
-class PatchRecord : virtual public RecordTrack, public PatchProxyBufferProvider {
+class PatchRecord : public RecordTrack, public PatchTrackBase {
public:
PatchRecord(RecordThread *recordThread,
@@ -123,7 +123,8 @@
size_t frameCount,
void *buffer,
size_t bufferSize,
- audio_input_flags_t flags);
+ audio_input_flags_t flags,
+ const Timeout& timeout = {});
virtual ~PatchRecord();
// AudioBufferProvider interface
@@ -134,11 +135,4 @@
virtual status_t obtainBuffer(Proxy::Buffer *buffer,
const struct timespec *timeOut = NULL);
virtual void releaseBuffer(Proxy::Buffer *buffer);
-
- void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
-
-private:
- sp<ClientProxy> mProxy;
- PatchProxyBufferProvider* mPeerProxy;
- struct timespec mPeerTimeout;
}; // end of PatchRecord
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 31a8c7d..8a45fc2 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -38,6 +38,7 @@
#include <private/media/AudioTrackShared.h>
#include <private/android_filesystem_config.h>
+#include <audio_utils/Balance.h>
#include <audio_utils/channels.h>
#include <audio_utils/mono_blend.h>
#include <audio_utils/primitives.h>
@@ -456,52 +457,6 @@
}
}
-std::string devicesToString(audio_devices_t devices)
-{
- std::string result;
- if (devices & AUDIO_DEVICE_BIT_IN) {
- InputDeviceConverter::maskToString(devices, result);
- } else {
- OutputDeviceConverter::maskToString(devices, result);
- }
- return result;
-}
-
-std::string inputFlagsToString(audio_input_flags_t flags)
-{
- std::string result;
- InputFlagConverter::maskToString(flags, result);
- return result;
-}
-
-std::string outputFlagsToString(audio_output_flags_t flags)
-{
- std::string result;
- OutputFlagConverter::maskToString(flags, result);
- return result;
-}
-
-const char *sourceToString(audio_source_t source)
-{
- switch (source) {
- case AUDIO_SOURCE_DEFAULT: return "default";
- case AUDIO_SOURCE_MIC: return "mic";
- case AUDIO_SOURCE_VOICE_UPLINK: return "voice uplink";
- case AUDIO_SOURCE_VOICE_DOWNLINK: return "voice downlink";
- case AUDIO_SOURCE_VOICE_CALL: return "voice call";
- case AUDIO_SOURCE_CAMCORDER: return "camcorder";
- case AUDIO_SOURCE_VOICE_RECOGNITION: return "voice recognition";
- case AUDIO_SOURCE_VOICE_COMMUNICATION: return "voice communication";
- case AUDIO_SOURCE_REMOTE_SUBMIX: return "remote submix";
- case AUDIO_SOURCE_UNPROCESSED: return "unprocessed";
- case AUDIO_SOURCE_VOICE_PERFORMANCE: return "voice performance";
- case AUDIO_SOURCE_ECHO_REFERENCE: return "echo reference";
- case AUDIO_SOURCE_FM_TUNER: return "FM tuner";
- case AUDIO_SOURCE_HOTWORD: return "hotword";
- default: return "unknown";
- }
-}
-
AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
audio_devices_t outDevice, audio_devices_t inDevice, type_t type, bool systemReady)
: Thread(false /*canCallJava*/),
@@ -716,8 +671,8 @@
event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
const audio_devices_t newDevice = getDevice();
mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
- (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
- (unsigned)newDevice, devicesToString(newDevice).c_str());
+ (unsigned)oldDevice, toString(oldDevice).c_str(),
+ (unsigned)newDevice, toString(newDevice).c_str());
} break;
case CFG_EVENT_RELEASE_AUDIO_PATCH: {
const audio_devices_t oldDevice = getDevice();
@@ -726,8 +681,8 @@
event->mStatus = releaseAudioPatch_l(data->mHandle);
const audio_devices_t newDevice = getDevice();
mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
- (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
- (unsigned)newDevice, devicesToString(newDevice).c_str());
+ (unsigned)oldDevice, toString(oldDevice).c_str(),
+ (unsigned)newDevice, toString(newDevice).c_str());
} break;
default:
ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
@@ -857,9 +812,9 @@
dprintf(fd, " none\n");
}
// Note: output device may be used by capture threads for effects such as AEC.
- dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).c_str());
- dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
- dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
+ dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, toString(mOutDevice).c_str());
+ dprintf(fd, " Input device: %#x (%s)\n", mInDevice, toString(mInDevice).c_str());
+ dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, toString(mAudioSource).c_str());
// Dump timestamp statistics for the Thread types that support it.
if (mType == RECORD
@@ -871,6 +826,22 @@
dprintf(fd, " Timestamp corrected: %s\n", isTimestampCorrectionEnabled() ? "yes" : "no");
}
+ if (mLastIoBeginNs > 0) { // MMAP may not set this
+ dprintf(fd, " Last %s occurred (msecs): %lld\n",
+ isOutput() ? "write" : "read",
+ (long long) (systemTime() - mLastIoBeginNs) / NANOS_PER_MILLISECOND);
+ }
+
+ if (mProcessTimeMs.getN() > 0) {
+ dprintf(fd, " Process time ms stats: %s\n", mProcessTimeMs.toString().c_str());
+ }
+
+ if (mIoJitterMs.getN() > 0) {
+ dprintf(fd, " Hal %s jitter ms stats: %s\n",
+ isOutput() ? "write" : "read",
+ mIoJitterMs.toString().c_str());
+ }
+
if (locked) {
mLock.unlock();
}
@@ -1703,7 +1674,7 @@
// mStreamTypes[] initialized in constructor body
mTracks(type == MIXER),
mOutput(output),
- mLastWriteTime(-1), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
+ mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
mMixerStatus(MIXER_IDLE),
mMixerStatusIgnoringFastTracks(MIXER_IDLE),
mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
@@ -1856,8 +1827,6 @@
channelMaskToString(mHapticChannelMask, true /* output */).c_str());
}
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
- dprintf(fd, " Last write occurred (msecs): %llu\n",
- (unsigned long long) ns2ms(systemTime() - mLastWriteTime));
dprintf(fd, " Total writes: %d\n", mNumWrites);
dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
@@ -1870,7 +1839,7 @@
AudioStreamOut *output = mOutput;
audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n",
- output, flags, outputFlagsToString(flags).c_str());
+ output, flags, toString(flags).c_str());
dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
if (mPipeSink.get() != nullptr) {
@@ -2271,6 +2240,11 @@
}
}
+void AudioFlinger::PlaybackThread::setMasterBalance(float balance)
+{
+ mMasterBalance.store(balance);
+}
+
void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
{
if (isDuplicating()) {
@@ -2358,15 +2332,23 @@
track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
}
- // Disable all haptic playback for all other active tracks when haptic playback is supported
- // and the track contains haptic channels. Enable haptic playback for current track.
- // TODO: Request actual haptic playback status from vibrator service
if ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
&& mHapticChannelMask != AUDIO_CHANNEL_NONE) {
- for (auto &t : mActiveTracks) {
- t->setHapticPlaybackEnabled(false);
+ // Unlock because the VibratorService takes its own lock for this call and may
+ // call back into Track mute/unmute, which also requires the thread's lock.
+ mLock.unlock();
+ const int intensity = AudioFlinger::onExternalVibrationStart(
+ track->getExternalVibration());
+ mLock.lock();
+ track->setHapticIntensity(static_cast<AudioMixer::haptic_intensity_t>(intensity));
+ // Haptic playback should be enabled by vibrator service.
+ if (track->getHapticPlaybackEnabled()) {
+ // Disable haptic playback on all active tracks to ensure that only
+ // one track plays haptics when the current track should play them.
+ for (const auto &t : mActiveTracks) {
+ t->setHapticPlaybackEnabled(false);
+ }
}
- track->setHapticPlaybackEnabled(true);
}
track->mResetDone = false;
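The hunk above trades the unconditional haptic enable for a round trip to the vibrator service, releasing the thread lock first. A minimal standalone sketch of that lock-drop pattern, with illustrative names rather than the AOSP types:

    #include <mutex>

    struct VibratorServiceStub {
        // May synchronously call back into code that takes threadLock (mute/unmute).
        int onExternalVibrationStart() { return 1 /* intensity */; }
    };

    int startTrackHaptics(std::mutex& threadLock, VibratorServiceStub& svc) {
        std::unique_lock<std::mutex> lock(threadLock); // normally held by the thread loop
        // ... per-track bookkeeping under the lock ...
        lock.unlock();                                 // avoid lock-order inversion
        const int intensity = svc.onExternalVibrationStart();
        lock.lock();                                   // re-acquire before touching thread state
        return intensity;                              // then applied via setHapticIntensity()
    }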
@@ -2523,6 +2505,7 @@
mChannelMask);
}
mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
+ mBalance.setChannelMask(mChannelMask);
// Get actual HAL format.
status_t result = mOutput->stream->getFormat(&mHALFormat);
@@ -2642,7 +2625,7 @@
free(mMixerBuffer);
mMixerBuffer = NULL;
if (mMixerBufferEnabled) {
- mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // also valid: AUDIO_FORMAT_PCM_16_BIT.
+ mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
mMixerBufferSize = mNormalFrameCount * mChannelCount
* audio_bytes_per_sample(mMixerBufferFormat);
(void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
@@ -3174,8 +3157,8 @@
Vector< sp<Track> > tracksToRemove;
mStandbyTimeNs = systemTime();
- nsecs_t lastWriteFinished = -1; // time last server write completed
- int64_t lastFramesWritten = -1; // track changes in timestamp server frames written
+ int64_t lastLoopCountWritten = -2; // never matches "previous" loop, when loopCount = 0.
+ int64_t lastFramesWritten = -1; // track changes in timestamp server frames written
// MIXER
nsecs_t lastWarning = 0;
@@ -3221,7 +3204,8 @@
}
audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- while (!exitPending())
+ // loopCount is used for statistics and diagnostics.
+ for (int64_t loopCount = 0; !exitPending(); ++loopCount)
{
// Log merge requests are performed during AudioFlinger binder transactions, but
// that does not cover audio playback. It's requested here for that reason.
@@ -3379,11 +3363,11 @@
// use the time before we called the HAL write - it is a bit more accurate
// to when the server last read data than the current time here.
//
- // If we haven't written anything, mLastWriteTime will be -1
+ // If we haven't written anything, mLastIoBeginNs will be -1
// and we use systemTime().
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = mFramesWritten;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastWriteTime == -1
- ? systemTime() : mLastWriteTime;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastIoBeginNs == -1
+ ? systemTime() : mLastIoBeginNs;
}
for (const sp<Track> &t : mActiveTracks) {
@@ -3531,6 +3515,14 @@
true /*limit*/);
}
+ if (!hasFastMixer()) {
+ // Balance must take effect after mono conversion.
+ // We do it here if there is no FastMixer.
+ // mBalance detects zero balance within the class for speed (not needed here).
+ mBalance.setBalance(mMasterBalance.load());
+ mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
+ }
+
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
mNormalFrameCount * (mChannelCount + mHapticChannelCount));
@@ -3585,6 +3577,14 @@
true /*limit*/);
}
+ if (!hasFastMixer()) {
+ // Balance must take effect after mono conversion.
+ // We do it here if there is no FastMixer.
+ // mBalance detects zero balance within the class for speed (not needed here).
+ mBalance.setBalance(mMasterBalance.load());
+ mBalance.process((float *)mEffectBuffer, mNormalFrameCount);
+ }
+
memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
mNormalFrameCount * (mChannelCount + mHapticChannelCount));
// The sample data is partially interleaved when haptic channels exist,
@@ -3604,43 +3604,68 @@
// mSleepTimeUs == 0 means we must write to audio hardware
if (mSleepTimeUs == 0) {
ssize_t ret = 0;
- // We save lastWriteFinished here, as previousLastWriteFinished,
- // for throttling. On thread start, previousLastWriteFinished will be
- // set to -1, which properly results in no throttling after the first write.
- nsecs_t previousLastWriteFinished = lastWriteFinished;
- nsecs_t delta = 0;
+ // writePeriodNs is updated >= 0 when ret > 0.
+ int64_t writePeriodNs = -1;
if (mBytesRemaining) {
// FIXME rewrite to reduce number of system calls
- mLastWriteTime = systemTime(); // also used for dumpsys
+ const int64_t lastIoBeginNs = systemTime();
ret = threadLoop_write();
- lastWriteFinished = systemTime();
- delta = lastWriteFinished - mLastWriteTime;
+ const int64_t lastIoEndNs = systemTime();
if (ret < 0) {
mBytesRemaining = 0;
- } else {
+ } else if (ret > 0) {
mBytesWritten += ret;
mBytesRemaining -= ret;
- mFramesWritten += ret / mFrameSize;
+ const int64_t frames = ret / mFrameSize;
+ mFramesWritten += frames;
+
+ writePeriodNs = lastIoEndNs - mLastIoEndNs;
+ // process information relating to write time.
+ if (audio_has_proportional_frames(mFormat)) {
+ // we are in a continuous mixing cycle
+ if (mMixerStatus == MIXER_TRACKS_READY &&
+ loopCount == lastLoopCountWritten + 1) {
+
+ const double jitterMs =
+ TimestampVerifier<int64_t, int64_t>::computeJitterMs(
+ {frames, writePeriodNs},
+ {0, 0} /* lastTimestamp */, mSampleRate);
+ const double processMs =
+ (lastIoBeginNs - mLastIoEndNs) * 1e-6;
+
+ Mutex::Autolock _l(mLock);
+ mIoJitterMs.add(jitterMs);
+ mProcessTimeMs.add(processMs);
+ }
+
+ // write blocked detection
+ const int64_t deltaWriteNs = lastIoEndNs - lastIoBeginNs;
+ if (mType == MIXER && deltaWriteNs > maxPeriod) {
+ mNumDelayedWrites++;
+ if ((lastIoEndNs - lastWarning) > kWarningThrottleNs) {
+ ATRACE_NAME("underrun");
+ ALOGW("write blocked for %lld msecs, "
+ "%d delayed writes, thread %d",
+ (long long)deltaWriteNs / NANOS_PER_MILLISECOND,
+ mNumDelayedWrites, mId);
+ lastWarning = lastIoEndNs;
+ }
+ }
+ }
+ // update timing info.
+ mLastIoBeginNs = lastIoBeginNs;
+ mLastIoEndNs = lastIoEndNs;
+ lastLoopCountWritten = loopCount;
}
} else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
(mMixerStatus == MIXER_DRAIN_ALL)) {
threadLoop_drain();
}
if (mType == MIXER && !mStandby) {
- // write blocked detection
- if (delta > maxPeriod) {
- mNumDelayedWrites++;
- if ((lastWriteFinished - lastWarning) > kWarningThrottleNs) {
- ATRACE_NAME("underrun");
- ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
- (unsigned long long) ns2ms(delta), mNumDelayedWrites, this);
- lastWarning = lastWriteFinished;
- }
- }
if (mThreadThrottle
&& mMixerStatus == MIXER_TRACKS_READY // we are mixing (active tracks)
- && ret > 0) { // we wrote something
+ && writePeriodNs > 0) { // we have write period info
// Limit MixerThread data processing to no more than twice the
// expected processing rate.
//
@@ -3659,12 +3684,9 @@
// 2. threadLoop_mix (significant for heavy mixing, especially
// on low tier processors)
- // it's OK if deltaMs (and deltaNs) is an overestimate.
- nsecs_t deltaNs;
- // deltaNs = lastWriteFinished - previousLastWriteFinished;
- __builtin_sub_overflow(
- lastWriteFinished,previousLastWriteFinished, &deltaNs);
- const int32_t deltaMs = deltaNs / 1000000;
+ // it's OK if deltaMs is an overestimate.
+
+ const int32_t deltaMs = writePeriodNs / NANOS_PER_MILLISECOND;
const int32_t throttleMs = (int32_t)mHalfBufferMs - deltaMs;
if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
@@ -3677,7 +3699,8 @@
mThreadThrottleTimeMs += throttleMs;
// Throttle must be attributed to the previous mixer loop's write time
// to allow back-to-back throttling.
- lastWriteFinished += throttleMs * 1000000;
+ // This also ensures proper timing statistics.
+ mLastIoEndNs = systemTime(); // we fetch the write end time again.
} else {
uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
if (diff > 0) {
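A rough standalone sketch of the throttle arithmetic above, assuming simplified types outside the real thread loop:

    #include <chrono>
    #include <cstdint>
    #include <thread>

    constexpr int64_t kNanosPerMs = 1'000'000;

    void maybeThrottle(int64_t writePeriodNs, int32_t halfBufferMs) {
        const int32_t deltaMs = static_cast<int32_t>(writePeriodNs / kNanosPerMs);
        const int32_t throttleMs = halfBufferMs - deltaMs;   // time the mixer ran "too fast"
        if (halfBufferMs >= throttleMs && throttleMs > 0) {
            std::this_thread::sleep_for(std::chrono::milliseconds(throttleMs));
        }
    }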
@@ -3760,7 +3783,6 @@
// removeTracks_l() must be called with ThreadBase::mLock held
void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
{
- bool enabledHapticTracksRemoved = false;
for (const auto& track : tracksToRemove) {
mActiveTracks.remove(track);
ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
@@ -3782,17 +3804,13 @@
// remove from our tracks vector
removeTrack_l(track);
}
- enabledHapticTracksRemoved |= track->getHapticPlaybackEnabled();
- }
- // If the thread supports haptic playback and the track playing haptic data was removed,
- // enable haptic playback on the first active track that contains haptic channels.
- // TODO: Query vibrator service to know which track should enable haptic playback.
- if (enabledHapticTracksRemoved && mHapticChannelMask != AUDIO_CHANNEL_NONE) {
- for (auto &t : mActiveTracks) {
- if (t->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) {
- t->setHapticPlaybackEnabled(true);
- break;
- }
+ if ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
+ && mHapticChannelCount > 0) {
+ mLock.unlock();
+ // Unlock due to VibratorService will lock for this call and will
+ // call Tracks.mute/unmute which also require thread's lock.
+ AudioFlinger::onExternalVibrationStop(track->getExternalVibration());
+ mLock.lock();
}
}
}
@@ -3985,6 +4003,7 @@
// mPipeSink below
// mNormalSink below
{
+ setMasterBalance(audioFlinger->getMasterBalance_l());
ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
"mFrameCount=%zu, mNormalFrameCount=%zu",
@@ -4615,6 +4634,7 @@
fastTrack->mChannelMask = track->mChannelMask;
fastTrack->mFormat = track->mFormat;
fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
+ fastTrack->mHapticIntensity = track->getHapticIntensity();
fastTrack->mGeneration++;
state->mTrackMask |= 1 << j;
didModify = true;
@@ -4756,7 +4776,10 @@
track->mFillingUpStatus = Track::FS_ACTIVE;
if (track->mState == TrackBase::RESUMING) {
track->mState = TrackBase::ACTIVE;
- param = AudioMixer::RAMP_VOLUME;
+ // If a new track is paused immediately after start, do not ramp on resume.
+ if (cblk->mServer != 0) {
+ param = AudioMixer::RAMP_VOLUME;
+ }
}
mAudioMixer->setParameter(trackId, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
mLeftVolFloat = -1.0;
@@ -4937,6 +4960,10 @@
trackId,
AudioMixer::TRACK,
AudioMixer::HAPTIC_ENABLED, (void *)(uintptr_t)track->getHapticPlaybackEnabled());
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::HAPTIC_INTENSITY, (void *)(uintptr_t)track->getHapticIntensity());
// reset retry count
track->mRetryCount = kMaxTrackRetries;
@@ -5266,6 +5293,9 @@
dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
+ dprintf(fd, " Master balance: %f (%s)\n", mMasterBalance.load(),
+ (hasFastMixer() ? std::to_string(mFastMixer->getMasterBalance())
+ : mBalance.toString()).c_str());
const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
if (latencyMs != 0.) {
dprintf(fd, " NormalMixer latency ms: %.2lf\n", latencyMs);
@@ -5329,23 +5359,34 @@
// ----------------------------------------------------------------------------
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device, bool systemReady)
- : PlaybackThread(audioFlinger, output, id, device, DIRECT, systemReady)
-{
-}
-
-AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
+ AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device,
ThreadBase::type_t type, bool systemReady)
: PlaybackThread(audioFlinger, output, id, device, type, systemReady)
- , mVolumeShaperActive(false)
{
+ setMasterBalance(audioFlinger->getMasterBalance_l());
}
AudioFlinger::DirectOutputThread::~DirectOutputThread()
{
}
+void AudioFlinger::DirectOutputThread::dumpInternals(int fd, const Vector<String16>& args)
+{
+ PlaybackThread::dumpInternals(fd, args);
+ dprintf(fd, " Master balance: %f Left: %f Right: %f\n",
+ mMasterBalance.load(), mMasterBalanceLeft, mMasterBalanceRight);
+}
+
+void AudioFlinger::DirectOutputThread::setMasterBalance(float balance)
+{
+ Mutex::Autolock _l(mLock);
+ if (mMasterBalance != balance) {
+ mMasterBalance.store(balance);
+ mBalance.computeStereoBalance(balance, &mMasterBalanceLeft, &mMasterBalanceRight);
+ broadcast_l();
+ }
+}
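DirectOutputThread folds the master balance into the per-track volume (see the processVolume_l hunk below). The exact curve lives in audio_utils::Balance::computeStereoBalance; the following is only a plausible linear sketch for illustration:

    #include <algorithm>

    void computeStereoBalanceSketch(float balance, float* left, float* right) {
        balance = std::clamp(balance, -1.f, 1.f);      // 0 means no change
        *left  = balance > 0.f ? 1.f - balance : 1.f;  // positive balance attenuates left
        *right = balance < 0.f ? 1.f + balance : 1.f;  // negative balance attenuates right
    }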
+
void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
{
float left, right;
@@ -5369,12 +5410,12 @@
if (left > GAIN_FLOAT_UNITY) {
left = GAIN_FLOAT_UNITY;
}
- left *= v;
+ left *= v * mMasterBalanceLeft; // DirectOutputThread balance applied as track volume
right = float_from_gain(gain_minifloat_unpack_right(vlr));
if (right > GAIN_FLOAT_UNITY) {
right = GAIN_FLOAT_UNITY;
}
- right *= v;
+ right *= v * mMasterBalanceRight;
}
if (lastTrack) {
@@ -5416,6 +5457,11 @@
mFlushPending = true;
}
}
+ } else if (previousTrack == 0) {
+ // An old track could be added back during a track transition on direct output,
+ // so always issue a flush to discard data from the previous track if it was
+ // already destroyed while the HAL was paused; the flush then lets playback resume.
+ mFlushPending = true;
}
PlaybackThread::onAddNewTrack_l();
}
@@ -5454,7 +5500,6 @@
doHwPause = true;
mHwPaused = true;
}
- tracksToRemove->add(track);
} else if (track->isFlushPending()) {
track->flushAck();
if (last) {
@@ -5551,7 +5596,8 @@
int64_t framesWritten = mBytesWritten / mFrameSize;
if (mStandby || !last ||
- track->presentationComplete(framesWritten, audioHALFrames)) {
+ track->presentationComplete(framesWritten, audioHALFrames) ||
+ track->isPaused()) {
if (track->isStopping_2()) {
track->mState = TrackBase::STOPPED;
}
@@ -6697,8 +6743,10 @@
// used to request a deferred sleep, to be executed later while mutex is unlocked
uint32_t sleepUs = 0;
+ int64_t lastLoopCountRead = -2; // never matches "previous" loop, when loopCount = 0.
+
// loop while there is work to do
- for (;;) {
+ for (int64_t loopCount = 0;; ++loopCount) { // loopCount used for statistics tracking
Vector< sp<EffectChain> > effectChains;
// activeTracks accumulates a copy of a subset of mActiveTracks
@@ -6897,6 +6945,7 @@
int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
ssize_t framesRead;
+ const int64_t lastIoBeginNs = systemTime(); // start IO timing
// If an NBAIO source is present, use it to read the normal capture's data
if (mPipeSource != 0) {
@@ -6954,10 +7003,12 @@
}
}
+ const int64_t lastIoEndNs = systemTime(); // end IO timing
+
// Update server timestamp with server stats
// systemTime() is optional if the hardware supports timestamps.
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
// Update server timestamp with kernel stats
if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
@@ -7006,6 +7057,24 @@
ALOG_ASSERT(framesRead > 0);
mFramesRead += framesRead;
+ if (audio_has_proportional_frames(mFormat)
+ && loopCount == lastLoopCountRead + 1) {
+ const int64_t readPeriodNs = lastIoEndNs - mLastIoEndNs;
+ const double jitterMs =
+ TimestampVerifier<int64_t, int64_t>::computeJitterMs(
+ {framesRead, readPeriodNs},
+ {0, 0} /* lastTimestamp */, mSampleRate);
+ const double processMs = (lastIoBeginNs - mLastIoEndNs) * 1e-6;
+
+ Mutex::Autolock _l(mLock);
+ mIoJitterMs.add(jitterMs);
+ mProcessTimeMs.add(processMs);
+ }
+ // update timing info.
+ mLastIoBeginNs = lastIoBeginNs;
+ mLastIoEndNs = lastIoEndNs;
+ lastLoopCountRead = loopCount;
+
#ifdef TEE_SINK
(void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
#endif
@@ -7654,7 +7723,7 @@
AudioStreamIn *input = mInput;
audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
- input, flags, inputFlagsToString(flags).c_str());
+ input, flags, toString(flags).c_str());
dprintf(fd, " Frames read: %lld\n", (long long)mFramesRead);
if (mActiveTracks.isEmpty()) {
dprintf(fd, " No active record clients\n");
@@ -8418,6 +8487,7 @@
audio_output_flags_t flags =
(audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
audio_port_handle_t deviceId = mDeviceId;
+ std::vector<audio_io_handle_t> secondaryOutputs;
ret = AudioSystem::getOutputForAttr(&mAttr, &io,
mSessionId,
&stream,
@@ -8426,7 +8496,10 @@
&config,
flags,
&deviceId,
- &portId);
+ &portId,
+ &secondaryOutputs);
+ ALOGD_IF(!secondaryOutputs.empty(),
+ "MmapThread::start does not support secondary outputs, ignoring them");
} else {
audio_config_base_t config;
config.sample_rate = mSampleRate;
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index aab7601..4968829 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -512,6 +512,15 @@
TimestampVerifier< // For timestamp statistics.
int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
audio_devices_t mTimestampCorrectedDevices = AUDIO_DEVICE_NONE;
+
+ // ThreadLoop statistics per iteration.
+ int64_t mLastIoBeginNs = -1;
+ int64_t mLastIoEndNs = -1;
+
+ // This should be read under ThreadBase lock (if not on the threadLoop thread).
+ audio_utils::Statistics<double> mIoJitterMs{0.995 /* alpha */};
+ audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
+
bool mIsMsdDevice = false;
// A condition that must be evaluated by the thread loop has changed and
// we must not wait for async write callback in the thread loop before evaluating it
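mIoJitterMs and mProcessTimeMs above use audio_utils::Statistics with an alpha forgetting factor. A minimal exponentially weighted sketch of the same idea, not the audio_utils implementation:

    #include <cmath>
    #include <cstdint>
    #include <sstream>
    #include <string>

    class EwStatsSketch {
    public:
        explicit EwStatsSketch(double alpha) : mAlpha(alpha) {}
        void add(double x) {              // older samples are progressively forgotten
            ++mN;
            mWeight = mAlpha * mWeight + 1.;
            mSum    = mAlpha * mSum + x;
            mSumSq  = mAlpha * mSumSq + x * x;
        }
        int64_t getN() const { return mN; }
        double mean() const { return mWeight > 0. ? mSum / mWeight : 0.; }
        double stddev() const {
            if (mWeight <= 0.) return 0.;
            const double m = mean(), var = mSumSq / mWeight - m * m;
            return var > 0. ? std::sqrt(var) : 0.;
        }
        std::string toString() const {
            std::ostringstream oss;
            oss << "n=" << mN << " mean=" << mean() << " std=" << stddev();
            return oss.str();
        }
    private:
        const double mAlpha;              // e.g. 0.995, as in the declarations above
        int64_t mN = 0;
        double mWeight = 0., mSum = 0., mSumSq = 0.;
    };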
@@ -733,6 +742,7 @@
// VolumeInterface
virtual void setMasterVolume(float value);
+ virtual void setMasterBalance(float balance);
virtual void setMasterMute(bool muted);
virtual void setStreamVolume(audio_stream_type_t stream, float value);
virtual void setStreamMute(audio_stream_type_t stream, bool muted);
@@ -1027,7 +1037,8 @@
AudioStreamOut *mOutput;
float mMasterVolume;
- nsecs_t mLastWriteTime;
+ std::atomic<float> mMasterBalance{};
+ audio_utils::Balance mBalance;
int mNumWrites;
int mNumDelayedWrites;
bool mInWrite;
@@ -1199,13 +1210,22 @@
// Blending with limiter is not idempotent,
// and blending without limiter is idempotent but inefficient to do twice.
virtual bool requireMonoBlend() { return mMasterMono.load() && !hasFastMixer(); }
+
+ void setMasterBalance(float balance) override {
+ mMasterBalance.store(balance);
+ if (hasFastMixer()) {
+ mFastMixer->setMasterBalance(balance);
+ }
+ }
};
class DirectOutputThread : public PlaybackThread {
public:
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, bool systemReady);
+ audio_io_handle_t id, audio_devices_t device, bool systemReady)
+ : DirectOutputThread(audioFlinger, output, id, device, DIRECT, systemReady) { }
+
virtual ~DirectOutputThread();
status_t selectPresentation(int presentationId, int programId);
@@ -1214,8 +1234,13 @@
virtual bool checkForNewParameter_l(const String8& keyValuePair,
status_t& status);
+
+ void dumpInternals(int fd, const Vector<String16>& args) override;
+
virtual void flushHw_l();
+ void setMasterBalance(float balance) override;
+
protected:
virtual uint32_t activeSleepTimeUs() const;
virtual uint32_t idleSleepTimeUs() const;
@@ -1231,10 +1256,10 @@
virtual void onAddNewTrack_l();
- bool mVolumeShaperActive;
+ bool mVolumeShaperActive = false;
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, uint32_t device, ThreadBase::type_t type,
+ audio_io_handle_t id, audio_devices_t device, ThreadBase::type_t type,
bool systemReady);
void processVolume_l(Track *track, bool lastTrack);
@@ -1243,6 +1268,10 @@
wp<Track> mPreviousTrack; // used to detect track switch
+ // This must be initialized for initial condition of mMasterBalance = 0 (disabled).
+ float mMasterBalanceLeft = 1.f;
+ float mMasterBalanceRight = 1.f;
+
public:
virtual bool hasFastMixer() const { return false; }
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index c94639b..0ba0ab4 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -329,3 +329,19 @@
const struct timespec *requested = NULL) = 0;
virtual void releaseBuffer(Proxy::Buffer* buffer) = 0;
};
+
+class PatchTrackBase : public PatchProxyBufferProvider
+{
+public:
+ using Timeout = std::optional<std::chrono::nanoseconds>;
+ PatchTrackBase(sp<ClientProxy> proxy, const ThreadBase& thread,
+ const Timeout& timeout);
+ void setPeerTimeout(std::chrono::nanoseconds timeout);
+ void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+
+protected:
+ const sp<ClientProxy> mProxy;
+ PatchProxyBufferProvider* mPeerProxy = nullptr;
+ struct timespec mPeerTimeout{};
+
+};
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index d23d19d..65f799e 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -99,7 +99,7 @@
mId(android_atomic_inc(&nextTrackId)),
mTerminated(false),
mType(type),
- mThreadIoHandle(thread->id()),
+ mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
mPortId(portId),
mIsInvalid(false)
{
@@ -277,6 +277,27 @@
return NO_ERROR;
}
+AudioFlinger::ThreadBase::PatchTrackBase::PatchTrackBase(sp<ClientProxy> proxy,
+ const ThreadBase& thread,
+ const Timeout& timeout)
+ : mProxy(proxy)
+{
+ if (timeout) {
+ setPeerTimeout(*timeout);
+ } else {
+ // Double buffer mixer
+ uint64_t mixBufferNs = ((uint64_t)2 * thread.frameCount() * 1000000000) /
+ thread.sampleRate();
+ setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
+ }
+}
+
+void AudioFlinger::ThreadBase::PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
+ mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
+ mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
+}
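A worked example of the default peer timeout computed above (two mixer buffers), using an assumed frame count and sample rate:

    #include <chrono>
    #include <cstdint>
    #include <ctime>
    #include <iostream>

    int main() {
        const uint64_t frameCount = 960, sampleRate = 48000;    // assumed values
        const uint64_t mixBufferNs = (2 * frameCount * 1000000000ULL) / sampleRate;
        const std::chrono::nanoseconds timeout{mixBufferNs};    // 40 ms here
        timespec ts;
        ts.tv_sec  = timeout.count() / std::nano::den;
        ts.tv_nsec = timeout.count() % std::nano::den;
        std::cout << ts.tv_sec << " s " << ts.tv_nsec << " ns\n";  // 0 s 40000000 ns
    }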
+
+
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
@@ -449,8 +470,14 @@
|| thread->type() == ThreadBase::DUPLICATING;
#ifdef TEE_SINK
mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
- + "_" + std::to_string(mId));
+ + "_" + std::to_string(mId) + "_T");
#endif
+
+ if (channelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
+ mAudioVibrationController = new AudioVibrationController(this);
+ mExternalVibration = new os::ExternalVibration(
+ mUid, "" /* pkg */, mAttr, mAudioVibrationController);
+ }
}
AudioFlinger::PlaybackThread::Track::~Track()
@@ -498,6 +525,7 @@
AudioSystem::releaseOutput(mPortId);
}
}
+ forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
}
void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
@@ -643,8 +671,7 @@
}
// AudioBufferProvider interface
-status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
- AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
ServerProxy::Buffer buf;
size_t desiredFrames = buffer->frameCount;
@@ -659,10 +686,61 @@
} else {
mAudioTrackServerProxy->tallyUnderrunFrames(0);
}
-
return status;
}
+void AudioFlinger::PlaybackThread::Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
+{
+ interceptBuffer(*buffer);
+ TrackBase::releaseBuffer(buffer);
+}
+
+// TODO: compensate for time shift between HW modules.
+void AudioFlinger::PlaybackThread::Track::interceptBuffer(
+ const AudioBufferProvider::Buffer& sourceBuffer) {
+ auto start = std::chrono::steady_clock::now();
+ const size_t frameCount = sourceBuffer.frameCount;
+ for (auto& sink : mTeePatches) {
+ RecordThread::PatchRecord* patchRecord = sink.patchRecord.get();
+
+ size_t framesWritten = writeFrames(patchRecord, sourceBuffer.i8, frameCount);
+ // On buffer wrap, the buffer frame count will be less than requested,
+ // when this happens a second buffer needs to be used to write the leftover audio
+ size_t framesLeft = frameCount - framesWritten;
+ if (framesWritten != 0 && framesLeft != 0) {
+ framesWritten +=
+ writeFrames(patchRecord, sourceBuffer.i8 + framesWritten * mFrameSize, framesLeft);
+ framesLeft = frameCount - framesWritten;
+ }
+ ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d cannot provide a big enough "
+ "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->mId,
+ framesWritten, frameCount, framesLeft);
+ }
+ auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
+ using namespace std::chrono_literals;
+ // Average is ~20us per track; this should virtually never be logged (logging takes >200us)
+ ALOGD_IF(spent > 200us, "%s: took %lldus to intercept %zu tracks", __func__,
+ spent.count(), mTeePatches.size());
+}
+
+size_t AudioFlinger::PlaybackThread::Track::writeFrames(AudioBufferProvider* dest,
+ const void* src,
+ size_t frameCount) {
+ AudioBufferProvider::Buffer patchBuffer;
+ patchBuffer.frameCount = frameCount;
+ auto status = dest->getNextBuffer(&patchBuffer);
+ if (status != NO_ERROR) {
+ ALOGW("%s PathRecord getNextBuffer failed with error %d: %s",
+ __func__, status, strerror(-status));
+ return 0;
+ }
+ ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
+ memcpy(patchBuffer.raw, src, patchBuffer.frameCount * mFrameSize);
+ auto framesWritten = patchBuffer.frameCount;
+ dest->releaseBuffer(&patchBuffer);
+ return framesWritten;
+}
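interceptBuffer() above writes a second time when the destination wraps. A standalone sketch of that idea against a toy byte ring, not the PatchRecord proxy:

    #include <algorithm>
    #include <cstddef>
    #include <cstring>
    #include <vector>

    struct RingSinkSketch {
        std::vector<char> buf = std::vector<char>(1024);
        size_t pos = 0;
        size_t write(const char* src, size_t bytes) {   // returns the contiguous part copied
            const size_t contiguous = std::min(bytes, buf.size() - pos);
            memcpy(buf.data() + pos, src, contiguous);
            pos = (pos + contiguous) % buf.size();
            return contiguous;
        }
    };

    size_t writeAll(RingSinkSketch& sink, const char* src, size_t bytes) {
        size_t written = sink.write(src, bytes);
        if (written != 0 && written != bytes) {          // wrapped: write the leftover
            written += sink.write(src + written, bytes - written);
        }
        return written;                                  // caller logs/drops any remainder
    }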
+
// releaseBuffer() is not overridden
// ExtendedAudioBufferProvider interface
@@ -810,6 +888,9 @@
} else {
status = BAD_VALUE;
}
+ if (status == NO_ERROR) {
+ forEachTeePatchTrack([](auto patchTrack) { patchTrack->start(); });
+ }
return status;
}
@@ -843,6 +924,7 @@
__func__, mId, (int)mThreadIoHandle);
}
}
+ forEachTeePatchTrack([](auto patchTrack) { patchTrack->stop(); });
}
void AudioFlinger::PlaybackThread::Track::pause()
@@ -875,6 +957,8 @@
break;
}
}
+ // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
+ forEachTeePatchTrack([](auto patchTrack) { patchTrack->pause(); });
}
void AudioFlinger::PlaybackThread::Track::flush()
@@ -936,6 +1020,8 @@
// because the hardware buffer could hold a large amount of audio
playbackThread->broadcast_l();
}
+ // Flush the Tee so that resuming does not play stale data or glitch on the transition to new data
+ forEachTeePatchTrack([](auto patchTrack) { patchTrack->flush(); });
}
// must be called with thread lock held
@@ -1054,6 +1140,11 @@
};
}
+void AudioFlinger::PlaybackThread::Track::setTeePatches(TeePatches teePatches) {
+ forEachTeePatchTrack([](auto patchTrack) { patchTrack->destroy(); });
+ mTeePatches = std::move(teePatches);
+}
+
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
if (!isOffloaded() && !isDirect()) {
@@ -1336,6 +1427,40 @@
mServerLatencyMs.store(latencyMs);
}
+binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::mute(
+ /*out*/ bool *ret) {
+ *ret = false;
+ sp<ThreadBase> thread = mTrack->mThread.promote();
+ if (thread != 0) {
+ // Lock for updating mHapticPlaybackEnabled.
+ Mutex::Autolock _l(thread->mLock);
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
+ && playbackThread->mHapticChannelCount > 0) {
+ mTrack->setHapticPlaybackEnabled(false);
+ *ret = true;
+ }
+ }
+ return binder::Status::ok();
+}
+
+binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::unmute(
+ /*out*/ bool *ret) {
+ *ret = false;
+ sp<ThreadBase> thread = mTrack->mThread.promote();
+ if (thread != 0) {
+ // Lock for updating mHapticPlaybackEnabled.
+ Mutex::Autolock _l(thread->mLock);
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+ if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
+ && playbackThread->mHapticChannelCount > 0) {
+ mTrack->setHapticPlaybackEnabled(true);
+ *ret = true;
+ }
+ }
+ return binder::Status::ok();
+}
+
// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::OutputTrack"
@@ -1575,19 +1700,16 @@
size_t frameCount,
void *buffer,
size_t bufferSize,
- audio_output_flags_t flags)
+ audio_output_flags_t flags,
+ const Timeout& timeout)
: Track(playbackThread, NULL, streamType,
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, nullptr /* sharedBuffer */,
AUDIO_SESSION_NONE, AID_AUDIOSERVER, flags, TYPE_PATCH),
- mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
+ PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
+ *playbackThread, timeout)
{
- uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
- playbackThread->sampleRate();
- mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
- mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
-
ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
__func__, mId, sampleRate,
(int)mPeerTimeout.tv_sec,
@@ -2048,19 +2170,16 @@
size_t frameCount,
void *buffer,
size_t bufferSize,
- audio_input_flags_t flags)
+ audio_input_flags_t flags,
+ const Timeout& timeout)
: RecordTrack(recordThread, NULL,
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, AUDIO_SESSION_NONE, AID_AUDIOSERVER,
flags, TYPE_PATCH),
- mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
+ PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
+ *recordThread, timeout)
{
- uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
- recordThread->sampleRate();
- mPeerTimeout.tv_sec = mixBufferNs / 1000000000;
- mPeerTimeout.tv_nsec = (int) (mixBufferNs % 1000000000);
-
ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
__func__, mId, sampleRate,
(int)mPeerTimeout.tv_sec,
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index ebb4f3b..f72f44a 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -11,8 +11,10 @@
LOCAL_C_INCLUDES := \
frameworks/av/services/audioflinger \
$(call include-path-for, audio-utils) \
- frameworks/av/services/audiopolicy/common/include \
- frameworks/av/services/audiopolicy/engine/interface \
+
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon \
+ libaudiopolicyengine_interface_headers \
LOCAL_SHARED_LIBRARIES := \
libcutils \
@@ -51,7 +53,7 @@
libcutils \
libutils \
liblog \
- libaudioclient \
+ libaudiopolicy \
libsoundtrigger
ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
@@ -76,10 +78,12 @@
endif # ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
LOCAL_C_INCLUDES += \
- frameworks/av/services/audiopolicy/common/include \
- frameworks/av/services/audiopolicy/engine/interface \
$(call include-path-for, audio-utils) \
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon \
+ libaudiopolicyengine_interface_headers
+
LOCAL_STATIC_LIBRARIES := \
libaudiopolicycomponents
@@ -114,10 +118,12 @@
libaudiopolicycomponents
LOCAL_C_INCLUDES += \
- frameworks/av/services/audiopolicy/common/include \
- frameworks/av/services/audiopolicy/engine/interface \
$(call include-path-for, audio-utils) \
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon \
+ libaudiopolicyengine_interface_headers
+
LOCAL_CFLAGS := -Wall -Werror
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 1c2b9d7..9b00a4d 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -75,14 +75,16 @@
virtual status_t setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name) = 0;
+ const char *device_name,
+ audio_format_t encodedFormat) = 0;
// retrieve a device connection status
virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
const char *device_address) = 0;
// indicate a change in device configuration
virtual status_t handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name) = 0;
+ const char *device_name,
+ audio_format_t encodedFormat) = 0;
// indicate a change in phone state. Valid phones states are defined by audio_mode_t
virtual void setPhoneState(audio_mode_t state) = 0;
// force using a specific device category for the specified usage
@@ -108,7 +110,8 @@
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId) = 0;
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs) = 0;
// indicates to the audio policy manager that the output starts being used by corresponding stream.
virtual status_t startOutput(audio_port_handle_t portId) = 0;
// indicates to the audio policy manager that the output stops being used by corresponding stream.
@@ -234,7 +237,20 @@
virtual bool isHapticPlaybackSupported() = 0;
+ virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats) = 0;
+
virtual void setAppState(uid_t uid, app_state_t state);
+
+ virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
+
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy) = 0;
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) = 0;
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup) = 0;
};
diff --git a/services/audiopolicy/common/include/RoutingStrategy.h b/services/audiopolicy/common/include/RoutingStrategy.h
deleted file mode 100644
index f8a1cd6..0000000
--- a/services/audiopolicy/common/include/RoutingStrategy.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-namespace android {
-
-// Time in milliseconds after media stopped playing during which we consider that the
-// sonification should be as unobtrusive as during the time media was playing.
-#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000
-
-enum routing_strategy {
- STRATEGY_NONE = -1,
- STRATEGY_MEDIA,
- STRATEGY_PHONE,
- STRATEGY_SONIFICATION,
- STRATEGY_SONIFICATION_RESPECTFUL,
- STRATEGY_DTMF,
- STRATEGY_ENFORCED_AUDIBLE,
- STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
- STRATEGY_ACCESSIBILITY,
- STRATEGY_REROUTING,
- NUM_STRATEGIES
-};
-
-}; //namespace android
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 5ccc8fd..48b5271 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -20,6 +20,23 @@
#include <utils/Log.h>
#include <math.h>
+namespace android {
+
+/**
+ * VolumeSource is the discriminant for volume management on an output.
+ * Historically it was the legacy stream type; it may become the volume group, or a volume
+ * curve if more than one curve per volume group is allowed.
+ */
+enum VolumeSource : std::underlying_type<audio_stream_type_t>::type;
+static const VolumeSource VOLUME_SOURCE_NONE = static_cast<VolumeSource>(AUDIO_STREAM_DEFAULT);
+
+static inline VolumeSource streamToVolumeSource(audio_stream_type_t stream) {
+ return static_cast<VolumeSource>(stream);
+}
+
+
+} // namespace android
+
// Absolute min volume in dB (can be represented in single precision normal float value)
#define VOLUME_MIN_DB (-758)
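A small usage sketch of the VolumeSource alias declared above; audio_stream_type_t normally comes from system/audio.h, so the values here are assumed for illustration only:

    #include <type_traits>

    enum audio_stream_type_t : int { AUDIO_STREAM_DEFAULT = -1, AUDIO_STREAM_MUSIC = 3 };
    enum VolumeSource : std::underlying_type<audio_stream_type_t>::type;

    static inline VolumeSource streamToVolumeSource(audio_stream_type_t stream) {
        return static_cast<VolumeSource>(stream);        // loss-free: same underlying type
    }

    int main() {
        const VolumeSource vs = streamToVolumeSource(AUDIO_STREAM_MUSIC);
        return vs == static_cast<VolumeSource>(3) ? 0 : 1;
    }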
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 46a2a40..605fc1c 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -17,9 +17,20 @@
#pragma once
#include <system/audio.h>
+#include <vector>
+
+namespace android {
+
+using StreamTypeVector = std::vector<audio_stream_type_t>;
+
+static const audio_attributes_t defaultAttr = AUDIO_ATTRIBUTES_INITIALIZER;
+
+} // namespace android
static const audio_format_t gDynamicFormat = AUDIO_FORMAT_DEFAULT;
+static const uint32_t SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY = 5000;
+
// For mixed output and inputs, the policy will use max mixer sampling rates.
// Do not limit sampling rate otherwise
#define SAMPLE_RATE_HZ_MAX 192000
@@ -76,6 +87,21 @@
}
/**
+ * Check whether audio device has encoding capability.
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device has encoding capability, false otherwise.
+ */
+static inline bool device_has_encoding_capability(audio_devices_t device)
+{
+ if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
+ return true;
+ }
+ return false;
+}
+
+/**
* Returns the priority of a given audio source for capture. The priority is used when more than one
* capture session is active on a given input stream to determine which session drives routing and
* effect configuration.
@@ -136,3 +162,25 @@
}
return format1 == format2;
}
+
+/**
+ * @brief hasStream checks if a given stream type is found in the list of streams
+ * @param streams collection of stream types to consider.
+ * @param streamType to consider
+ * @return true if voice stream is found in the given streams, false otherwise
+ */
+static inline bool hasStream(const android::StreamTypeVector &streams,
+ audio_stream_type_t streamType)
+{
+ return std::find(begin(streams), end(streams), streamType) != end(streams);
+}
+
+/**
+ * @brief hasVoiceStream checks if a voice stream is found in the list of streams
+ * @param streams collection to consider.
+ * @return true if voice stream is found in the given streams, false otherwise
+ */
+static inline bool hasVoiceStream(const android::StreamTypeVector &streams)
+{
+ return hasStream(streams, AUDIO_STREAM_VOICE_CALL);
+}
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index e5ebab7..c9037a1 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -19,7 +19,6 @@
"src/Serializer.cpp",
"src/SoundTriggerSession.cpp",
"src/TypeConverter.cpp",
- "src/VolumeCurve.cpp",
],
shared_libs: [
"libcutils",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
index 555412e..6e29632 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioIODescriptorInterface.h
@@ -16,6 +16,8 @@
#pragma once
+#include "DeviceDescriptor.h"
+
namespace android {
/**
@@ -34,4 +36,36 @@
virtual void setPatchHandle(audio_patch_handle_t handle) = 0;
};
+template <class IoDescriptor, class Filter>
+sp<DeviceDescriptor> findPreferredDevice(
+ IoDescriptor& desc, Filter filter, bool& active, const DeviceVector& devices)
+{
+ auto activeClients = desc->clientsList(true /*activeOnly*/);
+ auto activeClientsWithRoute =
+ desc->clientsList(true /*activeOnly*/, filter, true /*preferredDevice*/);
+ active = activeClients.size() > 0;
+ if (active && activeClients.size() == activeClientsWithRoute.size()) {
+ return devices.getDeviceFromId(activeClientsWithRoute[0]->preferredDeviceId());
+ }
+ return nullptr;
+}
+
+template <class IoCollection, class Filter>
+sp<DeviceDescriptor> findPreferredDevice(
+ IoCollection& ioCollection, Filter filter, const DeviceVector& devices)
+{
+ sp<DeviceDescriptor> device;
+ for (size_t i = 0; i < ioCollection.size(); i++) {
+ auto desc = ioCollection.valueAt(i);
+ bool active;
+ sp<DeviceDescriptor> curDevice = findPreferredDevice(desc, filter, active, devices);
+ if (active && curDevice == nullptr) {
+ return nullptr;
+ } else if (curDevice != nullptr) {
+ device = curDevice;
+ }
+ }
+ return device;
+}
+
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index d4cfd1e..803cfac 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -143,6 +143,16 @@
void trackEffectEnabled(const sp<EffectDescriptor> &effect, bool enabled);
+ /**
+ * @brief clearSessionRoutesForDevice: when a device is disconnected, and if it has been
+ * chosen as the preferred device by any client, the policy manager shall prevent any
+ * further use of this device by clearing all the session routes that involve it.
+ * In other words, the preferred device port id of these clients will be reset to NONE.
+ * @param disconnectedDevice device to be disconnected
+ */
+ void clearSessionRoutesForDevice(const sp<DeviceDescriptor> &disconnectedDevice);
+
void dump(String8 *dst) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 14b995b..cf9519b 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -16,18 +16,20 @@
#pragma once
+#define __STDC_LIMIT_MACROS
+#include <inttypes.h>
+
#include <sys/types.h>
#include <utils/Errors.h>
#include <utils/Timers.h>
#include <utils/KeyedVector.h>
#include <system/audio.h>
-#include <RoutingStrategy.h>
#include "AudioIODescriptorInterface.h"
#include "AudioPort.h"
#include "ClientDescriptor.h"
#include "DeviceDescriptor.h"
-#include <map>
+#include <vector>
namespace android {
@@ -35,6 +37,105 @@
class AudioMix;
class AudioPolicyClientInterface;
+class ActivityTracking
+{
+public:
+ virtual ~ActivityTracking() = default;
+ bool isActive(uint32_t inPastMs = 0, nsecs_t sysTime = 0) const
+ {
+ if (mActivityCount > 0) {
+ return true;
+ }
+ if (inPastMs == 0) {
+ return false;
+ }
+ if (sysTime == 0) {
+ sysTime = systemTime();
+ }
+ if (ns2ms(sysTime - mStopTime) < inPastMs) {
+ return true;
+ }
+ return false;
+ }
+ void changeActivityCount(int delta)
+ {
+ if ((delta + (int)mActivityCount) < 0) {
+ LOG_ALWAYS_FATAL("%s: invalid delta %d, refCount %d", __func__, delta, mActivityCount);
+ }
+ mActivityCount += delta;
+ if (!mActivityCount) {
+ setStopTime(systemTime());
+ }
+ }
+ uint32_t getActivityCount() const { return mActivityCount; }
+ nsecs_t getStopTime() const { return mStopTime; }
+ void setStopTime(nsecs_t stopTime) { mStopTime = stopTime; }
+
+ virtual void dump(String8 *dst, int spaces) const
+ {
+ dst->appendFormat("%*s- ActivityCount: %d, StopTime: %" PRId64 ", ", spaces, "",
+ getActivityCount(), getStopTime());
+ }
+private:
+ uint32_t mActivityCount = 0;
+ nsecs_t mStopTime = 0;
+};
+
+/**
+ * @brief VolumeActivity: tracks the activity used by the volume policy (current volume, mute
+ * count, time of the previous stop, and mute state when the device is incompatible with
+ * another strategy).
+ */
+class VolumeActivity : public ActivityTracking
+{
+public:
+ bool isMuted() const { return mMuteCount > 0; }
+ int getMuteCount() const { return mMuteCount; }
+ int incMuteCount() { return ++mMuteCount; }
+ int decMuteCount() { return mMuteCount > 0 ? --mMuteCount : -1; }
+
+ void dump(String8 *dst, int spaces) const override
+ {
+ ActivityTracking::dump(dst, spaces);
+ dst->appendFormat(", Volume: %.03f, MuteCount: %02d\n", mCurVolumeDb, mMuteCount);
+ }
+ void setVolume(float volume) { mCurVolumeDb = volume; }
+ float getVolume() const { return mCurVolumeDb; }
+
+private:
+ int mMuteCount = 0; /**< mute request counter */
+ float mCurVolumeDb = NAN; /**< current volume in dB. */
+};
+/**
+ * Note: volume activities shall be indexed by curve id if we want to allow multiple
+ * curves per volume group, assuming mute management or volume balancing between HW and SW
+ * is then handled per curve.
+ */
+using VolumeActivities = std::map<VolumeSource, VolumeActivity>;
+
+/**
+ * @brief RoutingActivity: tracks the routing activity of a product strategy on this output
+ * (activity count, time of the previous stop, and mute due to incompatible device selection).
+ * Having this class avoids looping over all attributes (legacy streams) of the strategy.
+ */
+class RoutingActivity : public ActivityTracking
+{
+public:
+ void setMutedByDevice(bool isMuted) { mIsMutedByDevice = isMuted; }
+ bool isMutedByDevice() const { return mIsMutedByDevice; }
+
+ void dump(String8 *dst, int spaces) const override {
+ ActivityTracking::dump(dst, spaces);
+ dst->appendFormat("\n");
+ }
+private:
+ /**
+ * strategies muted because of incompatible device selection.
+ * See AudioPolicyManager::checkDeviceMuteStrategies()
+ */
+ bool mIsMutedByDevice = false;
+};
+using RoutingActivities = std::map<product_strategy_t, RoutingActivity>;
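A standalone sketch of the "active within the past N ms" check implemented by ActivityTracking::isActive() above, with simplified types:

    #include <cstdint>

    using nsecs_t = int64_t;
    constexpr nsecs_t msToNs(uint32_t ms) { return nsecs_t(ms) * 1000000; }

    bool isActiveSketch(uint32_t activityCount, nsecs_t stopTimeNs,
                        uint32_t inPastMs, nsecs_t nowNs) {
        if (activityCount > 0) return true;   // still has active clients
        if (inPastMs == 0) return false;      // no grace window requested
        return (nowNs - stopTimeNs) < msToNs(inPastMs);
    }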
+
// descriptor for audio outputs. Used to maintain current configuration of each opened audio output
// and keep track of the usage of this output by each audio stream type.
class AudioOutputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
@@ -62,25 +163,87 @@
bool force);
/**
- * Changes the stream active count and mActiveClients only.
- * This does not change the client->active() state or the output descriptor's
- * global active count.
+ * @brief setStopTime set the stop time due to the client stoppage or a re routing of this
+ * client
+ * @param client to be considered
+ * @param sysTime when the client stopped/was rerouted
*/
- virtual void changeStreamActiveCount(const sp<TrackClientDescriptor>& client, int delta);
- uint32_t streamActiveCount(audio_stream_type_t stream) const
- { return mActiveCount[stream]; }
+ void setStopTime(const sp<TrackClientDescriptor>& client, nsecs_t sysTime);
/**
* Changes the client->active() state and the output descriptor's global active count,
* along with the stream active count and mActiveClients.
* The client must be previously added by the base class addClient().
+ * In the case of a duplicating thread, the client shall be added on the duplicated thread,
+ * not on the involved outputs, but setClientActive will be called on all outputs to track the
+ * strategy and active clients for a given output.
+ * The client's active ref count is incremented/decremented through the setActive API.
*/
- void setClientActive(const sp<TrackClientDescriptor>& client, bool active);
+ virtual void setClientActive(const sp<TrackClientDescriptor>& client, bool active);
- bool isActive(uint32_t inPastMs = 0) const;
- bool isStreamActive(audio_stream_type_t stream,
- uint32_t inPastMs = 0,
- nsecs_t sysTime = 0) const;
+ bool isActive(uint32_t inPastMs) const;
+ bool isActive(VolumeSource volumeSource = VOLUME_SOURCE_NONE,
+ uint32_t inPastMs = 0,
+ nsecs_t sysTime = 0) const;
+ bool isAnyActive(VolumeSource volumeSourceToIgnore) const;
+
+ std::vector<VolumeSource> getActiveVolumeSources() const {
+ std::vector<VolumeSource> activeList;
+ for (const auto &iter : mVolumeActivities) {
+ if (iter.second.isActive()) {
+ activeList.push_back(iter.first);
+ }
+ }
+ return activeList;
+ }
+ uint32_t getActivityCount(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).getActivityCount() : 0;
+ }
+ bool isMuted(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).isMuted() : false;
+ }
+ int getMuteCount(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).getMuteCount() : 0;
+ }
+ int incMuteCount(VolumeSource vs)
+ {
+ return mVolumeActivities[vs].incMuteCount();
+ }
+ int decMuteCount(VolumeSource vs)
+ {
+ return mVolumeActivities[vs].decMuteCount();
+ }
+ void setCurVolume(VolumeSource vs, float volume)
+ {
+ // Even if no activity is registered for this group yet, create it anyway
+ mVolumeActivities[vs].setVolume(volume);
+ }
+ float getCurVolume(VolumeSource vs) const
+ {
+ return mVolumeActivities.find(vs) != std::end(mVolumeActivities) ?
+ mVolumeActivities.at(vs).getVolume() : NAN;
+ }
+
+ bool isStrategyActive(product_strategy_t ps, uint32_t inPastMs = 0, nsecs_t sysTime = 0) const
+ {
+ return mRoutingActivities.find(ps) != std::end(mRoutingActivities)?
+ mRoutingActivities.at(ps).isActive(inPastMs, sysTime) : false;
+ }
+ bool isStrategyMutedByDevice(product_strategy_t ps) const
+ {
+ return mRoutingActivities.find(ps) != std::end(mRoutingActivities)?
+ mRoutingActivities.at(ps).isMutedByDevice() : false;
+ }
+ void setStrategyMutedByDevice(product_strategy_t ps, bool isMuted)
+ {
+ mRoutingActivities[ps].setMutedByDevice(isMuted);
+ }
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const;
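For reference, a minimal sketch of how a caller might use the per-VolumeSource helpers added above (illustrative only; the surrounding function, `desc` and `mediaVs` are assumed and are not part of this change):

    // Hypothetical: query activity, mute state and current volume for the "media" source.
    void dumpMediaState(const sp<AudioOutputDescriptor>& desc, VolumeSource mediaVs) {
        ALOGV("media active=%d muted=%d volume=%f",
              desc->isActive(mediaVs, 0 /*inPastMs*/),
              desc->isMuted(mediaVs),
              desc->getCurVolume(mediaVs));
    }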
@@ -95,7 +258,8 @@
void setPatchHandle(audio_patch_handle_t handle) override;
TrackClientVector clientsList(bool activeOnly = false,
- routing_strategy strategy = STRATEGY_NONE, bool preferredDeviceOnly = false) const;
+ product_strategy_t strategy = PRODUCT_STRATEGY_NONE,
+ bool preferredDeviceOnly = false) const;
// override ClientMapHandler to abort when removing a client when active.
void removeClient(audio_port_handle_t portId) override {
@@ -105,40 +269,36 @@
// it is possible that when a client is removed, we could remove its
// associated active count by calling changeStreamActiveCount(),
// but that would be hiding a problem, so we log fatal instead.
- auto it2 = mActiveClients.find(client);
- LOG_ALWAYS_FATAL_IF(it2 != mActiveClients.end(),
- "%s(%d) removing client portId %d which is active (count %zu)",
- __func__, mId, portId, it2->second);
+ auto clientIter = std::find(begin(mActiveClients), end(mActiveClients), client);
+ LOG_ALWAYS_FATAL_IF(clientIter != mActiveClients.end(),
+ "%s(%d) removing client portId %d which is active (count %d)",
+ __func__, mId, portId, client->getActivityCount());
ClientMapHandler<TrackClientDescriptor>::removeClient(portId);
}
- using ActiveClientMap = std::map<sp<TrackClientDescriptor>, size_t /* count */>;
- // required for duplicating thread
- const ActiveClientMap& getActiveClients() const {
+ const TrackClientVector& getActiveClients() const {
return mActiveClients;
}
DeviceVector mDevices; /**< current devices this output is routed to */
- nsecs_t mStopTime[AUDIO_STREAM_CNT];
- int mMuteCount[AUDIO_STREAM_CNT]; // mute request counter
- bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible
- // device selection. See checkDeviceMuteStrategies()
AudioMix *mPolicyMix = nullptr; // non NULL when used by a dynamic policy
protected:
const sp<AudioPort> mPort;
AudioPolicyClientInterface * const mClientInterface;
- float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume in dB
- uint32_t mActiveCount[AUDIO_STREAM_CNT]; // number of streams of each type active on this output
uint32_t mGlobalActiveCount = 0; // non-client-specific active count
audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
- // The ActiveClientMap shows the clients that contribute to the streams counts
+ // The active clients vector shows the clients that contribute to the @VolumeSource counts
// and may include upstream clients from a duplicating thread.
// Compare with the ClientMap (mClients) which are external AudioTrack clients of the
// output descriptor (and do not count internal PatchTracks).
- ActiveClientMap mActiveClients;
+ TrackClientVector mActiveClients;
+
+ RoutingActivities mRoutingActivities; /**< track routing activity on this output. */
+
+ VolumeActivities mVolumeActivities; /**< track volume activity on this output. */
};
// Audio output driven by a software mixer in audio flinger.
@@ -154,13 +314,19 @@
void setDevices(const DeviceVector &devices) { mDevices = devices; }
bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
virtual DeviceVector supportedDevices() const;
+ virtual bool deviceSupportsEncodedFormats(audio_devices_t device);
virtual uint32_t latency();
virtual bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
virtual bool isFixedVolume(audio_devices_t device);
sp<SwAudioOutputDescriptor> subOutput1() { return mOutput1; }
sp<SwAudioOutputDescriptor> subOutput2() { return mOutput2; }
- void changeStreamActiveCount(
- const sp<TrackClientDescriptor>& client, int delta) override;
+ void setClientActive(const sp<TrackClientDescriptor>& client, bool active) override;
+ void setAllClientsInactive()
+ {
+ for (const auto &client : clientsList(true)) {
+ setClientActive(client, false);
+ }
+ }
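A sketch of the intended call pattern for the helper above (hypothetical caller, not part of this change):

    // Before invalidating or re-routing an output, force every remaining active client
    // inactive so the strategy/volume activity counters of the descriptor are released.
    void invalidateOutput(const sp<SwAudioOutputDescriptor>& out) {
        out->setAllClientsInactive();
        // ... then move the clients to another output or close `out`
    }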
virtual bool setVolume(float volume,
audio_stream_type_t stream,
audio_devices_t device,
@@ -253,25 +419,52 @@
public DefaultKeyedVector< audio_io_handle_t, sp<SwAudioOutputDescriptor> >
{
public:
- bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActive(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
- * return whether a stream is playing remotely, override to change the definition of
+ * return whether any client contributing to the given VolumeSource is playing remotely,
+ * override to change the definition of
* local/remote playback, used for instance by notification manager to not make
* media players lose audio focus when not playing locally
* For the base implementation, "remotely" means playing during screen mirroring which
* uses an output for playback with a non-empty, non "0" address.
*/
- bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActiveRemotely(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
- * return whether a stream is playing, but not on a "remote" device.
+ * return whether any client contributing to the given VolumeSource is playing, but not on a
+ * "remote" device.
* Override to change the definition of a local/remote playback.
* Used for instance by policy manager to alter the speaker playback ("speaker safe" behavior)
* when media plays or not locally.
* For the base implementation, "remotely" means playing during screen mirroring.
*/
- bool isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActiveLocally(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
+
+ /**
+ * @brief isStrategyActiveOnSameModule checks if the given strategy is active (or was active
+ * in the past) on the given output or on any other output attached to the same HW module
+ * as the given output
+ * @param desc output descriptor to be considered
+ * @param ps product strategy to be checked upon activity status
+ * @param inPastMs if 0, check current activity only, otherwise also check activity in the past
+ * @param sysTime shall be set if the request covers past activity.
+ * @return true if an output following the strategy is active on the same module as desc,
+ * false otherwise
+ */
+ bool isStrategyActiveOnSameModule(product_strategy_t ps,
+ const sp<SwAudioOutputDescriptor>& desc,
+ uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
+
+ /**
+ * @brief clearSessionRoutesForDevice: when a device is disconnected and it has been chosen as
+ * the preferred device by any client, the policy manager shall stop using this device by
+ * clearing all the session routes involving it.
+ * In other words, the preferred device port id of these clients is reset to NONE.
+ * @param disconnectedDevice device to be disconnected
+ */
+ void clearSessionRoutesForDevice(const sp<DeviceDescriptor> &disconnectedDevice);
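For clarity, an illustrative call of the new activity check; the caller, the 2000 ms window and the systemTime() usage are assumptions for the example, not mandated by this change:

    // Hypothetical: only relax device mute once no output sharing the HW module with
    // `desc` has played the phone strategy in the last 2 seconds.
    bool phoneRecentlyActiveOnModule(const SwAudioOutputCollection& outputs,
                                     const sp<SwAudioOutputDescriptor>& desc,
                                     product_strategy_t phoneStrategy) {
        return outputs.isStrategyActiveOnSameModule(phoneStrategy, desc,
                                                    2000 /*inPastMs*/, systemTime());
    }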
/**
* returns the A2DP output handle if it is open or 0 otherwise
@@ -293,9 +486,21 @@
sp<SwAudioOutputDescriptor> getPrimaryOutput() const;
/**
- * return true if any output is playing anything besides the stream to ignore
+ * @brief isAnyOutputActive checks if any output is active (i.e. playing) for any volume source
+ * other than the one to be ignored
+ * @param volumeSourceToIgnore source not considered in the activity detection
+ * @return true if any output is active for any source except the one to be ignored
*/
- bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+ bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
+ {
+ for (size_t i = 0; i < size(); i++) {
+ const sp<AudioOutputDescriptor> &outputDesc = valueAt(i);
+ if (outputDesc->isAnyActive(volumeSourceToIgnore)) {
+ return true;
+ }
+ }
+ return false;
+ }
audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
@@ -308,12 +513,24 @@
public DefaultKeyedVector< audio_io_handle_t, sp<HwAudioOutputDescriptor> >
{
public:
- bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+ bool isActive(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
- * return true if any output is playing anything besides the stream to ignore
+ * @brief isAnyOutputActive checks if any output is active (i.e. playing) for any volume source
+ * other than the one to be ignored
+ * @param volumeSourceToIgnore source not considered in the activity detection
+ * @return true if any output is active for any source except the one to be ignored
*/
- bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+ bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
+ {
+ for (size_t i = 0; i < size(); i++) {
+ const sp<AudioOutputDescriptor> &outputDesc = valueAt(i);
+ if (outputDesc->isAnyActive(volumeSourceToIgnore)) {
+ return true;
+ }
+ }
+ return false;
+ }
void dump(String8 *dst) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index d52eb3d..2264d8f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -20,7 +20,6 @@
#include <unordered_set>
#include <AudioGain.h>
-#include <VolumeCurve.h>
#include <AudioPort.h>
#include <AudioPatch.h>
#include <DeviceDescriptor.h>
@@ -40,13 +39,11 @@
AudioPolicyConfig(HwModuleCollection &hwModules,
DeviceVector &availableOutputDevices,
DeviceVector &availableInputDevices,
- sp<DeviceDescriptor> &defaultOutputDevice,
- VolumeCurvesCollection *volumes = nullptr)
+ sp<DeviceDescriptor> &defaultOutputDevice)
: mHwModules(hwModules),
mAvailableOutputDevices(availableOutputDevices),
mAvailableInputDevices(availableInputDevices),
mDefaultOutputDevice(defaultOutputDevice),
- mVolumeCurves(volumes),
mIsSpeakerDrcEnabled(false)
{}
@@ -58,13 +55,6 @@
mSource = file;
}
- void setVolumes(const VolumeCurvesCollection &volumes)
- {
- if (mVolumeCurves != nullptr) {
- *mVolumeCurves = volumes;
- }
- }
-
void setHwModules(const HwModuleCollection &hwModules)
{
mHwModules = hwModules;
@@ -182,7 +172,6 @@
DeviceVector &mAvailableOutputDevices;
DeviceVector &mAvailableInputDevices;
sp<DeviceDescriptor> &mDefaultOutputDevice;
- VolumeCurvesCollection *mVolumeCurves;
// TODO: remove when legacy conf file is removed. true on devices that use DRC on the
// DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
// Note: remove also speaker_drc_enabled from global configuration of XML config file.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index e6a62d9..d6f24b2 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -23,9 +23,10 @@
#include <system/audio.h>
#include <utils/String8.h>
-namespace android {
+#include <DeviceDescriptor.h>
+#include <AudioOutputDescriptor.h>
-class SwAudioOutputDescriptor;
+namespace android {
/**
* custom mix entry in mPolicyMixes
@@ -42,12 +43,12 @@
android::AudioMix *getMix();
- void setMix(AudioMix &mix);
+ void setMix(const AudioMix &mix);
void dump(String8 *dst, int spaces, int index) const;
private:
- AudioMix mMix; // Audio policy mix descriptor
+ AudioMix mMix; // Audio policy mix descriptor
sp<SwAudioOutputDescriptor> mOutput; // Corresponding output stream
};
@@ -67,17 +68,28 @@
* Try to find an output descriptor for the given attributes.
*
* @param[in] attributes to consider fowr the research of output descriptor.
- * @param[out] desc to return if an output could be found.
- *
- * @return NO_ERROR if an output was found for the given attribute (in this case, the
- * descriptor output param is initialized), error code otherwise.
+ * @param[out] primaryDesc to return if a primary output could be found.
+ * @param[out] secondaryDescs other output descriptors the audio should also be routed to.
*/
- status_t getOutputForAttr(audio_attributes_t attributes, uid_t uid,
- sp<SwAudioOutputDescriptor> &desc);
+ status_t getOutputForAttr(const audio_attributes_t& attributes, uid_t uid,
+ sp<SwAudioOutputDescriptor> &primaryDesc,
+ std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs);
sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
const DeviceVector &availableDeviceTypes,
- AudioMix **policyMix);
+ AudioMix **policyMix) const;
+
+ /**
+ * @brief try to find a mix matching the given output descriptor and return the associated
+ * output device.
+ * @param output to be considered
+ * @param availableOutputDevices list of output devices currently reachable
+ * @param policyMix to be returned if any mix matches the output descriptor
+ * @return device selected from the mix attached to the output, null pointer otherwise
+ */
+ sp<DeviceDescriptor> getDeviceAndMixForOutput(const sp<SwAudioOutputDescriptor> &output,
+ const DeviceVector &availableOutputDevices,
+ AudioMix **policyMix = nullptr);
status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
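An illustrative (hypothetical) call site for the extended getOutputForAttr(), showing how both the primary and any secondary outputs could be retrieved; `mPolicyMixes`, `attr` and `uid` are assumed names:

    sp<SwAudioOutputDescriptor> primaryDesc;
    std::vector<sp<SwAudioOutputDescriptor>> secondaryDescs;
    if (mPolicyMixes.getOutputForAttr(attr, uid, primaryDesc, &secondaryDescs) == NO_ERROR) {
        // Route the track to primaryDesc; keep weak references to secondaryDescs so the
        // same audio can also be fed to the matching loopback/render mixes.
    }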
@@ -86,6 +98,11 @@
status_t getDevicesForUid(uid_t uid, Vector<AudioDeviceTypeAddr>& devices) const;
void dump(String8 *dst) const;
+
+private:
+ enum class MixMatchStatus { MATCH, NO_MATCH, INVALID_MIX };
+ MixMatchStatus mixMatch(const AudioMix* mix, size_t mixIndex,
+ const audio_attributes_t& attributes, uid_t uid);
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index a187029..4bb225d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -22,14 +22,15 @@
#include <sys/types.h>
#include <system/audio.h>
-#include <system/audio_policy.h>
+#include <media/AudioProductStrategy.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
+#include <policy.h>
+#include <Volume.h>
#include "AudioPatch.h"
#include "EffectDescriptor.h"
-#include "RoutingStrategy.h"
namespace android {
@@ -41,10 +42,12 @@
{
public:
ClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
- audio_attributes_t attributes, audio_config_base_t config,
- audio_port_handle_t preferredDeviceId) :
+ audio_attributes_t attributes, audio_config_base_t config,
+ audio_port_handle_t preferredDeviceId,
+ bool isPreferredDeviceForExclusiveUse = false) :
mPortId(portId), mUid(uid), mSessionId(sessionId), mAttributes(attributes),
- mConfig(config), mPreferredDeviceId(preferredDeviceId), mActive(false) {}
+ mConfig(config), mPreferredDeviceId(preferredDeviceId), mActive(false),
+ mPreferredDeviceForExclusiveUse(isPreferredDeviceForExclusiveUse){}
~ClientDescriptor() override = default;
virtual void dump(String8 *dst, int spaces, int index) const;
@@ -58,8 +61,9 @@
audio_port_handle_t preferredDeviceId() const { return mPreferredDeviceId; };
void setPreferredDeviceId(audio_port_handle_t preferredDeviceId) {
mPreferredDeviceId = preferredDeviceId;
- };
- void setActive(bool active) { mActive = active; }
+ }
+ bool isPreferredDeviceForExclusiveUse() const { return mPreferredDeviceForExclusiveUse; }
+ virtual void setActive(bool active) { mActive = active; }
bool active() const { return mActive; }
bool hasPreferredDevice(bool activeOnly = false) const {
return mPreferredDeviceId != AUDIO_PORT_HANDLE_NONE && (!activeOnly || mActive);
@@ -73,17 +77,23 @@
const audio_config_base_t mConfig;
audio_port_handle_t mPreferredDeviceId; // selected input device port ID
bool mActive;
+ bool mPreferredDeviceForExclusiveUse = false;
};
class TrackClientDescriptor: public ClientDescriptor
{
public:
TrackClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_session_t sessionId,
- audio_attributes_t attributes, audio_config_base_t config,
- audio_port_handle_t preferredDeviceId, audio_stream_type_t stream,
- routing_strategy strategy, audio_output_flags_t flags) :
- ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId),
- mStream(stream), mStrategy(strategy), mFlags(flags) {}
+ audio_attributes_t attributes, audio_config_base_t config,
+ audio_port_handle_t preferredDeviceId, audio_stream_type_t stream,
+ product_strategy_t strategy, VolumeSource volumeSource,
+ audio_output_flags_t flags,
+ bool isPreferredDeviceForExclusiveUse,
+ std::vector<wp<SwAudioOutputDescriptor>> secondaryOutputs) :
+ ClientDescriptor(portId, uid, sessionId, attributes, config, preferredDeviceId,
+ isPreferredDeviceForExclusiveUse),
+ mStream(stream), mStrategy(strategy), mVolumeSource(volumeSource), mFlags(flags),
+ mSecondaryOutputs(std::move(secondaryOutputs)) {}
~TrackClientDescriptor() override = default;
using ClientDescriptor::dump;
@@ -92,12 +102,45 @@
audio_output_flags_t flags() const { return mFlags; }
audio_stream_type_t stream() const { return mStream; }
- routing_strategy strategy() const { return mStrategy; }
+ product_strategy_t strategy() const { return mStrategy; }
+ const std::vector<wp<SwAudioOutputDescriptor>>& getSecondaryOutputs() const {
+ return mSecondaryOutputs;
+ };
+ VolumeSource volumeSource() const { return mVolumeSource; }
+
+ void setActive(bool active) override
+ {
+ int delta = active ? 1 : -1;
+ changeActivityCount(delta);
+ }
+ void changeActivityCount(int delta)
+ {
+ if (delta > 0) {
+ mActivityCount += delta;
+ } else {
+ LOG_ALWAYS_FATAL_IF(!mActivityCount, "%s(%s) invalid delta %d, inactive client",
+ __func__, toShortString().c_str(), delta);
+ LOG_ALWAYS_FATAL_IF(static_cast<int>(mActivityCount) < -delta,
+ "%s(%s) invalid delta %d, active client count %d",
+ __func__, toShortString().c_str(), delta, mActivityCount);
+ mActivityCount += delta;
+ }
+ ClientDescriptor::setActive(mActivityCount > 0);
+ }
+ uint32_t getActivityCount() const { return mActivityCount; }
private:
const audio_stream_type_t mStream;
- const routing_strategy mStrategy;
+ const product_strategy_t mStrategy;
+ const VolumeSource mVolumeSource;
const audio_output_flags_t mFlags;
+ const std::vector<wp<SwAudioOutputDescriptor>> mSecondaryOutputs;
+
+ /**
+ * Required for the duplicating thread: prevents removing an active client from an output
+ * involved in a duplication.
+ */
+ uint32_t mActivityCount = 0;
};
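The reference-counted override above exists because a client attached to a duplicating output is marked active by several output descriptors; a minimal sketch of the resulting bookkeeping (illustrative sequence, call order assumed):

    // Each output descriptor involved in the playback calls setActive() on the same client.
    client->setActive(true);   // mActivityCount 0 -> 1, active() == true
    client->setActive(true);   // 1 -> 2 (e.g. a duplicated sub-output)
    client->setActive(false);  // 2 -> 1, still active
    client->setActive(false);  // 1 -> 0, active() == false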
class RecordClientDescriptor: public ClientDescriptor
@@ -136,7 +179,8 @@
public:
SourceClientDescriptor(audio_port_handle_t portId, uid_t uid, audio_attributes_t attributes,
const sp<AudioPatch>& patchDesc, const sp<DeviceDescriptor>& srcDevice,
- audio_stream_type_t stream, routing_strategy strategy);
+ audio_stream_type_t stream, product_strategy_t strategy,
+ VolumeSource volumeSource);
~SourceClientDescriptor() override = default;
sp<AudioPatch> patchDesc() const { return mPatchDesc; }
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index b581665..cc43fe6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -44,8 +44,18 @@
const FormatVector& encodedFormats() const { return mEncodedFormats; }
+ audio_format_t getEncodedFormat() { return mCurrentEncodedFormat; }
+
+ void setEncodedFormat(audio_format_t format) {
+ mCurrentEncodedFormat = format;
+ }
+
bool equals(const sp<DeviceDescriptor>& other) const;
+ bool hasCurrentEncodedFormat() const;
+
+ bool supportsFormat(audio_format_t format);
+
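A short, hypothetical illustration of how the new encoded-format state could be driven (the format value and call site are assumed for the example):

    // e.g. an A2DP sink negotiated AAC at connection time:
    device->setEncodedFormat(AUDIO_FORMAT_AAC);
    if (device->hasCurrentEncodedFormat() && device->supportsFormat(AUDIO_FORMAT_AAC)) {
        // prefer a direct/offload profile able to carry this encoding
    }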
// AudioPortConfig
virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -69,6 +79,7 @@
audio_devices_t mDeviceType;
FormatVector mEncodedFormats;
audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
+ audio_format_t mCurrentEncodedFormat;
};
class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
@@ -88,9 +99,10 @@
audio_devices_t types() const { return mDeviceTypes; }
- // If 'address' is empty, a device with a non-empty address may be returned
- // if there is no device with the specified 'type' and empty address.
- sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address = {}) const;
+ // If 'address' is empty and 'codec' is AUDIO_FORMAT_DEFAULT, a device with a non-empty
+ // address may be returned if there is no device with the specified 'type' and empty address.
+ sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address,
+ audio_format_t codec) const;
DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
/**
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index 2dc33ab..7f01dc5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -16,7 +16,7 @@
#pragma once
-#include <RoutingStrategy.h>
+#include <policy.h>
#include <system/audio_effect.h>
#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
@@ -28,14 +28,26 @@
class EffectDescriptor : public RefBase
{
public:
+ EffectDescriptor(const effect_descriptor_t *desc, bool isMusicEffect,
+ int id, int io, int session) :
+ mId(id), mIo(io), mSession(session), mEnabled(false),
+ mIsMusicEffect(isMusicEffect)
+ {
+ memcpy (&mDesc, desc, sizeof(effect_descriptor_t));
+ }
+
void dump(String8 *dst, int spaces = 0) const;
int mId; // effect unique ID
int mIo; // io the effect is attached to
- routing_strategy mStrategy; // routing strategy the effect is associated to
int mSession; // audio session the effect is on
effect_descriptor_t mDesc; // effect descriptor
bool mEnabled; // enabled state: CPU load being used or not
+
+ bool isMusicEffect() const { return mIsMusicEffect; }
+
+private:
+ bool mIsMusicEffect;
};
class EffectDescriptorCollection : public KeyedVector<int, sp<EffectDescriptor> >
@@ -44,7 +56,7 @@
EffectDescriptorCollection();
status_t registerEffect(const effect_descriptor_t *desc, audio_io_handle_t io,
- uint32_t strategy, int session, int id);
+ int session, int id, bool isMusicEffect);
status_t unregisterEffect(int id);
sp<EffectDescriptor> getEffect(int id) const;
status_t setEffectEnabled(int id, bool enabled);
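An illustrative registration call with the new signature (the handles, ids and `mEffects` member are assumed names):

    // Music-tagged effects are now flagged explicitly instead of carrying a routing strategy.
    status_t res = mEffects.registerEffect(&desc, io, session, id, true /*isMusicEffect*/);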
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index d7dc4b0..eb34da4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -130,9 +130,11 @@
public:
sp<HwModule> getModuleFromName(const char *name) const;
- sp<HwModule> getModuleForDeviceTypes(audio_devices_t device) const;
+ sp<HwModule> getModuleForDeviceTypes(audio_devices_t device,
+ audio_format_t encodedFormat) const;
- sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device) const;
+ sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device,
+ audio_format_t encodedFormat) const;
DeviceVector getAvailableDevicesFromModuleName(const char *name,
const DeviceVector &availableDevices) const;
@@ -149,6 +151,7 @@
* @param type of the device requested
* @param address of the device requested
* @param name of the device that requested
+ * @param encodedFormat if not AUDIO_FORMAT_DEFAULT, must match one supported format
* @param matchAddress true if a strong match is required
* @param allowToCreate true if allowed to create dynamic device (e.g. hdmi, usb...)
* @return device descriptor associated to the type (and address if matchAddress is true)
@@ -156,6 +159,7 @@
sp<DeviceDescriptor> getDeviceDescriptor(const audio_devices_t type,
const char *address,
const char *name,
+ audio_format_t encodedFormat,
bool allowToCreate = false,
bool matchAddress = true) const;
@@ -171,7 +175,8 @@
*/
sp<DeviceDescriptor> createDevice(const audio_devices_t type,
const char *address,
- const char *name) const;
+ const char *name,
+ const audio_format_t encodedFormat) const;
/**
* @brief cleanUpForDevice: loop on all profiles of all modules to remove device from
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index d0c05a5..e0b56d4 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -94,7 +94,10 @@
bool supportsDeviceTypes(audio_devices_t device) const
{
if (audio_is_output_devices(device)) {
- return mSupportedDevices.types() & device;
+ if (deviceSupportsEncodedFormats(device)) {
+ return mSupportedDevices.types() & device;
+ }
+ return false;
}
return mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN);
}
@@ -116,6 +119,19 @@
return mSupportedDevices.contains(device);
}
+ bool deviceSupportsEncodedFormats(audio_devices_t device) const
+ {
+ if (device == AUDIO_DEVICE_NONE) {
+ return true; // required for isOffloadSupported() check
+ }
+ DeviceVector deviceList =
+ mSupportedDevices.getDevicesFromTypeMask(device);
+ if (!deviceList.empty()) {
+ return deviceList.itemAt(0)->hasCurrentEncodedFormat();
+ }
+ return false;
+ }
+
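Illustrative use of the new profile-level check when scanning candidate profiles (the loop and the `outputProfiles` container are assumed, not part of this change):

    for (const auto& profile : outputProfiles) {
        // Skip profiles whose matching device has no usable encoded format selected yet;
        // AUDIO_DEVICE_NONE is accepted so the isOffloadSupported() path keeps working.
        if (!profile->deviceSupportsEncodedFormats(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP)) {
            continue;
        }
        // ... profile is a candidate for the encoded stream
    }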
void clearSupportedDevices() { mSupportedDevices.clear(); }
void addSupportedDevice(const sp<DeviceDescriptor> &device)
{
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
new file mode 100644
index 0000000..d408446
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <Volume.h>
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <vector>
+
+namespace android {
+
+class IVolumeCurves
+{
+public:
+ virtual ~IVolumeCurves() = default;
+
+ virtual void clearCurrentVolumeIndex() = 0;
+ virtual void addCurrentVolumeIndex(audio_devices_t device, int index) = 0;
+ virtual bool canBeMuted() const = 0;
+ virtual int getVolumeIndexMin() const = 0;
+ virtual int getVolumeIndex(audio_devices_t device) const = 0;
+ virtual int getVolumeIndexMax() const = 0;
+ virtual float volIndexToDb(device_category device, int indexInUi) const = 0;
+ virtual bool hasVolumeIndexForDevice(audio_devices_t device) const = 0;
+ virtual status_t initVolume(int indexMin, int indexMax) = 0;
+ virtual std::vector<audio_attributes_t> getAttributes() const = 0;
+ virtual std::vector<audio_stream_type_t> getStreamTypes() const = 0;
+ virtual void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const = 0;
+};
+
+} // namespace android
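A minimal usage sketch of the new interface (illustrative; `curves` stands for a reference to a concrete IVolumeCurves implementation obtained from the engine, and the 0..100 index range is assumed):

    // Record the UI index for the speaker device and convert it to an attenuation in dB.
    curves.initVolume(0 /*indexMin*/, 100 /*indexMax*/);
    curves.addCurrentVolumeIndex(AUDIO_DEVICE_OUT_SPEAKER, 50);
    float attenuationDb = curves.volIndexToDb(DEVICE_CATEGORY_SPEAKER,
                                              curves.getVolumeIndex(AUDIO_DEVICE_OUT_SPEAKER));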
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
deleted file mode 100644
index 750da55..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <system/audio.h>
-#include <Volume.h>
-#include <utils/Errors.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class IVolumeCurvesCollection
-{
-public:
- virtual ~IVolumeCurvesCollection() = default;
-
- virtual void clearCurrentVolumeIndex(audio_stream_type_t stream) = 0;
- virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
- int index) = 0;
- virtual bool canBeMuted(audio_stream_type_t stream) = 0;
- virtual int getVolumeIndexMin(audio_stream_type_t stream) const = 0;
- virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device) = 0;
- virtual int getVolumeIndexMax(audio_stream_type_t stream) const = 0;
- virtual float volIndexToDb(audio_stream_type_t stream, device_category device,
- int indexInUi) const = 0;
- virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
-
- virtual void initializeVolumeCurves(bool /*isSpeakerDrcEnabled*/) {}
- virtual void switchVolumeCurve(audio_stream_type_t src, audio_stream_type_t dst) = 0;
- virtual void restoreOriginVolumeCurve(audio_stream_type_t stream)
- {
- switchVolumeCurve(stream, stream);
- }
- virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
- audio_devices_t device) const = 0;
-
- virtual void dump(String8 *dst) const = 0;
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
deleted file mode 100644
index 76ec198..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "IVolumeCurvesCollection.h"
-#include <policy.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-#include <utils/KeyedVector.h>
-#include <system/audio.h>
-#include <cutils/config_utils.h>
-#include <string>
-#include <utility>
-
-namespace android {
-
-struct CurvePoint
-{
- CurvePoint() {}
- CurvePoint(int index, int attenuationInMb) :
- mIndex(index), mAttenuationInMb(attenuationInMb) {}
- uint32_t mIndex;
- int mAttenuationInMb;
-};
-
-inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
-{
- return lhs.mIndex < rhs.mIndex;
-}
-
-// A volume curve for a given use case and device category
-// It contains of list of points of this curve expressing the attenuation in Millibels for
-// a given volume index from 0 to 100
-class VolumeCurve : public RefBase
-{
-public:
- VolumeCurve(device_category device, audio_stream_type_t stream) :
- mDeviceCategory(device), mStreamType(stream) {}
-
- device_category getDeviceCategory() const { return mDeviceCategory; }
- audio_stream_type_t getStreamType() const { return mStreamType; }
-
- void add(const CurvePoint &point) { mCurvePoints.add(point); }
-
- float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
-
- void dump(String8 *result) const;
-
-private:
- SortedVector<CurvePoint> mCurvePoints;
- device_category mDeviceCategory;
- audio_stream_type_t mStreamType;
-};
-
-// Volume Curves for a given use case indexed by device category
-class VolumeCurvesForStream : public KeyedVector<device_category, sp<VolumeCurve> >
-{
-public:
- VolumeCurvesForStream() : mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
- {
- mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
- }
-
- sp<VolumeCurve> getCurvesFor(device_category device) const
- {
- if (indexOfKey(device) < 0) {
- return 0;
- }
- return valueFor(device);
- }
-
- int getVolumeIndex(audio_devices_t device) const
- {
- device = Volume::getDeviceForVolume(device);
- // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
- if (mIndexCur.indexOfKey(device) < 0) {
- device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
- }
- return mIndexCur.valueFor(device);
- }
-
- bool canBeMuted() const { return mCanBeMuted; }
- void clearCurrentVolumeIndex() { mIndexCur.clear(); }
- void addCurrentVolumeIndex(audio_devices_t device, int index) { mIndexCur.add(device, index); }
-
- void setVolumeIndexMin(int volIndexMin) { mIndexMin = volIndexMin; }
- int getVolumeIndexMin() const { return mIndexMin; }
-
- void setVolumeIndexMax(int volIndexMax) { mIndexMax = volIndexMax; }
- int getVolumeIndexMax() const { return mIndexMax; }
-
- bool hasVolumeIndexForDevice(audio_devices_t device) const
- {
- device = Volume::getDeviceForVolume(device);
- return mIndexCur.indexOfKey(device) >= 0;
- }
-
- const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
- {
- ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
- return mOriginVolumeCurves.valueFor(deviceCategory);
- }
- void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
- {
- ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
- replaceValueFor(deviceCategory, volumeCurve);
- }
-
- ssize_t add(const sp<VolumeCurve> &volumeCurve)
- {
- device_category deviceCategory = volumeCurve->getDeviceCategory();
- ssize_t index = indexOfKey(deviceCategory);
- if (index < 0) {
- // Keep track of original Volume Curves per device category in order to switch curves.
- mOriginVolumeCurves.add(deviceCategory, volumeCurve);
- return KeyedVector::add(deviceCategory, volumeCurve);
- }
- return index;
- }
-
- float volIndexToDb(device_category deviceCat, int indexInUi) const
- {
- sp<VolumeCurve> vc = getCurvesFor(deviceCat);
- if (vc != 0) {
- return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
- } else {
- ALOGE("Invalid device category %d for Volume Curve", deviceCat);
- return 0.0f;
- }
- }
-
- void dump(String8 *dst, int spaces, bool curvePoints = false) const;
-
-private:
- KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
- KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
- int mIndexMin; /**< min volume index. */
- int mIndexMax; /**< max volume index. */
- bool mCanBeMuted; /**< true is the stream can be muted. */
-};
-
-// Collection of Volume Curves indexed by use case
-class VolumeCurvesCollection : public KeyedVector<audio_stream_type_t, VolumeCurvesForStream>,
- public IVolumeCurvesCollection
-{
-public:
- VolumeCurvesCollection()
- {
- // Create an empty collection of curves
- for (ssize_t i = 0 ; i < AUDIO_STREAM_CNT; i++) {
- audio_stream_type_t stream = static_cast<audio_stream_type_t>(i);
- KeyedVector::add(stream, VolumeCurvesForStream());
- }
- }
-
- // Once XML has been parsed, must be call first to sanity check table and initialize indexes
- virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
- {
- editValueAt(stream).setVolumeIndexMin(indexMin);
- editValueAt(stream).setVolumeIndexMax(indexMax);
- return NO_ERROR;
- }
- virtual void clearCurrentVolumeIndex(audio_stream_type_t stream)
- {
- editCurvesFor(stream).clearCurrentVolumeIndex();
- }
- virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index)
- {
- editCurvesFor(stream).addCurrentVolumeIndex(device, index);
- }
- virtual bool canBeMuted(audio_stream_type_t stream) { return getCurvesFor(stream).canBeMuted(); }
-
- virtual int getVolumeIndexMin(audio_stream_type_t stream) const
- {
- return getCurvesFor(stream).getVolumeIndexMin();
- }
- virtual int getVolumeIndexMax(audio_stream_type_t stream) const
- {
- return getCurvesFor(stream).getVolumeIndexMax();
- }
- virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device)
- {
- return getCurvesFor(stream).getVolumeIndex(device);
- }
- virtual void switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
- {
- const VolumeCurvesForStream &sourceCurves = getCurvesFor(streamSrc);
- VolumeCurvesForStream &dstCurves = editCurvesFor(streamDst);
- ALOG_ASSERT(sourceCurves.size() == dstCurves.size(), "device category not aligned");
- for (size_t index = 0; index < sourceCurves.size(); index++) {
- device_category cat = sourceCurves.keyAt(index);
- dstCurves.setVolumeCurve(cat, sourceCurves.getOriginVolumeCurve(cat));
- }
- }
- virtual float volIndexToDb(audio_stream_type_t stream, device_category cat, int indexInUi) const
- {
- return getCurvesFor(stream).volIndexToDb(cat, indexInUi);
- }
- virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
- audio_devices_t device) const
- {
- return getCurvesFor(stream).hasVolumeIndexForDevice(device);
- }
-
- void dump(String8 *dst) const override;
-
- ssize_t add(const sp<VolumeCurve> &volumeCurve)
- {
- audio_stream_type_t streamType = volumeCurve->getStreamType();
- return editCurvesFor(streamType).add(volumeCurve);
- }
- VolumeCurvesForStream &editCurvesFor(audio_stream_type_t stream)
- {
- ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
- return editValueAt(stream);
- }
- const VolumeCurvesForStream &getCurvesFor(audio_stream_type_t stream) const
- {
- ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
- return valueFor(stream);
- }
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 55d4db4..1fa1123 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -257,15 +257,21 @@
void AudioInputDescriptor::close()
{
if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ // clean up active clients if any (can happen if close() is called to force
+ // clients to reconnect)
+ for (const auto &client : getClientIterable()) {
+ if (client->active()) {
+ ALOGW("%s client with port ID %d still active on input %d",
+ __func__, client->portId(), mId);
+ setClientActive(client, false);
+ stop();
+ }
+ }
+
mClientInterface->closeInput(mIoHandle);
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
__FUNCTION__, mProfile->curOpenCount);
- // do not call stop() here as stop() is supposed to be called after
- // setClientActive(client, false) and we don't know how many clients
- // are still active at this time
- if (isActive()) {
- mProfile->curActiveCount--;
- }
+
mProfile->curOpenCount--;
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < mProfile->curActiveCount,
"%s(%d): mProfile->curOpenCount %d < mProfile->curActiveCount %d.",
@@ -505,6 +511,19 @@
}
}
+void AudioInputCollection::clearSessionRoutesForDevice(
+ const sp<DeviceDescriptor> &disconnectedDevice)
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioInputDescriptor> inputDesc = valueAt(i);
+ for (const auto& client : inputDesc->getClientIterable()) {
+ if (client->preferredDeviceId() == disconnectedDevice->getId()) {
+ client->setPreferredDeviceId(AUDIO_PORT_HANDLE_NONE);
+ }
+ }
+ }
+}
+
void AudioInputCollection::dump(String8 *dst) const
{
dst->append("\nInputs dump:\n");
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 643cbd1..7293bc4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -34,19 +34,8 @@
AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
AudioPolicyClientInterface *clientInterface)
- : mPort(port)
- , mClientInterface(clientInterface)
+ : mPort(port), mClientInterface(clientInterface)
{
- // clear usage count for all stream types
- for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
- mActiveCount[i] = 0;
- mCurVolume[i] = -1.0;
- mMuteCount[i] = 0;
- mStopTime[i] = 0;
- }
- for (int i = 0; i < NUM_STRATEGIES; i++) {
- mStrategyMutedByDevice[i] = false;
- }
if (mPort.get() != nullptr) {
mPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
if (mPort->mGains.size() > 0) {
@@ -88,117 +77,73 @@
return hasSameHwModuleAs(outputDesc);
}
-void AudioOutputDescriptor::changeStreamActiveCount(const sp<TrackClientDescriptor>& client,
- int delta)
+void AudioOutputDescriptor::setStopTime(const sp<TrackClientDescriptor>& client, nsecs_t sysTime)
{
- if (delta == 0) return;
- const audio_stream_type_t stream = client->stream();
- if ((delta + (int)mActiveCount[stream]) < 0) {
- // any mismatched active count will abort.
- LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, active stream count %d",
- __func__, client->toShortString().c_str(), delta, mActiveCount[stream]);
- // mActiveCount[stream] = 0;
- // return;
- }
- mActiveCount[stream] += delta;
-
- if (delta > 0) {
- mActiveClients[client] += delta;
- } else {
- auto it = mActiveClients.find(client);
- if (it == mActiveClients.end()) { // client not found!
- LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, inactive client",
- __func__, client->toShortString().c_str(), delta);
- } else if (it->second < -delta) { // invalid delta!
- LOG_ALWAYS_FATAL("%s(%s) invalid delta %d, active client count %zu",
- __func__, client->toShortString().c_str(), delta, it->second);
- }
- it->second += delta;
- if (it->second == 0) {
- (void)mActiveClients.erase(it);
- }
- }
-
- ALOGV("%s stream %d, count %d", __FUNCTION__, stream, mActiveCount[stream]);
+ mVolumeActivities[client->volumeSource()].setStopTime(sysTime);
+ mRoutingActivities[client->strategy()].setStopTime(sysTime);
}
void AudioOutputDescriptor::setClientActive(const sp<TrackClientDescriptor>& client, bool active)
{
- LOG_ALWAYS_FATAL_IF(getClient(client->portId()) == nullptr,
- "%s(%d) does not exist on output descriptor", __func__, client->portId());
-
- if (active == client->active()) {
- ALOGW("%s(%s): ignored active: %d, current stream count %d",
- __func__, client->toShortString().c_str(),
- active, mActiveCount[client->stream()]);
+ auto clientIter = std::find(begin(mActiveClients), end(mActiveClients), client);
+ if (active == (clientIter != end(mActiveClients))) {
+ ALOGW("%s(%s): ignored active: %d, current stream count %d", __func__,
+ client->toShortString().c_str(), active,
+ mRoutingActivities.at(client->strategy()).getActivityCount());
return;
}
+ if (active) {
+ mActiveClients.push_back(client);
+ } else {
+ mActiveClients.erase(clientIter);
+ }
const int delta = active ? 1 : -1;
- changeStreamActiveCount(client, delta);
+ // If the strategy or volume source is not tracked yet, operator[] creates the entry here
+ mRoutingActivities[client->strategy()].changeActivityCount(delta);
+ mVolumeActivities[client->volumeSource()].changeActivityCount(delta);
// Handle non-client-specific activity ref count
int32_t oldGlobalActiveCount = mGlobalActiveCount;
if (!active && mGlobalActiveCount < 1) {
ALOGW("%s(%s): invalid deactivation with globalRefCount %d",
- __func__, client->toShortString().c_str(), mGlobalActiveCount);
+ __func__, client->toShortString().c_str(), mGlobalActiveCount);
mGlobalActiveCount = 1;
}
mGlobalActiveCount += delta;
- if ((oldGlobalActiveCount == 0) && (mGlobalActiveCount > 0)) {
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
- {
+ if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ if ((oldGlobalActiveCount == 0) || (mGlobalActiveCount == 0)) {
mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
- MIX_STATE_MIXING);
- }
- } else if ((oldGlobalActiveCount > 0) && (mGlobalActiveCount == 0)) {
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
- {
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
- MIX_STATE_IDLE);
+ mGlobalActiveCount > 0 ? MIX_STATE_MIXING : MIX_STATE_IDLE);
}
}
-
client->setActive(active);
}
+bool AudioOutputDescriptor::isActive(VolumeSource vs, uint32_t inPastMs, nsecs_t sysTime) const
+{
+ return (vs == VOLUME_SOURCE_NONE) ?
+ isActive(inPastMs) : (mVolumeActivities.find(vs) != std::end(mVolumeActivities)?
+ mVolumeActivities.at(vs).isActive(inPastMs, sysTime) : false);
+}
+
bool AudioOutputDescriptor::isActive(uint32_t inPastMs) const
{
nsecs_t sysTime = 0;
if (inPastMs != 0) {
sysTime = systemTime();
}
- for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
- if (i == AUDIO_STREAM_PATCH) {
+ for (const auto &iter : mVolumeActivities) {
+ if (iter.first == streamToVolumeSource(AUDIO_STREAM_PATCH)) {
continue;
}
- if (isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+ if (iter.second.isActive(inPastMs, sysTime)) {
return true;
}
}
return false;
}
-bool AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream,
- uint32_t inPastMs,
- nsecs_t sysTime) const
-{
- if (mActiveCount[stream] != 0) {
- return true;
- }
- if (inPastMs == 0) {
- return false;
- }
- if (sysTime == 0) {
- sysTime = systemTime();
- }
- if (ns2ms(sysTime - mStopTime[stream]) < inPastMs) {
- return true;
- }
- return false;
-}
-
-
bool AudioOutputDescriptor::isFixedVolume(audio_devices_t device __unused)
{
return false;
@@ -213,9 +158,9 @@
// We actually change the volume if:
// - the float value returned by computeVolume() changed
// - the force flag is set
- if (volume != mCurVolume[stream] || force) {
+ if (volume != getCurVolume(static_cast<VolumeSource>(stream)) || force) {
ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volume, delayMs);
- mCurVolume[stream] = volume;
+ setCurVolume(static_cast<VolumeSource>(stream), volume);
return true;
}
return false;
@@ -247,20 +192,28 @@
port->ext.mix.hw_module = getModuleHandle();
}
-TrackClientVector AudioOutputDescriptor::clientsList(bool activeOnly, routing_strategy strategy,
+TrackClientVector AudioOutputDescriptor::clientsList(bool activeOnly, product_strategy_t strategy,
bool preferredDeviceOnly) const
{
TrackClientVector clients;
for (const auto &client : getClientIterable()) {
if ((!activeOnly || client->active())
- && (strategy == STRATEGY_NONE || strategy == client->strategy())
- && (!preferredDeviceOnly || client->hasPreferredDevice())) {
+ && (strategy == PRODUCT_STRATEGY_NONE || strategy == client->strategy())
+ && (!preferredDeviceOnly ||
+ (client->hasPreferredDevice() && !client->isPreferredDeviceForExclusiveUse()))) {
clients.push_back(client);
}
}
return clients;
}
+bool AudioOutputDescriptor::isAnyActive(VolumeSource volumeSourceToIgnore) const
+{
+ return std::find_if(begin(mActiveClients), end(mActiveClients),
+ [&volumeSourceToIgnore](const auto &client) {
+ return client->volumeSource() != volumeSourceToIgnore; }) != end(mActiveClients);
+}
+
void AudioOutputDescriptor::dump(String8 *dst) const
{
dst->appendFormat(" ID: %d\n", mId);
@@ -269,20 +222,22 @@
dst->appendFormat(" Channels: %08x\n", mChannelMask);
dst->appendFormat(" Devices: %s\n", devices().toString().c_str());
dst->appendFormat(" Global active count: %u\n", mGlobalActiveCount);
- dst->append(" Stream volume activeCount muteCount\n");
- for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
- dst->appendFormat(" %02d %.03f %02d %02d\n",
- i, mCurVolume[i], streamActiveCount((audio_stream_type_t)i), mMuteCount[i]);
+ for (const auto &iter : mRoutingActivities) {
+ dst->appendFormat(" Product Strategy id: %d", iter.first);
+ iter.second.dump(dst, 4);
+ }
+ for (const auto &iter : mVolumeActivities) {
+ dst->appendFormat(" Volume Activities id: %d", iter.first);
+ iter.second.dump(dst, 4);
}
dst->append(" AudioTrack Clients:\n");
ClientMapHandler<TrackClientDescriptor>::dump(dst);
dst->append("\n");
- if (mActiveClients.size() > 0) {
+ if (!mActiveClients.empty()) {
dst->append(" AudioTrack active (stream) clients:\n");
size_t index = 0;
- for (const auto& clientPair : mActiveClients) {
- dst->appendFormat(" Refcount: %zu", clientPair.second);
- clientPair.first->dump(dst, 2, index++);
+ for (const auto& client : mActiveClients) {
+ client->dump(dst, 2, index++);
}
dst->append(" \n");
}
@@ -364,6 +319,16 @@
return filteredDevices.filter(devices);
}
+bool SwAudioOutputDescriptor::deviceSupportsEncodedFormats(audio_devices_t device)
+{
+ if (isDuplicated()) {
+ return (mOutput1->deviceSupportsEncodedFormats(device)
+ || mOutput2->deviceSupportsEncodedFormats(device));
+ } else {
+ return mProfile->deviceSupportsEncodedFormats(device);
+ }
+}
+
uint32_t SwAudioOutputDescriptor::latency()
{
if (isDuplicated()) {
@@ -373,15 +338,14 @@
}
}
-void SwAudioOutputDescriptor::changeStreamActiveCount(const sp<TrackClientDescriptor>& client,
- int delta)
+void SwAudioOutputDescriptor::setClientActive(const sp<TrackClientDescriptor>& client, bool active)
{
// forward usage count change to attached outputs
if (isDuplicated()) {
- mOutput1->changeStreamActiveCount(client, delta);
- mOutput2->changeStreamActiveCount(client, delta);
+ mOutput1->setClientActive(client, active);
+ mOutput2->setClientActive(client, active);
}
- AudioOutputDescriptor::changeStreamActiveCount(client, delta);
+ AudioOutputDescriptor::setClientActive(client, active);
}
bool SwAudioOutputDescriptor::isFixedVolume(audio_devices_t device)
@@ -430,19 +394,16 @@
uint32_t delayMs,
bool force)
{
- bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
-
- if (changed) {
- // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
- // enabled
- float volume = Volume::DbToAmpl(mCurVolume[stream]);
- if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
- mClientInterface->setStreamVolume(
- AUDIO_STREAM_VOICE_CALL, volume, mIoHandle, delayMs);
- }
- mClientInterface->setStreamVolume(stream, volume, mIoHandle, delayMs);
+ if (!AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force)) {
+ return false;
}
- return changed;
+ // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is enabled
+ float volumeAmpl = Volume::DbToAmpl(getCurVolume(static_cast<VolumeSource>(stream)));
+ if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+ mClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volumeAmpl, mIoHandle, delayMs);
+ }
+ mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ return true;
}
status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
@@ -553,6 +514,17 @@
void SwAudioOutputDescriptor::close()
{
if (mIoHandle != AUDIO_IO_HANDLE_NONE) {
+ // clean up active clients if any (can happen if close() is called to force
+ // clients to reconnect)
+ for (const auto &client : getClientIterable()) {
+ if (client->active()) {
+ ALOGW("%s client with port ID %d still active on output %d",
+ __func__, client->portId(), mId);
+ setClientActive(client, false);
+ stop();
+ }
+ }
+
AudioParameter param;
param.add(String8("closing"), String8("true"));
mClientInterface->setParameters(mIoHandle, param.toString());
@@ -561,11 +533,6 @@
LOG_ALWAYS_FATAL_IF(mProfile->curOpenCount < 1, "%s profile open count %u",
__FUNCTION__, mProfile->curOpenCount);
- // do not call stop() here as stop() is supposed to be called after setClientActive(false)
- // and we don't know how many streams are still active at this time
- if (isActive()) {
- mProfile->curActiveCount--;
- }
mProfile->curOpenCount--;
mIoHandle = AUDIO_IO_HANDLE_NONE;
}
@@ -639,24 +606,24 @@
}
// SwAudioOutputCollection implementation
-bool SwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActive(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < this->size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
- if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+ if (outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
return true;
}
}
return false;
}
-bool SwAudioOutputCollection::isStreamActiveLocally(audio_stream_type_t stream, uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActiveLocally(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < this->size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
- if (outputDesc->isStreamActive(stream, inPastMs, sysTime)
+ if (outputDesc->isActive(volumeSource, inPastMs, sysTime)
&& ((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) == 0)) {
return true;
}
@@ -664,14 +631,13 @@
return false;
}
-bool SwAudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream,
- uint32_t inPastMs) const
+bool SwAudioOutputCollection::isActiveRemotely(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
if (((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
- outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+ outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
// do not consider re routing (when the output is going to a dynamic policy)
// as "remote playback"
if (outputDesc->mPolicyMix == NULL) {
@@ -682,12 +648,28 @@
return false;
}
+bool SwAudioOutputCollection::isStrategyActiveOnSameModule(product_strategy_t ps,
+ const sp<SwAudioOutputDescriptor>& desc,
+ uint32_t inPastMs, nsecs_t sysTime) const
+{
+ for (size_t i = 0; i < size(); i++) {
+ const sp<SwAudioOutputDescriptor> otherDesc = valueAt(i);
+ if (desc->sharesHwModuleWith(otherDesc) &&
+ otherDesc->isStrategyActive(ps, inPastMs, sysTime)) {
+ return true;
+ }
+ }
+ return false;
+}
+
audio_io_handle_t SwAudioOutputCollection::getA2dpOutput() const
{
for (size_t i = 0; i < size(); i++) {
sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
if (!outputDesc->isDuplicated() &&
- outputDesc->devices().types() & AUDIO_DEVICE_OUT_ALL_A2DP) {
+ outputDesc->devices().types() & AUDIO_DEVICE_OUT_ALL_A2DP &&
+ outputDesc->deviceSupportsEncodedFormats(
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP)) {
return this->keyAt(i);
}
}
@@ -738,22 +720,6 @@
return NULL;
}
-bool SwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
-{
- for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
- if (s == (size_t) streamToIgnore) {
- continue;
- }
- for (size_t i = 0; i < size(); i++) {
- const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
- if (outputDesc->streamActiveCount((audio_stream_type_t)s)!= 0) {
- return true;
- }
- }
- }
- return false;
-}
-
sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputForClient(audio_port_handle_t portId)
{
for (size_t i = 0; i < size(); i++) {
@@ -765,6 +731,19 @@
return 0;
}
+void SwAudioOutputCollection::clearSessionRoutesForDevice(
+ const sp<DeviceDescriptor> &disconnectedDevice)
+{
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioOutputDescriptor> outputDesc = valueAt(i);
+ for (const auto& client : outputDesc->getClientIterable()) {
+ if (client->preferredDeviceId() == disconnectedDevice->getId()) {
+ client->setPreferredDeviceId(AUDIO_PORT_HANDLE_NONE);
+ }
+ }
+ }
+}
+
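For completeness, a hypothetical call site at device-disconnection time (the `mOutputs`/`mInputs` collection members and `device` are assumed names):

    // When `device` is reported disconnected, drop any explicit routing that targeted it.
    mOutputs.clearSessionRoutesForDevice(device);
    mInputs.clearSessionRoutesForDevice(device);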
void SwAudioOutputCollection::dump(String8 *dst) const
{
dst->append("\nOutputs dump:\n");
@@ -775,34 +754,18 @@
}
// HwAudioOutputCollection implementation
-bool HwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+bool HwAudioOutputCollection::isActive(VolumeSource volumeSource, uint32_t inPastMs) const
{
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < this->size(); i++) {
const sp<HwAudioOutputDescriptor> outputDesc = this->valueAt(i);
- if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+ if (outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
return true;
}
}
return false;
}
-bool HwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
-{
- for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
- if (s == (size_t) streamToIgnore) {
- continue;
- }
- for (size_t i = 0; i < size(); i++) {
- const sp<HwAudioOutputDescriptor> outputDesc = valueAt(i);
- if (outputDesc->streamActiveCount((audio_stream_type_t)s) != 0) {
- return true;
- }
- }
- }
- return false;
-}
-
void HwAudioOutputCollection::dump(String8 *dst) const
{
dst->append("\nOutputs dump:\n");
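
The hunks above replace the per-stream activity queries (isStreamActive, isAnyOutputActive) with volume-source based ones (isActive, isActiveRemotely). The block below is a minimal, self-contained sketch of that collection-level check, not the framework code; OutputDesc, VolumeSource and anyOutputActive are illustrative stand-ins.

```cpp
#include <chrono>
#include <cstdint>
#include <memory>
#include <vector>

// Illustrative stand-ins for the framework types; the real descriptors live in
// AudioOutputDescriptor.h and are far richer than this.
using VolumeSource = int32_t;

struct OutputDesc {
    // True if the given volume source was active on this output within the
    // last inPastMs milliseconds (or is active right now).
    bool isActive(VolumeSource vs, uint32_t inPastMs,
                  std::chrono::steady_clock::time_point now) const {
        if (lastActiveSource != vs) return false;
        const auto elapsed =
            std::chrono::duration_cast<std::chrono::milliseconds>(now - lastActivity);
        return elapsed.count() <= static_cast<long long>(inPastMs);
    }
    VolumeSource lastActiveSource = -1;
    std::chrono::steady_clock::time_point lastActivity{};
};

// Collection-level check in the spirit of SwAudioOutputCollection::isActive():
// one active output is enough to report the whole collection as active.
bool anyOutputActive(const std::vector<std::shared_ptr<OutputDesc>>& outputs,
                     VolumeSource vs, uint32_t inPastMs) {
    const auto now = std::chrono::steady_clock::now();
    for (const auto& desc : outputs) {
        if (desc->isActive(vs, inPastMs, now)) {
            return true;
        }
    }
    return false;
}
```

The early return mirrors the loops in the patch: the first output reporting recent activity for the queried volume source decides the answer.
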
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index cd1c2f2..3a4db90 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -41,9 +41,7 @@
const audio_port_config &cfg = cfgs[i];
dst->appendFormat("%*s [%s %d] ", spaces, "", prefix, i + 1);
if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
- std::string device;
- deviceToString(cfg.ext.device.type, device);
- dst->appendFormat("Device ID %d %s", cfg.id, device.c_str());
+ dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
} else {
dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index d18091c..23d764e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -42,7 +42,7 @@
mOutput.clear();
}
-void AudioPolicyMix::setMix(AudioMix &mix)
+void AudioPolicyMix::setMix(const AudioMix &mix)
{
mMix = mix;
}
@@ -66,9 +66,7 @@
RouteFlagTypeConverter::maskToString(mMix.mRouteFlags, routeFlagLiteral);
dst->appendFormat("%*s- Route Flags: %s\n", spaces, "", routeFlagLiteral.c_str());
- std::string deviceLiteral;
- deviceToString(mMix.mDeviceType, deviceLiteral);
- dst->appendFormat("%*s- device type: %s\n", spaces, "", deviceLiteral.c_str());
+ dst->appendFormat("%*s- device type: %s\n", spaces, "", toString(mMix.mDeviceType).c_str());
dst->appendFormat("%*s- device address: %s\n", spaces, "", mMix.mDeviceAddress.string());
@@ -156,132 +154,200 @@
}
}
-status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes, uid_t uid,
- sp<SwAudioOutputDescriptor> &desc)
+status_t AudioPolicyMixCollection::getOutputForAttr(
+ const audio_attributes_t& attributes, uid_t uid, sp<SwAudioOutputDescriptor> &primaryDesc,
+ std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs)
{
ALOGV("getOutputForAttr() querying %zu mixes:", size());
- desc = 0;
+ primaryDesc = 0;
for (size_t i = 0; i < size(); i++) {
sp<AudioPolicyMix> policyMix = valueAt(i);
+ sp<SwAudioOutputDescriptor> policyDesc = policyMix->getOutput();
+ if (!policyDesc) {
+ ALOGV("%s: Skipping %zu: Mix has no output", __func__, i);
+ continue;
+ }
+
AudioMix *mix = policyMix->getMix();
+ const bool primaryOutputMix = !is_mix_loopback_render(mix->mRouteFlags);
- if (mix->mMixType == MIX_TYPE_PLAYERS) {
- // TODO if adding more player rules (currently only 2), make rule handling "generic"
- // as there is no difference in the treatment of usage- or uid-based rules
- bool hasUsageMatchRules = false;
- bool hasUsageExcludeRules = false;
- bool usageMatchFound = false;
- bool usageExclusionFound = false;
+ if (primaryOutputMix && primaryDesc != 0) {
+ ALOGV("%s: Skipping %zu: Primary output already found", __func__, i);
+ continue; // Primary output already found
+ }
- bool hasUidMatchRules = false;
- bool hasUidExcludeRules = false;
- bool uidMatchFound = false;
- bool uidExclusionFound = false;
+ switch (mixMatch(mix, i, attributes, uid)) {
+ case MixMatchStatus::INVALID_MIX: return BAD_VALUE; // TODO: Do we really want to abort?
+ case MixMatchStatus::NO_MATCH:
+ ALOGV("%s: Mix %zu: does not match", __func__, i);
+ continue; // skip the mix
+ case MixMatchStatus::MATCH:;
+ }
- bool hasAddrMatch = false;
-
- // iterate over all mix criteria to list what rules this mix contains
- for (size_t j = 0; j < mix->mCriteria.size(); j++) {
- ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
- i, j, mix->mCriteria.size());
-
- // if there is an address match, prioritize that match
- if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
- strncmp(attributes.tags + strlen("addr="),
- mix->mDeviceAddress.string(),
- AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
- hasAddrMatch = true;
- break;
- }
-
- switch (mix->mCriteria[j].mRule) {
- case RULE_MATCH_ATTRIBUTE_USAGE:
- ALOGV("\tmix has RULE_MATCH_ATTRIBUTE_USAGE for usage %d",
- mix->mCriteria[j].mValue.mUsage);
- hasUsageMatchRules = true;
- if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
- // found one match against all allowed usages
- usageMatchFound = true;
- }
- break;
- case RULE_EXCLUDE_ATTRIBUTE_USAGE:
- ALOGV("\tmix has RULE_EXCLUDE_ATTRIBUTE_USAGE for usage %d",
- mix->mCriteria[j].mValue.mUsage);
- hasUsageExcludeRules = true;
- if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
- // found this usage is to be excluded
- usageExclusionFound = true;
- }
- break;
- case RULE_MATCH_UID:
- ALOGV("\tmix has RULE_MATCH_UID for uid %d", mix->mCriteria[j].mValue.mUid);
- hasUidMatchRules = true;
- if (mix->mCriteria[j].mValue.mUid == uid) {
- // found one UID match against all allowed UIDs
- uidMatchFound = true;
- }
- break;
- case RULE_EXCLUDE_UID:
- ALOGV("\tmix has RULE_EXCLUDE_UID for uid %d", mix->mCriteria[j].mValue.mUid);
- hasUidExcludeRules = true;
- if (mix->mCriteria[j].mValue.mUid == uid) {
- // found this UID is to be excluded
- uidExclusionFound = true;
- }
- break;
- default:
- break;
- }
-
- // consistency checks: for each "dimension" of rules (usage, uid...), we can
- // only have MATCH rules, or EXCLUDE rules in each dimension, not a combination
- if (hasUsageMatchRules && hasUsageExcludeRules) {
- ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_ATTRIBUTE_USAGE"
- " and RULE_EXCLUDE_ATTRIBUTE_USAGE in mix %zu", i);
- return BAD_VALUE;
- }
- if (hasUidMatchRules && hasUidExcludeRules) {
- ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_UID"
- " and RULE_EXCLUDE_UID in mix %zu", i);
- return BAD_VALUE;
- }
-
- if ((hasUsageExcludeRules && usageExclusionFound)
- || (hasUidExcludeRules && uidExclusionFound)) {
- break; // stop iterating on criteria because an exclusion was found (will fail)
- }
-
- }//iterate on mix criteria
-
- // determine if exiting on success (or implicit failure as desc is 0)
- if (hasAddrMatch ||
- !((hasUsageExcludeRules && usageExclusionFound) ||
- (hasUsageMatchRules && !usageMatchFound) ||
- (hasUidExcludeRules && uidExclusionFound) ||
- (hasUidMatchRules && !uidMatchFound))) {
- ALOGV("\tgetOutputForAttr will use mix %zu", i);
- desc = policyMix->getOutput();
+ policyDesc->mPolicyMix = mix;
+ if (primaryOutputMix) {
+ primaryDesc = policyDesc;
+ ALOGV("%s: Mix %zu: set primary desc", __func__, i);
+ } else {
+ if (policyDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) {
+ ALOGV("%s: Mix %zu ignored as secondaryOutput because not opened yet", __func__, i);
+ } else {
+ ALOGV("%s: Add a secondary desc %zu", __func__, i);
+ secondaryDescs->push_back(policyDesc);
}
+ }
+ }
+ return (primaryDesc == nullptr && secondaryDescs->empty()) ? BAD_VALUE : NO_ERROR;
+}
- } else if (mix->mMixType == MIX_TYPE_RECORDERS) {
- if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
- strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+AudioPolicyMixCollection::MixMatchStatus AudioPolicyMixCollection::mixMatch(
+ const AudioMix* mix, size_t mixIndex, const audio_attributes_t& attributes, uid_t uid) {
+
+ if (mix->mMixType == MIX_TYPE_PLAYERS) {
+ // Loopback render mixes are created from a public API and thus restricted
+ // to non-sensitive audio that has not opted out.
+ if (is_mix_loopback_render(mix->mRouteFlags)) {
+ if ((attributes.flags & AUDIO_FLAG_NO_CAPTURE) == AUDIO_FLAG_NO_CAPTURE) {
+ return MixMatchStatus::NO_MATCH;
+ }
+ if (!(attributes.usage == AUDIO_USAGE_UNKNOWN ||
+ attributes.usage == AUDIO_USAGE_MEDIA ||
+ attributes.usage == AUDIO_USAGE_GAME)) {
+ return MixMatchStatus::NO_MATCH;
+ }
+ }
+ // TODO if adding more player rules (currently only 2), make rule handling "generic"
+ // as there is no difference in the treatment of usage- or uid-based rules
+ bool hasUsageMatchRules = false;
+ bool hasUsageExcludeRules = false;
+ bool usageMatchFound = false;
+ bool usageExclusionFound = false;
+
+ bool hasUidMatchRules = false;
+ bool hasUidExcludeRules = false;
+ bool uidMatchFound = false;
+ bool uidExclusionFound = false;
+
+ bool hasAddrMatch = false;
+
+ // iterate over all mix criteria to list what rules this mix contains
+ for (size_t j = 0; j < mix->mCriteria.size(); j++) {
+ ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
+ mixIndex, j, mix->mCriteria.size());
+
+ // if there is an address match, prioritize that match
+ if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
strncmp(attributes.tags + strlen("addr="),
mix->mDeviceAddress.string(),
AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
- desc = policyMix->getOutput();
+ hasAddrMatch = true;
+ break;
}
+
+ switch (mix->mCriteria[j].mRule) {
+ case RULE_MATCH_ATTRIBUTE_USAGE:
+ ALOGV("\tmix has RULE_MATCH_ATTRIBUTE_USAGE for usage %d",
+ mix->mCriteria[j].mValue.mUsage);
+ hasUsageMatchRules = true;
+ if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+ // found one match against all allowed usages
+ usageMatchFound = true;
+ }
+ break;
+ case RULE_EXCLUDE_ATTRIBUTE_USAGE:
+ ALOGV("\tmix has RULE_EXCLUDE_ATTRIBUTE_USAGE for usage %d",
+ mix->mCriteria[j].mValue.mUsage);
+ hasUsageExcludeRules = true;
+ if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+ // found this usage is to be excluded
+ usageExclusionFound = true;
+ }
+ break;
+ case RULE_MATCH_UID:
+ ALOGV("\tmix has RULE_MATCH_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+ hasUidMatchRules = true;
+ if (mix->mCriteria[j].mValue.mUid == uid) {
+ // found one UID match against all allowed UIDs
+ uidMatchFound = true;
+ }
+ break;
+ case RULE_EXCLUDE_UID:
+ ALOGV("\tmix has RULE_EXCLUDE_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+ hasUidExcludeRules = true;
+ if (mix->mCriteria[j].mValue.mUid == uid) {
+ // found this UID is to be excluded
+ uidExclusionFound = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ // consistency checks: for each "dimension" of rules (usage, uid...), we can
+ // only have MATCH rules, or EXCLUDE rules in each dimension, not a combination
+ if (hasUsageMatchRules && hasUsageExcludeRules) {
+ ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_ATTRIBUTE_USAGE"
+ " and RULE_EXCLUDE_ATTRIBUTE_USAGE in mix %zu", mixIndex);
+ return MixMatchStatus::INVALID_MIX;
+ }
+ if (hasUidMatchRules && hasUidExcludeRules) {
+ ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_UID"
+ " and RULE_EXCLUDE_UID in mix %zu", mixIndex);
+ return MixMatchStatus::INVALID_MIX;
+ }
+
+ if ((hasUsageExcludeRules && usageExclusionFound)
+ || (hasUidExcludeRules && uidExclusionFound)) {
+ break; // stop iterating on criteria because an exclusion was found (will fail)
+ }
+
+ }//iterate on mix criteria
+
+ // determine if the mix matches: either an address match, or no exclusion hit and all match rules satisfied
+ if (hasAddrMatch ||
+ !((hasUsageExcludeRules && usageExclusionFound) ||
+ (hasUsageMatchRules && !usageMatchFound) ||
+ (hasUidExcludeRules && uidExclusionFound) ||
+ (hasUidMatchRules && !uidMatchFound))) {
+ ALOGV("\tgetOutputForAttr will use mix %zu", mixIndex);
+ return MixMatchStatus::MATCH;
}
- if (desc != 0) {
- desc->mPolicyMix = mix;
- return NO_ERROR;
+
+ } else if (mix->mMixType == MIX_TYPE_RECORDERS) {
+ if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
+ strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+ strncmp(attributes.tags + strlen("addr="),
+ mix->mDeviceAddress.string(),
+ AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
+ return MixMatchStatus::MATCH;
}
}
- return BAD_VALUE;
+ return MixMatchStatus::NO_MATCH;
+}
+
+sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForOutput(
+ const sp<SwAudioOutputDescriptor> &output,
+ const DeviceVector &availableOutputDevices,
+ AudioMix **policyMix)
+{
+ for (size_t i = 0; i < size(); i++) {
+ if (valueAt(i)->getOutput() == output) {
+ AudioMix *mix = valueAt(i)->getMix();
+ if (policyMix != nullptr)
+ *policyMix = mix;
+ // This descriptor is involved in a Mix, which has the highest priority
+ audio_devices_t deviceType = mix->mDeviceType;
+ String8 address = mix->mDeviceAddress;
+ ALOGV("%s: device (0x%x, addr=%s) forced by mix",
+ __FUNCTION__, deviceType, address.c_str());
+ return availableOutputDevices.getDevice(deviceType, address, AUDIO_FORMAT_DEFAULT);
+ }
+ }
+ return nullptr;
}
sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
- audio_source_t inputSource, const DeviceVector &availDevices, AudioMix **policyMix)
+ audio_source_t inputSource, const DeviceVector &availDevices, AudioMix **policyMix) const
{
for (size_t i = 0; i < size(); i++) {
AudioMix *mix = valueAt(i)->getMix();
@@ -296,7 +362,8 @@
// assuming PolicyMix only for remote submix for input
// so mix->mDeviceType can only be AUDIO_DEVICE_OUT_REMOTE_SUBMIX
audio_devices_t device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
- auto mixDevice = availDevices.getDevice(device, mix->mDeviceAddress);
+ auto mixDevice =
+ availDevices.getDevice(device, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
if (mixDevice != nullptr) {
if (policyMix != NULL) {
*policyMix = mix;
@@ -321,7 +388,7 @@
ALOGV("getInputMixForAttr looking for address %s\n mixes available:", address.string());
for (size_t i = 0; i < size(); i++) {
sp<AudioPolicyMix> policyMix = valueAt(i);
- AudioMix *mix = policyMix->getMix();
+ const AudioMix *mix = policyMix->getMix();
ALOGV("\tmix %zu address=%s", i, mix->mDeviceAddress.string());
}
#endif
@@ -378,7 +445,7 @@
// for each player mix: remove existing rules that match or exclude this uid
for (size_t i = 0; i < size(); i++) {
bool foundUidRule = false;
- AudioMix *mix = valueAt(i)->getMix();
+ const AudioMix *mix = valueAt(i)->getMix();
if (mix->mMixType != MIX_TYPE_PLAYERS) {
continue;
}
@@ -389,11 +456,11 @@
if ((rule == RULE_EXCLUDE_UID || rule == RULE_MATCH_UID)
&& uid == mix->mCriteria[j].mValue.mUid) {
foundUidRule = true;
- criteriaToRemove.push_back(j);
+ criteriaToRemove.insert(criteriaToRemove.begin(), j);
}
}
if (foundUidRule) {
- for (size_t j = criteriaToRemove.size() - 1; j >= 0; j--) {
+ for (size_t j = 0; j < criteriaToRemove.size(); j++) {
mix->mCriteria.removeAt(criteriaToRemove[j]);
}
}
@@ -406,7 +473,7 @@
// for each player mix: find rules that don't exclude this uid, and add the device to the list
for (size_t i = 0; i < size(); i++) {
bool ruleAllowsUid = true;
- AudioMix *mix = valueAt(i)->getMix();
+ const AudioMix *mix = valueAt(i)->getMix();
if (mix->mMixType != MIX_TYPE_PLAYERS) {
continue;
}
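
The mixMatch() refactor above keeps the original rule semantics: within one dimension (usage or uid) a mix may carry only MATCH rules or only EXCLUDE rules, and combining both is rejected as an invalid mix. The following is a compact, hypothetical restatement of that per-dimension evaluation; the names are illustrative and not AOSP API.

```cpp
#include <vector>

// Simplified model of one rule "dimension" (usage or uid) from the criteria
// walk in AudioPolicyMixCollection::mixMatch(); names are illustrative.
enum class RuleKind { Match, Exclude };
enum class DimensionResult { Invalid, NoMatch, Match };

template <typename T>
struct Rule {
    RuleKind kind;
    T value;
};

// A dimension may hold only MATCH rules or only EXCLUDE rules, never both:
// - EXCLUDE rules: the candidate passes unless one of them hits;
// - MATCH rules:   the candidate passes only if at least one of them hits;
// - no rules:      the dimension does not constrain the candidate.
template <typename T>
DimensionResult evaluateDimension(const std::vector<Rule<T>>& rules, const T& candidate) {
    bool hasMatch = false, hasExclude = false;
    bool matched = false, excluded = false;
    for (const auto& rule : rules) {
        if (rule.kind == RuleKind::Match) {
            hasMatch = true;
            matched = matched || (rule.value == candidate);
        } else {
            hasExclude = true;
            excluded = excluded || (rule.value == candidate);
        }
    }
    if (hasMatch && hasExclude) return DimensionResult::Invalid;  // inconsistent mix
    if (hasExclude && excluded) return DimensionResult::NoMatch;
    if (hasMatch && !matched) return DimensionResult::NoMatch;
    return DimensionResult::Match;
}
```

For instance, evaluateDimension<int>({{RuleKind::Exclude, 10042}}, callerUid) only vetoes the mix when callerUid equals 10042, and an empty rule list never vetoes it.
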
diff --git a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
index 82d64c9..ad07ab1 100644
--- a/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/ClientDescriptor.cpp
@@ -20,6 +20,7 @@
#include <sstream>
#include <utils/Log.h>
#include <utils/String8.h>
+#include <TypeConverter.h>
#include "AudioGain.h"
#include "AudioOutputDescriptor.h"
#include "AudioPatch.h"
@@ -45,6 +46,7 @@
mPortId, mSessionId, mUid);
dst->appendFormat("%*s- Format: %08x Sampling rate: %d Channels: %08x\n", spaces, "",
mConfig.format, mConfig.sample_rate, mConfig.channel_mask);
+ dst->appendFormat("%*s- Attributes: %s\n", spaces, "", toString(mAttributes).c_str());
dst->appendFormat("%*s- Preferred Device Id: %08x\n", spaces, "", mPreferredDeviceId);
dst->appendFormat("%*s- State: %s\n", spaces, "", mActive ? "Active" : "Inactive");
}
@@ -53,6 +55,7 @@
{
ClientDescriptor::dump(dst, spaces, index);
dst->appendFormat("%*s- Stream: %d flags: %08x\n", spaces, "", mStream, mFlags);
+ dst->appendFormat("%*s- Refcount: %d\n", spaces, "", mActivityCount);
}
std::string TrackClientDescriptor::toShortString() const
@@ -82,10 +85,11 @@
SourceClientDescriptor::SourceClientDescriptor(audio_port_handle_t portId, uid_t uid,
audio_attributes_t attributes, const sp<AudioPatch>& patchDesc,
const sp<DeviceDescriptor>& srcDevice, audio_stream_type_t stream,
- routing_strategy strategy) :
+ product_strategy_t strategy, VolumeSource volumeSource) :
TrackClientDescriptor::TrackClientDescriptor(portId, uid, AUDIO_SESSION_NONE, attributes,
AUDIO_CONFIG_BASE_INITIALIZER, AUDIO_PORT_HANDLE_NONE,
- stream, strategy, AUDIO_OUTPUT_FLAG_NONE),
+ stream, strategy, volumeSource, AUDIO_OUTPUT_FLAG_NONE, false,
+ {} /* Sources do not support secondary outputs*/),
mPatchDesc(patchDesc), mSrcDevice(srcDevice)
{
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 01111c5..91961d0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -18,6 +18,8 @@
//#define LOG_NDEBUG 0
#include <audio_utils/string.h>
+#include <media/TypeConverter.h>
+#include <set>
#include "DeviceDescriptor.h"
#include "TypeConverter.h"
#include "AudioGain.h"
@@ -37,11 +39,16 @@
AUDIO_PORT_ROLE_SOURCE),
mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats)
{
+ mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX || type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
mAddress = String8("0");
}
- /* FIXME: read from APM config file */
- if (type == AUDIO_DEVICE_OUT_HDMI) {
+ /* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
+ * FIXME: APM should know the version of the HAL and not add the formats for V5.0.
+ * For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
+ * something like 'encodedFormats="AUDIO_FORMAT_PCM_16_BIT"' on the HDMI devicePort.
+ */
+ if (type == AUDIO_DEVICE_OUT_HDMI && mEncodedFormats.isEmpty()) {
mEncodedFormats.add(AUDIO_FORMAT_AC3);
mEncodedFormats.add(AUDIO_FORMAT_IEC61937);
}
@@ -58,21 +65,57 @@
mId = getNextUniqueId();
}
-void DeviceDescriptor::detach()
-{
+void DeviceDescriptor::detach() {
mId = AUDIO_PORT_HANDLE_NONE;
AudioPort::detach();
}
+template<typename T>
+bool checkEqual(const T& f1, const T& f2)
+{
+ std::set<typename T::value_type> s1(f1.begin(), f1.end());
+ std::set<typename T::value_type> s2(f2.begin(), f2.end());
+ return s1 == s2;
+}
+
bool DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const
{
// Devices are considered equal if they:
// - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
// - have the same address
+ // - have the same encodingFormats (if device supports encoding)
if (other == 0) {
return false;
}
- return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress);
+
+ return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress) &&
+ checkEqual(mEncodedFormats, other->mEncodedFormats);
+}
+
+bool DeviceDescriptor::hasCurrentEncodedFormat() const
+{
+ if (!device_has_encoding_capability(type())) {
+ return true;
+ }
+ if (mEncodedFormats.isEmpty()) {
+ return true;
+ }
+
+ return (mCurrentEncodedFormat != AUDIO_FORMAT_DEFAULT);
+}
+
+bool DeviceDescriptor::supportsFormat(audio_format_t format)
+{
+ if (mEncodedFormats.isEmpty()) {
+ return true;
+ }
+
+ for (const auto& devFormat : mEncodedFormats) {
+ if (devFormat == format) {
+ return true;
+ }
+ }
+ return false;
}
void DeviceVector::refreshTypes()
@@ -87,7 +130,7 @@
ssize_t DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
{
for (size_t i = 0; i < size(); i++) {
- if (item->equals(itemAt(i))) {
+ if (itemAt(i)->equals(item)) { // item may be null sp<>, i.e. AUDIO_DEVICE_NONE
return i;
}
}
@@ -167,12 +210,18 @@
return deviceTypes;
}
-sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address) const
+sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address,
+ audio_format_t format) const
{
sp<DeviceDescriptor> device;
for (size_t i = 0; i < size(); i++) {
if (itemAt(i)->type() == type) {
- if (address == "" || itemAt(i)->address() == address) {
+ // If format is specified, match it and ignore address
+ // Otherwise if address is specified match it
+ // Otherwise always match
+ if (((address == "" || itemAt(i)->address() == address) &&
+ format == AUDIO_FORMAT_DEFAULT) ||
+ (itemAt(i)->supportsFormat(format) && format != AUDIO_FORMAT_DEFAULT)) {
device = itemAt(i);
if (itemAt(i)->address() == address) {
break;
@@ -180,8 +229,8 @@
}
}
}
- ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p",
- __func__, type, address.string(), device.get());
+ ALOGV("DeviceVector::%s() for type %08x address \"%s\" found %p format %08x",
+ __func__, type, address.string(), device.get(), format);
return device;
}
@@ -298,10 +347,9 @@
if (!mTagName.isEmpty()) {
dst->appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.string());
}
- std::string deviceLiteral;
- if (deviceToString(mDeviceType, deviceLiteral)) {
- dst->appendFormat("%*s- type: %-48s\n", spaces, "", deviceLiteral.c_str());
- }
+
+ dst->appendFormat("%*s- type: %-48s\n", spaces, "", ::android::toString(mDeviceType).c_str());
+
if (mAddress.size() != 0) {
dst->appendFormat("%*s- address: %-32s\n", spaces, "", mAddress.string());
}
@@ -353,9 +401,8 @@
void DeviceDescriptor::log() const
{
- std::string device;
- deviceToString(mDeviceType, device);
- ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType, device.c_str(),
+ ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType,
+ ::android::toString(mDeviceType).c_str(),
mAddress.string());
AudioPort::log(" ");
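
DeviceDescriptor::equals() now also compares the encoded-format lists, using the new checkEqual() helper that pours both containers into std::set so ordering and duplicates do not affect the result. A standalone sketch of the same idea, under the assumption that the element type is ordered:

```cpp
#include <set>

// Order-insensitive comparison of two format containers, mirroring the new
// checkEqual() helper added to DeviceDescriptor.cpp.
template <typename Container>
bool sameElements(const Container& a, const Container& b) {
    std::set<typename Container::value_type> sa(a.begin(), a.end());
    std::set<typename Container::value_type> sb(b.begin(), b.end());
    return sa == sb;
}

// Example: sameElements(std::vector<int>{1, 2}, std::vector<int>{2, 1}) is true.
```
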
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 40c49e7..89f9899 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -24,8 +24,9 @@
void EffectDescriptor::dump(String8 *dst, int spaces) const
{
+ dst->appendFormat("%*sID: %d\n", spaces, "", mId);
dst->appendFormat("%*sI/O: %d\n", spaces, "", mIo);
- dst->appendFormat("%*sStrategy: %d\n", spaces, "", mStrategy);
+ dst->appendFormat("%*sMusic Effect: %s\n", spaces, "", isMusicEffect()? "yes" : "no");
dst->appendFormat("%*sSession: %d\n", spaces, "", mSession);
dst->appendFormat("%*sName: %s\n", spaces, "", mDesc.name);
dst->appendFormat("%*s%s\n", spaces, "", mEnabled ? "Enabled" : "Disabled");
@@ -41,9 +42,8 @@
status_t EffectDescriptorCollection::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
- uint32_t strategy,
int session,
- int id)
+ int id, bool isMusicEffect)
{
if (getEffect(id) != nullptr) {
ALOGW("%s effect %s already registered", __FUNCTION__, desc->name);
@@ -59,18 +59,11 @@
if (mTotalEffectsMemory > mTotalEffectsMemoryMaxUsed) {
mTotalEffectsMemoryMaxUsed = mTotalEffectsMemory;
}
- ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
- desc->name, io, strategy, session, id);
+ ALOGV("registerEffect() effect %s, io %d, session %d id %d",
+ desc->name, io, session, id);
ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
- sp<EffectDescriptor> effectDesc = new EffectDescriptor();
- memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
- effectDesc->mId = id;
- effectDesc->mIo = io;
- effectDesc->mStrategy = static_cast<routing_strategy>(strategy);
- effectDesc->mSession = session;
- effectDesc->mEnabled = false;
-
+ sp<EffectDescriptor> effectDesc = new EffectDescriptor(desc, isMusicEffect, id, io, session);
add(id, effectDesc);
return NO_ERROR;
@@ -161,7 +154,7 @@
{
for (size_t i = 0; i < size(); i++) {
sp<EffectDescriptor> effectDesc = valueAt(i);
- if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) &&
+ if (effectDesc->mEnabled && (effectDesc->isMusicEffect()) &&
((effectDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) {
ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d",
effectDesc->mDesc.name, effectDesc->mSession);
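
With the strategy field gone, EffectDescriptor now tracks an isMusicEffect flag, and isNonOffloadableEffectEnabled() refuses offload as soon as any enabled music effect lacks EFFECT_FLAG_OFFLOAD_SUPPORTED. A reduced sketch of that predicate; the struct and field names are illustrative, not the framework types:

```cpp
#include <vector>

// Reduced restatement of EffectDescriptorCollection::isNonOffloadableEffectEnabled():
// offload must be refused as soon as one enabled music effect does not
// advertise offload support.
struct Effect {
    bool enabled;
    bool musicEffect;
    bool offloadSupported;
};

bool hasNonOffloadableMusicEffect(const std::vector<Effect>& effects) {
    for (const auto& e : effects) {
        if (e.enabled && e.musicEffect && !e.offloadSupported) {
            return true;
        }
    }
    return false;
}
```
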
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 7d2d094..ec7ff57 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -273,32 +273,34 @@
return nullptr;
}
-sp <HwModule> HwModuleCollection::getModuleForDeviceTypes(audio_devices_t device) const
+sp <HwModule> HwModuleCollection::getModuleForDeviceTypes(audio_devices_t type,
+ audio_format_t encodedFormat) const
{
for (const auto& module : *this) {
- const auto& profiles = audio_is_output_device(device) ?
+ const auto& profiles = audio_is_output_device(type) ?
module->getOutputProfiles() : module->getInputProfiles();
for (const auto& profile : profiles) {
- if (profile->supportsDeviceTypes(device)) {
- return module;
+ if (profile->supportsDeviceTypes(type)) {
+ if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
+ DeviceVector declaredDevices = module->getDeclaredDevices();
+ sp <DeviceDescriptor> deviceDesc =
+ declaredDevices.getDevice(type, String8(), encodedFormat);
+ if (deviceDesc) {
+ return module;
+ }
+ } else {
+ return module;
+ }
}
}
}
return nullptr;
}
-sp <HwModule> HwModuleCollection::getModuleForDevice(const sp<DeviceDescriptor> &device) const
+sp<HwModule> HwModuleCollection::getModuleForDevice(const sp<DeviceDescriptor> &device,
+ audio_format_t encodedFormat) const
{
- for (const auto& module : *this) {
- const auto& profiles = audio_is_output_device(device->type()) ?
- module->getOutputProfiles() : module->getInputProfiles();
- for (const auto& profile : profiles) {
- if (profile->supportsDevice(device)) {
- return module;
- }
- }
- }
- return nullptr;
+ return getModuleForDeviceTypes(device->type(), encodedFormat);
}
DeviceVector HwModuleCollection::getAvailableDevicesFromModuleName(
@@ -314,6 +316,7 @@
sp<DeviceDescriptor> HwModuleCollection::getDeviceDescriptor(const audio_devices_t deviceType,
const char *address,
const char *name,
+ const audio_format_t encodedFormat,
bool allowToCreate,
bool matchAddress) const
{
@@ -325,8 +328,12 @@
for (const auto& hwModule : *this) {
DeviceVector moduleDevices = hwModule->getAllDevices();
- auto moduleDevice = moduleDevices.getDevice(deviceType, devAddress);
+ auto moduleDevice = moduleDevices.getDevice(deviceType, devAddress, encodedFormat);
if (moduleDevice) {
+ if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
+ moduleDevice->setEncodedFormat(encodedFormat);
+ }
+ moduleDevice->setAddress(devAddress);
if (allowToCreate) {
moduleDevice->attach(hwModule);
}
@@ -334,18 +341,19 @@
}
}
if (!allowToCreate) {
- ALOGE("%s: could not find HW module for device %s %04x address %s", __FUNCTION__,
+ ALOGV("%s: could not find HW module for device %s %04x address %s", __FUNCTION__,
name, deviceType, address);
return nullptr;
}
- return createDevice(deviceType, address, name);
+ return createDevice(deviceType, address, name, encodedFormat);
}
sp<DeviceDescriptor> HwModuleCollection::createDevice(const audio_devices_t type,
const char *address,
- const char *name) const
+ const char *name,
+ const audio_format_t encodedFormat) const
{
- sp<HwModule> hwModule = getModuleForDeviceTypes(type);
+ sp<HwModule> hwModule = getModuleForDeviceTypes(type, encodedFormat);
if (hwModule == 0) {
ALOGE("%s: could not find HW module for device %04x address %s", __FUNCTION__, type,
address);
@@ -354,8 +362,9 @@
sp<DeviceDescriptor> device = new DeviceDescriptor(type, String8(name));
device->setName(String8(name));
device->setAddress(String8(address));
+ device->setEncodedFormat(encodedFormat);
- // Add the device to the list of dynamic devices
+ // Add the device to the list of dynamic devices
hwModule->addDynamicDevice(device);
// Reciprocally attach the device to the module
device->attach(hwModule);
@@ -370,7 +379,8 @@
if (profile->supportsDevice(device, false /*matchAdress*/)) {
// @todo quid of audio profile? import the profile from device of the same type?
- const auto &isoTypeDeviceForProfile = profile->getSupportedDevices().getDevice(type);
+ const auto &isoTypeDeviceForProfile =
+ profile->getSupportedDevices().getDevice(type, String8(), AUDIO_FORMAT_DEFAULT);
device->importAudioPort(isoTypeDeviceForProfile, true /* force */);
ALOGV("%s: adding device %s to profile %s", __FUNCTION__,
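
getDeviceDescriptor() above now threads an encoded format through the lookup: a declared device only matches when it supports the requested format, the match is refreshed with the caller's address and format, and a dynamic device is created only when allowToCreate is set. Below is a much-reduced, hypothetical model of that find-or-create flow (it deliberately ignores the per-module walk and the address/format precedence rules):

```cpp
#include <memory>
#include <string>
#include <vector>

// Hypothetical, much-reduced model of HwModuleCollection::getDeviceDescriptor().
struct Device {
    int type;
    std::string address;
    std::vector<int> formats;  // empty list == PCM only, matches any request

    bool supportsFormat(int fmt) const {
        if (formats.empty()) return true;
        for (int f : formats) {
            if (f == fmt) return true;
        }
        return false;
    }
};

std::shared_ptr<Device> findOrCreate(std::vector<std::shared_ptr<Device>>& declared,
                                     int type, const std::string& address,
                                     int encodedFormat,  // 0 == "default", i.e. no constraint
                                     bool allowToCreate) {
    for (auto& dev : declared) {
        if (dev->type != type) continue;
        if (encodedFormat != 0 && !dev->supportsFormat(encodedFormat)) continue;
        dev->address = address;  // the patch also refreshes address and format here
        return dev;
    }
    if (!allowToCreate) {
        return nullptr;
    }
    auto dev = std::make_shared<Device>(Device{type, address, {}});
    declared.push_back(dev);
    return dev;
}
```
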
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 1154654..81d3968 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -140,6 +140,8 @@
static constexpr const char *roleSource = "source"; /**< <attribute role source value>. */
/** optional: device address, char string less than 64. */
static constexpr const char *address = "address";
+ /** optional: the list of encoded audio formats that are known to be supported. */
+ static constexpr const char *encodedFormats = "encodedFormats";
};
static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
@@ -199,25 +201,6 @@
static status_t deserialize(const xmlNode *root, AudioPolicyConfig *config);
};
-struct VolumeTraits : public AndroidCollectionTraits<VolumeCurve, VolumeCurvesCollection>
-{
- static constexpr const char *tag = "volume";
- static constexpr const char *collectionTag = "volumes";
- static constexpr const char *volumePointTag = "point";
- static constexpr const char *referenceTag = "reference";
-
- struct Attributes
- {
- static constexpr const char *stream = "stream";
- static constexpr const char *deviceCategory = "deviceCategory";
- static constexpr const char *reference = "ref";
- static constexpr const char *referenceName = "name";
- };
-
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
- // No Children
-};
-
struct SurroundSoundTraits
{
static constexpr const char *tag = "surroundSound";
@@ -511,7 +494,13 @@
ALOGW("%s: bad type %08x", __func__, type);
return Status::fromStatusT(BAD_VALUE);
}
- Element deviceDesc = new DeviceDescriptor(type, String8(name.c_str()));
+ std::string encodedFormatsLiteral = getXmlAttribute(cur, Attributes::encodedFormats);
+ ALOGV("%s: %s %s=%s", __func__, tag, Attributes::encodedFormats, encodedFormatsLiteral.c_str());
+ FormatVector encodedFormats;
+ if (!encodedFormatsLiteral.empty()) {
+ encodedFormats = formatsFromString(encodedFormatsLiteral, " ");
+ }
+ Element deviceDesc = new DeviceDescriptor(type, encodedFormats, String8(name.c_str()));
std::string address = getXmlAttribute(cur, Attributes::address);
if (!address.empty()) {
@@ -695,67 +684,6 @@
return NO_ERROR;
}
-Return<VolumeTraits::Element> VolumeTraits::deserialize(const xmlNode *cur,
- PtrSerializingCtx /*serializingContext*/)
-{
- std::string streamTypeLiteral = getXmlAttribute(cur, Attributes::stream);
- if (streamTypeLiteral.empty()) {
- ALOGE("%s: No %s found", __func__, Attributes::stream);
- return Status::fromStatusT(BAD_VALUE);
- }
- audio_stream_type_t streamType;
- if (!StreamTypeConverter::fromString(streamTypeLiteral, streamType)) {
- ALOGE("%s: Invalid %s", __func__, Attributes::stream);
- return Status::fromStatusT(BAD_VALUE);
- }
- std::string deviceCategoryLiteral = getXmlAttribute(cur, Attributes::deviceCategory);
- if (deviceCategoryLiteral.empty()) {
- ALOGE("%s: No %s found", __func__, Attributes::deviceCategory);
- return Status::fromStatusT(BAD_VALUE);
- }
- device_category deviceCategory;
- if (!DeviceCategoryConverter::fromString(deviceCategoryLiteral, deviceCategory)) {
- ALOGE("%s: Invalid %s=%s", __func__, Attributes::deviceCategory,
- deviceCategoryLiteral.c_str());
- return Status::fromStatusT(BAD_VALUE);
- }
-
- std::string referenceName = getXmlAttribute(cur, Attributes::reference);
- const xmlNode *ref = NULL;
- if (!referenceName.empty()) {
- ref = getReference<VolumeTraits>(cur->parent, referenceName);
- if (ref == NULL) {
- ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
- return Status::fromStatusT(BAD_VALUE);
- }
- }
-
- Element volCurve = new VolumeCurve(deviceCategory, streamType);
-
- for (const xmlNode *child = referenceName.empty() ? cur->xmlChildrenNode : ref->xmlChildrenNode;
- child != NULL; child = child->next) {
- if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(volumePointTag))) {
- auto pointDefinition = make_xmlUnique(xmlNodeListGetString(
- child->doc, child->xmlChildrenNode, 1));
- if (pointDefinition == nullptr) {
- return Status::fromStatusT(BAD_VALUE);
- }
- ALOGV("%s: %s=%s",
- __func__, tag, reinterpret_cast<const char*>(pointDefinition.get()));
- std::vector<int32_t> point;
- collectionFromString<DefaultTraits<int32_t>>(
- reinterpret_cast<const char*>(pointDefinition.get()), point, ",");
- if (point.size() != 2) {
- ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
- reinterpret_cast<const char*>(pointDefinition.get()));
- return Status::fromStatusT(BAD_VALUE);
- }
- volCurve->add(CurvePoint(point[0], point[1]));
- }
- }
- return volCurve;
-}
-
status_t SurroundSoundTraits::deserialize(const xmlNode *root, AudioPolicyConfig *config)
{
config->setDefaultSurroundFormats();
@@ -843,14 +771,6 @@
}
config->setHwModules(modules);
- // deserialize volume section
- VolumeTraits::Collection volumes;
- status = deserializeCollection<VolumeTraits>(root, &volumes, config);
- if (status != NO_ERROR) {
- return status;
- }
- config->setVolumes(volumes);
-
// Global Configuration
GlobalConfigTraits::deserialize(root, config);
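
The serializer change reads an optional encodedFormats attribute and splits it on spaces before converting each token (formatsFromString in the patch). A minimal sketch of just the whitespace split, with the real converter call omitted:

```cpp
#include <sstream>
#include <string>
#include <vector>

// Sketch of how an attribute such as
//   encodedFormats="AUDIO_FORMAT_AC3 AUDIO_FORMAT_IEC61937"
// is broken into tokens before each one is handed to the format converter.
std::vector<std::string> splitFormats(const std::string& literal) {
    std::vector<std::string> tokens;
    std::istringstream stream(literal);
    for (std::string token; stream >> token; ) {
        tokens.push_back(token);
    }
    return tokens;
}
```
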
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index 6f48eae..7c76d8a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -45,6 +45,7 @@
const RouteFlagTypeConverter::Table RouteFlagTypeConverter::mTable[] = {
MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_RENDER),
MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_LOOP_BACK),
+ MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER),
MAKE_STRING_FROM_ENUM(MIX_ROUTE_FLAG_ALL),
TERMINATOR
};
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
deleted file mode 100644
index 620f361..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::VolumeCurve"
-//#define LOG_NDEBUG 0
-
-#include "VolumeCurve.h"
-#include "TypeConverter.h"
-
-namespace android {
-
-float VolumeCurve::volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const
-{
- ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
-
- size_t nbCurvePoints = mCurvePoints.size();
- // the volume index in the UI is relative to the min and max volume indices for this stream
- int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
- if (indexInUi < volIndexMin) {
- ALOGV("VOLUME remapping index from %d to min index %d", indexInUi, volIndexMin);
- indexInUi = volIndexMin;
- } else if (indexInUi > volIndexMax) {
- ALOGV("VOLUME remapping index from %d to max index %d", indexInUi, volIndexMax);
- indexInUi = volIndexMax;
- }
- int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
-
- // Where would this volume index been inserted in the curve point
- size_t indexInUiPosition = mCurvePoints.orderOf(CurvePoint(volIdx, 0));
- if (indexInUiPosition >= nbCurvePoints) {
- //use last point of table
- return mCurvePoints[nbCurvePoints - 1].mAttenuationInMb / 100.0f;
- }
- if (indexInUiPosition == 0) {
- if (indexInUiPosition != mCurvePoints[0].mIndex) {
- return VOLUME_MIN_DB; // out of bounds
- }
- return mCurvePoints[0].mAttenuationInMb / 100.0f;
- }
- // linear interpolation in the attenuation table in dB
- float decibels = (mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f) +
- ((float)(volIdx - mCurvePoints[indexInUiPosition - 1].mIndex)) *
- ( ((mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f) -
- (mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f)) /
- ((float)(mCurvePoints[indexInUiPosition].mIndex -
- mCurvePoints[indexInUiPosition - 1].mIndex)) );
-
- ALOGV("VOLUME mDeviceCategory %d, mStreamType %d vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
- mDeviceCategory, mStreamType,
- mCurvePoints[indexInUiPosition - 1].mIndex, volIdx,
- mCurvePoints[indexInUiPosition].mIndex,
- ((float)mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f), decibels,
- ((float)mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f));
-
- return decibels;
-}
-
-void VolumeCurve::dump(String8 *dst) const
-{
- dst->append(" {");
- for (size_t i = 0; i < mCurvePoints.size(); i++) {
- dst->appendFormat("(%3d, %5d)",
- mCurvePoints[i].mIndex, mCurvePoints[i].mAttenuationInMb);
- dst->append(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
- }
-}
-
-void VolumeCurvesForStream::dump(String8 *dst, int spaces = 0, bool curvePoints) const
-{
- if (!curvePoints) {
- dst->appendFormat("%s %02d %02d ",
- mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
- for (size_t i = 0; i < mIndexCur.size(); i++) {
- dst->appendFormat("%04x : %02d, ", mIndexCur.keyAt(i), mIndexCur.valueAt(i));
- }
- dst->append("\n");
- return;
- }
-
- for (size_t i = 0; i < size(); i++) {
- std::string deviceCatLiteral;
- DeviceCategoryConverter::toString(keyAt(i), deviceCatLiteral);
- dst->appendFormat("%*s %s :",
- spaces, "", deviceCatLiteral.c_str());
- valueAt(i)->dump(dst);
- }
- dst->append("\n");
-}
-
-void VolumeCurvesCollection::dump(String8 *dst) const
-{
- dst->append("\nStreams dump:\n");
- dst->append(
- " Stream Can be muted Index Min Index Max Index Cur [device : index]...\n");
- for (size_t i = 0; i < size(); i++) {
- dst->appendFormat(" %02zu ", i);
- valueAt(i).dump(dst);
- }
- dst->append("\nVolume Curves for Use Cases (aka Stream types) dump:\n");
- for (size_t i = 0; i < size(); i++) {
- std::string streamTypeLiteral;
- StreamTypeConverter::toString(keyAt(i), streamTypeLiteral);
- dst->appendFormat(
- " %s (%02zu): Curve points for device category (index, attenuation in millibel)\n",
- streamTypeLiteral.c_str(), i);
- valueAt(i).dump(dst, 2, true);
- }
-}
-
-} // namespace android
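
VolumeCurve.cpp is deleted here because the volume tables move to the engine configuration, but the mapping it implemented is worth restating: clamp the UI index, rescale it onto the curve's own index range, then linearly interpolate the attenuation (stored in millibels) between the two surrounding curve points. A slightly simplified, standalone sketch of that interpolation, not the removed framework code:

```cpp
#include <algorithm>
#include <vector>

// Curve points map a normalized volume index to an attenuation in millibels.
struct CurvePoint { int index; int attenuationMb; };

float volIndexToDb(int indexInUi, int indexMin, int indexMax,
                   const std::vector<CurvePoint>& points) {
    if (points.empty() || indexMax <= indexMin) {
        return 0.0f;  // degenerate inputs: no curve or no usable index range
    }
    // clamp the UI index, then rescale it onto the curve's own index range
    indexInUi = std::min(std::max(indexInUi, indexMin), indexMax);
    const int nbSteps = 1 + points.back().index - points.front().index;
    const int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);

    // locate the first curve point at or above the rescaled index
    size_t pos = 0;
    while (pos < points.size() && points[pos].index < volIdx) ++pos;
    if (pos >= points.size()) return points.back().attenuationMb / 100.0f;
    if (pos == 0) return points.front().attenuationMb / 100.0f;

    // linear interpolation between the two surrounding points, result in dB
    const CurvePoint& lo = points[pos - 1];
    const CurvePoint& hi = points[pos];
    const float loDb = lo.attenuationMb / 100.0f;
    const float hiDb = hi.attenuationMb / 100.0f;
    return loDb + (volIdx - lo.index) * (hiDb - loDb) / float(hi.index - lo.index);
}
```
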
diff --git a/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
new file mode 100644
index 0000000..57bd4f8
--- /dev/null
+++ b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Input Audio HAL Audio Policy Configuration file -->
+<module name="a2dp" halVersion="2.0">
+ <mixPorts>
+ <mixPort name="a2dp input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="a2dp input"
+ sources="BT A2DP In"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index 42c52de..b28381b 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<!-- Copyright (C) 2015 The Android Open Source Project
+<!-- Copyright (C) 2019 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -173,8 +173,8 @@
</module>
- <!-- A2dp Audio HAL -->
- <xi:include href="a2dp_audio_policy_configuration.xml"/>
+ <!-- A2dp Input Audio HAL -->
+ <xi:include href="a2dp_in_audio_policy_configuration.xml"/>
<!-- Usb Audio HAL -->
<xi:include href="usb_audio_policy_configuration.xml"/>
@@ -182,8 +182,8 @@
<!-- Remote Submix Audio HAL -->
<xi:include href="r_submix_audio_policy_configuration.xml"/>
- <!-- Hearing aid Audio HAL -->
- <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+ <!-- Bluetooth Audio HAL -->
+ <xi:include href="bluetooth_audio_policy_configuration.xml"/>
<!-- MSD Audio HAL (optional) -->
<xi:include href="msd_audio_policy_configuration.xml"/>
@@ -191,7 +191,11 @@
</modules>
<!-- End of Modules section -->
- <!-- Volume section -->
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+ Kept here for legacy support.
+ The engine falls back on these files if it does not provide its own.
+ -->
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
diff --git a/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
new file mode 100644
index 0000000..b4cc1d3
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” -->
+
+ <!-- Global configuration Declaration -->
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+
+ <!-- Modules section:
+ There is one section per audio HW module present on the platform.
+ Each module section will contain two mandatory tags for audio HAL “halVersion” and “name”.
+ The module names are the same as in current .conf file:
+ “primary”, “A2DP”, “remote_submix”, “USB”
+ Each module will contain the following sections:
+ “devicePorts”: a list of device descriptors for all input and output devices accessible via this
+ module.
+ This contains both permanently attached devices and removable devices.
+ “mixPorts”: listing all output and input streams exposed by the audio HAL
+ “routes”: list of possible connections between input and output devices or between stream and
+ devices.
+ "route": is defined by an attribute:
+ -"type": <mux|mix> means all sources are mutually exclusive (mux) or can be mixed (mix)
+ -"sink": the sink involved in this route
+ -"sources": all the sources that can be connected to the sink via this route
+ “attachedDevices”: permanently attached devices.
+ The attachedDevices section is a list of device names. The names correspond to device names
+ defined in <devicePorts> section.
+ “defaultOutputDevice”: device to be used by default when no policy rule applies
+ -->
+ <modules>
+ <!-- Primary Audio HAL -->
+ <module name="primary" halVersion="3.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ <item>Built-In Back Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="deep_buffer" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="compressed_offload" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_MP3"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC_LC"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="voice_tx" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </mixPort>
+ <mixPort name="voice_rx" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
+ <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ <gains>
+ <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
+ minValueMB="-8400"
+ maxValueMB="4000"
+ defaultValueMB="0"
+ stepValueMB="100"/>
+ </gains>
+ </devicePort>
+ <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ </devicePorts>
+ <!-- route declaration, i.e. list all available sources for a given sink -->
+ <routes>
+ <route type="mix" sink="Earpiece"
+ sources="primary output,deep_buffer,BT SCO Headset Mic"/>
+ <route type="mix" sink="Speaker"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headset"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headphones"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+ <route type="mix" sink="Telephony Tx"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic, voice_tx"/>
+ <route type="mix" sink="voice_rx"
+ sources="Telephony Rx"/>
+ </routes>
+
+ </module>
+
+ <!-- A2dp Audio HAL -->
+ <xi:include href="a2dp_audio_policy_configuration.xml"/>
+
+ <!-- Usb Audio HAL -->
+ <xi:include href="usb_audio_policy_configuration.xml"/>
+
+ <!-- Remote Submix Audio HAL -->
+ <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+ <!-- Hearing aid Audio HAL -->
+ <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+
+ <!-- MSD Audio HAL (optional) -->
+ <xi:include href="msd_audio_policy_configuration.xml"/>
+
+ </modules>
+ <!-- End of Modules section -->
+
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+ Kept here for legacy support.
+ The engine falls back on these files if it does not provide its own.
+ -->
+
+ <xi:include href="audio_policy_volumes.xml"/>
+ <xi:include href="default_volume_tables.xml"/>
+
+ <!-- End of Volume section -->
+
+ <!-- Surround Sound configuration -->
+
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+ <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic.xml b/services/audiopolicy/config/audio_policy_configuration_generic.xml
index 58768c3..9ad609d 100644
--- a/services/audiopolicy/config/audio_policy_configuration_generic.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_generic.xml
@@ -30,11 +30,21 @@
</modules>
<!-- End of Modules section -->
- <!-- Volume section -->
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+ Kept here for legacy support.
+ The engine falls back on these files if it does not provide its own.
+ -->
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
<!-- End of Volume section -->
+ <!-- Surround Sound configuration -->
+
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+ <!-- End of Surround Sound configuration -->
+
</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml b/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml
new file mode 100644
index 0000000..5f1ca31
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_generic_tv.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” -->
+
+ <!-- Global configuration Declaration -->
+ <globalConfiguration speaker_drc_enabled="false"/>
+
+ <modules>
+ <!-- Primary Audio HAL -->
+ <xi:include href="primary_audio_policy_configuration_tv.xml"/>
+
+ <!-- Usb Audio HAL -->
+ <xi:include href="usb_audio_policy_configuration.xml"/>
+
+ <!-- Remote Submix Audio HAL -->
+ <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+ </modules>
+ <!-- End of Modules section -->
+
+ <!-- Volume section -->
+
+ <xi:include href="audio_policy_volumes.xml"/>
+ <xi:include href="default_volume_tables.xml"/>
+
+ <!-- End of Volume section -->
+
+ <!-- Surround Sound configuration -->
+
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+ <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_stub.xml b/services/audiopolicy/config/audio_policy_configuration_stub.xml
index 26c381f..8350eb8 100644
--- a/services/audiopolicy/config/audio_policy_configuration_stub.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_stub.xml
@@ -15,6 +15,9 @@
-->
<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- Global configuration Declaration -->
+ <globalConfiguration speaker_drc_enabled="false"/>
+
<modules>
<!-- Stub Audio HAL -->
<xi:include href="stub_audio_policy_configuration.xml"/>
@@ -26,5 +29,6 @@
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
new file mode 100644
index 0000000..ce78eb0
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+ <mixPorts>
+ <!-- A2DP Audio Ports -->
+ <mixPort name="a2dp output" role="source"/>
+ <!-- Hearing AIDs Audio Ports -->
+ <mixPort name="hearing aid output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000,16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- A2DP Audio Ports -->
+ <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <!-- Hearing AIDs Audio Ports -->
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT A2DP Out"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Headphones"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Speaker"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT Hearing Aid Out"
+ sources="hearing aid output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
index 3c48e88..e6e6bdb 100644
--- a/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/hearing_aid_audio_policy_configuration.xml
@@ -2,7 +2,7 @@
<!-- Hearing aid Audio HAL Audio Policy Configuration file -->
<module name="hearing_aid" halVersion="2.0">
<mixPorts>
- <mixPort name="hearing aid output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <mixPort name="hearing aid output" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="24000,16000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration.xml b/services/audiopolicy/config/primary_audio_policy_configuration.xml
index 5b7ae7f..eedc96b 100644
--- a/services/audiopolicy/config/primary_audio_policy_configuration.xml
+++ b/services/audiopolicy/config/primary_audio_policy_configuration.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8"?>
-<!-- Default Primary Audio HAL Module Audio Policy Configuration include flie -->
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file -->
<module name="primary" halVersion="2.0">
<attachedDevices>
<item>Speaker</item>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml b/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml
new file mode 100644
index 0000000..826015a
--- /dev/null
+++ b/services/audiopolicy/config/primary_audio_policy_configuration_tv.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file for TV -->
+<module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="direct" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT" />
+ <mixPort name="tunnel" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC" />
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink" />
+ <devicePort tagName="Out Aux Digital" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink"
+ encodedFormats="AUDIO_FORMAT_AC3 AUDIO_FORMAT_IEC61937" />
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker" sources="primary output"/>
+ <route type="mix" sink="Out Aux Digital" sources="primary output,direct,tunnel"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/engine/Android.mk b/services/audiopolicy/engine/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/engine/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
new file mode 100644
index 0000000..e6ede07
--- /dev/null
+++ b/services/audiopolicy/engine/common/Android.bp
@@ -0,0 +1,19 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_headers {
+ name: "libaudiopolicyengine_common_headers",
+ host_supported: true,
+ export_include_dirs: ["include"],
+}
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
new file mode 100644
index 0000000..6ff8512
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <EngineConfig.h>
+#include <AudioPolicyManagerInterface.h>
+#include <ProductStrategy.h>
+#include <VolumeGroup.h>
+
+namespace android {
+namespace audio_policy {
+
+class EngineBase : public AudioPolicyManagerInterface
+{
+public:
+ ///
+ /// from AudioPolicyManagerInterface
+ ///
+ android::status_t initCheck() override;
+
+ void setObserver(AudioPolicyManagerObserver *observer) override;
+
+ status_t setPhoneState(audio_mode_t mode) override;
+
+ audio_mode_t getPhoneState() const override { return mPhoneState; }
+
+ status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) override
+ {
+ mForceUse[usage] = config;
+ return NO_ERROR;
+ }
+
+ audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const override
+ {
+ return mForceUse[usage];
+ }
+ android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> /*devDesc*/,
+ audio_policy_dev_state_t /*state*/) override
+ {
+ return NO_ERROR;
+ }
+ product_strategy_t getProductStrategyForAttributes(
+ const audio_attributes_t &attr) const override;
+
+ audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const override;
+
+ audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const override;
+
+ StreamTypeVector getStreamTypesForProductStrategy(product_strategy_t ps) const override;
+
+ AttributesVector getAllAttributesForProductStrategy(product_strategy_t ps) const override;
+
+ StrategyVector getOrderedProductStrategies() const override;
+
+ status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const override;
+
+ VolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) const override;
+
+ VolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) const override;
+
+ IVolumeCurves *getVolumeCurvesForVolumeGroup(volume_group_t group) const override
+ {
+ return mVolumeGroups.find(group) != end(mVolumeGroups) ?
+ mVolumeGroups.at(group)->getVolumeCurves() : nullptr;
+ }
+
+ VolumeGroupVector getVolumeGroups() const override;
+
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const override;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const override;
+
+ StreamTypeVector getStreamTypesForVolumeGroup(volume_group_t volumeGroup) const override;
+
+ AttributesVector getAllAttributesForVolumeGroup(volume_group_t volumeGroup) const override;
+
+ status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) const override;
+
+ void dump(String8 *dst) const override;
+
+
+ engineConfig::ParsingResult loadAudioPolicyEngineConfig();
+
+ const ProductStrategyMap &getProductStrategies() const { return mProductStrategies; }
+
+ ProductStrategyMap &getProductStrategies() { return mProductStrategies; }
+
+ product_strategy_t getProductStrategyForStream(audio_stream_type_t stream) const;
+
+ product_strategy_t getProductStrategyByName(const std::string &name) const;
+
+ AudioPolicyManagerObserver *getApmObserver() const { return mApmObserver; }
+
+ inline bool isInCall() const
+ {
+ return is_state_in_call(getPhoneState());
+ }
+
+ VolumeSource toVolumeSource(audio_stream_type_t stream) const
+ {
+ return static_cast<VolumeSource>(stream);
+ }
+
+ status_t switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst);
+
+ status_t restoreOriginVolumeCurve(audio_stream_type_t stream);
+
+ private:
+ AudioPolicyManagerObserver *mApmObserver = nullptr;
+
+ ProductStrategyMap mProductStrategies;
+ VolumeGroupMap mVolumeGroups;
+ audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */
+
+ /** current forced use configuration. */
+ audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT] = {};
+};
+
+} // namespace audio_policy
+} // namespace android
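
For orientation, EngineBase above is meant to be subclassed by a concrete policy engine that adds device selection on top of the strategy and volume-group bookkeeping. A minimal sketch, assuming the headers from this change are on the include path (the class name SampleEngine and its constructor are illustrative only, not part of this patch):

    #include <EngineBase.h>

    namespace android {
    namespace audio_policy {

    class SampleEngine : public EngineBase {
    public:
        SampleEngine()
        {
            // Populate product strategies and volume groups, either from
            // audio_policy_engine_configuration.xml or from the built-in defaults.
            loadAudioPolicyEngineConfig();
        }
        // A real engine also implements the remaining pure virtual methods of
        // AudioPolicyManagerInterface (output/input device selection, ...),
        // which are intentionally left out of this sketch.
    };

    }  // namespace audio_policy
    }  // namespace android
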
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
new file mode 100644
index 0000000..767a8ed
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "VolumeGroup.h"
+
+#include <system/audio.h>
+#include <AudioPolicyManagerInterface.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <utils/Errors.h>
+#include <utils/String8.h>
+
+namespace android {
+
+/**
+ * @brief The ProductStrategy class describes, for each product_strategy_t identifier, the
+ * associated audio attributes, the device types to use and the device address to use.
+ * The identifier is deliberately not strongly typed so that it remains extensible by OEMs.
+ */
+class ProductStrategy : public virtual RefBase, private HandleGenerator<uint32_t>
+{
+private:
+ struct AudioAttributes {
+ audio_stream_type_t mStream = AUDIO_STREAM_DEFAULT;
+ volume_group_t mVolumeGroup = VOLUME_GROUP_NONE;
+ audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ };
+
+ using AudioAttributesVector = std::vector<AudioAttributes>;
+
+public:
+ ProductStrategy(const std::string &name);
+
+ void addAttributes(const AudioAttributes &audioAttributes);
+
+ std::vector<android::AudioAttributes> listAudioAttributes() const;
+
+ std::string getName() const { return mName; }
+ AttributesVector getAudioAttributes() const;
+ product_strategy_t getId() const { return mId; }
+ StreamTypeVector getSupportedStreams() const;
+
+ /**
+ * @brief matches checks if the given audio attributes shall follow the strategy.
+ * Order of the attributes within a strategy matters.
+ * If only the usage is available, the check is performed on the usages of the given
+ * attributes, otherwise all fields must match.
+ * @param attributes to consider
+ * @return true if attributes matches with the strategy, false otherwise.
+ */
+ bool matches(const audio_attributes_t attributes) const;
+
+ bool supportStreamType(const audio_stream_type_t &streamType) const;
+
+ void setDeviceAddress(const std::string &address)
+ {
+ mDeviceAddress = address;
+ }
+
+ std::string getDeviceAddress() const { return mDeviceAddress; }
+
+ void setDeviceTypes(audio_devices_t devices)
+ {
+ mApplicableDevices = devices;
+ }
+
+ audio_devices_t getDeviceTypes() const { return mApplicableDevices; }
+
+ audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const;
+ audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const;
+
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const;
+
+ volume_group_t getDefaultVolumeGroup() const;
+
+ bool isDefault() const;
+
+ void dump(String8 *dst, int spaces = 0) const;
+
+private:
+ std::string mName;
+
+ AudioAttributesVector mAttributesVector;
+
+ product_strategy_t mId;
+
+ std::string mDeviceAddress; /**< Device address applicable for this strategy, may be empty */
+
+ /**
+ * Applicable device(s) type mask for this strategy.
+ */
+ audio_devices_t mApplicableDevices = AUDIO_DEVICE_NONE;
+};
+
+class ProductStrategyMap : public std::map<product_strategy_t, sp<ProductStrategy> >
+{
+public:
+ /**
+ * @brief initialize: set default product strategy in cache.
+ */
+ void initialize();
+ /**
+ * @brief getProductStrategyForAttributes. The order of the strategies is significant: the first match wins.
+ * @param attr
+ * @return applicable product strategy for the given attribute, default if none applicable.
+ */
+ product_strategy_t getProductStrategyForAttributes(const audio_attributes_t &attr) const;
+
+ product_strategy_t getProductStrategyForStream(audio_stream_type_t stream) const;
+
+ audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const;
+
+ audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const;
+
+ /**
+ * @brief getAttributesForProductStrategy can be called from
+ * AudioManager: in this case, the product strategy IS the former routing strategy
+ * CarAudioManager: in this case, the product strategy IS the car usage
+ * [getAudioAttributesForCarUsage]
+ * OemExtension: in this case, the product strategy IS the Oem usage
+ *
+ * @param strategy
+ * @return audio attributes (or at least one of the attributes) following the given strategy.
+ */
+ audio_attributes_t getAttributesForProductStrategy(product_strategy_t strategy) const;
+
+ audio_devices_t getDeviceTypesForProductStrategy(product_strategy_t strategy) const;
+
+ std::string getDeviceAddressForProductStrategy(product_strategy_t strategy) const;
+
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const;
+
+ product_strategy_t getDefault() const;
+
+ void dump(String8 *dst, int spaces = 0) const;
+
+private:
+ product_strategy_t mDefaultStrategy = PRODUCT_STRATEGY_NONE;
+};
+
+} // namespace android
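
As a rough illustration of how ProductStrategyMap is consumed (the helper below is hypothetical; getProductStrategyForAttributes and AUDIO_ATTRIBUTES_INITIALIZER come from the header above and system/audio.h):

    #include <ProductStrategy.h>
    #include <system/audio.h>

    namespace android {

    // Resolve the product strategy a media playback request would follow.
    static product_strategy_t strategyForMediaUsage(const ProductStrategyMap &strategies)
    {
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_MEDIA;
        // The first strategy whose attributes match wins; the default strategy
        // is returned when nothing matches.
        return strategies.getProductStrategyForAttributes(attr);
    }

    }  // namespace android
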
diff --git a/services/audiopolicy/engine/common/include/VolumeCurve.h b/services/audiopolicy/engine/common/include/VolumeCurve.h
new file mode 100644
index 0000000..54314e3
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/VolumeCurve.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "IVolumeCurves.h"
+#include <policy.h>
+#include <AudioPolicyManagerInterface.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+#include <string>
+#include <map>
+#include <utility>
+
+namespace android {
+
+struct CurvePoint
+{
+ CurvePoint() {}
+ CurvePoint(int index, int attenuationInMb) :
+ mIndex(index), mAttenuationInMb(attenuationInMb) {}
+ uint32_t mIndex;
+ int mAttenuationInMb;
+};
+
+inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
+{
+ return lhs.mIndex < rhs.mIndex;
+}
+
+// A volume curve for a given use case and device category
+// It contains a list of points on this curve expressing the attenuation in millibels for
+// a given volume index from 0 to 100.
+class VolumeCurve : public RefBase
+{
+public:
+ VolumeCurve(device_category device) : mDeviceCategory(device) {}
+
+ void add(const CurvePoint &point) { mCurvePoints.add(point); }
+
+ float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
+
+ void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const;
+
+ device_category getDeviceCategory() const { return mDeviceCategory; }
+
+private:
+ const device_category mDeviceCategory;
+ SortedVector<CurvePoint> mCurvePoints;
+};
+
+// Volume Curves for a given use case indexed by device category
+class VolumeCurves : public KeyedVector<device_category, sp<VolumeCurve> >,
+ public IVolumeCurves
+{
+public:
+ VolumeCurves(int indexMin = 0, int indexMax = 100) :
+ mIndexMin(indexMin), mIndexMax(indexMax)
+ {
+ addCurrentVolumeIndex(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
+ }
+ status_t initVolume(int indexMin, int indexMax) override
+ {
+ mIndexMin = indexMin;
+ mIndexMax = indexMax;
+ return NO_ERROR;
+ }
+
+ sp<VolumeCurve> getCurvesFor(device_category device) const
+ {
+ if (indexOfKey(device) < 0) {
+ return 0;
+ }
+ return valueFor(device);
+ }
+
+ virtual int getVolumeIndex(audio_devices_t device) const
+ {
+ device = Volume::getDeviceForVolume(device);
+ // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
+ if (mIndexCur.find(device) == end(mIndexCur)) {
+ device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
+ }
+ return mIndexCur.at(device);
+ }
+
+ virtual bool canBeMuted() const { return mCanBeMuted; }
+ virtual void clearCurrentVolumeIndex() { mIndexCur.clear(); }
+ void addCurrentVolumeIndex(audio_devices_t device, int index) override
+ {
+ mIndexCur[device] = index;
+ }
+
+ int getVolumeIndexMin() const { return mIndexMin; }
+
+ int getVolumeIndexMax() const { return mIndexMax; }
+
+ bool hasVolumeIndexForDevice(audio_devices_t device) const
+ {
+ device = Volume::getDeviceForVolume(device);
+ return mIndexCur.find(device) != end(mIndexCur);
+ }
+
+ status_t switchCurvesFrom(const VolumeCurves &referenceCurves)
+ {
+ if (size() != referenceCurves.size()) {
+ ALOGE("%s! device category not aligned, cannot switch", __FUNCTION__);
+ return BAD_TYPE;
+ }
+ for (size_t index = 0; index < size(); index++) {
+ device_category cat = keyAt(index);
+ setVolumeCurve(cat, referenceCurves.getOriginVolumeCurve(cat));
+ }
+ return NO_ERROR;
+ }
+ status_t restoreOriginVolumeCurve()
+ {
+ return switchCurvesFrom(*this);
+ }
+
+ const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
+ {
+ ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
+ return mOriginVolumeCurves.valueFor(deviceCategory);
+ }
+ void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
+ {
+ ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
+ replaceValueFor(deviceCategory, volumeCurve);
+ }
+
+ ssize_t add(const sp<VolumeCurve> &volumeCurve)
+ {
+ device_category deviceCategory = volumeCurve->getDeviceCategory();
+ ssize_t index = indexOfKey(deviceCategory);
+ if (index < 0) {
+ // Keep track of original Volume Curves per device category in order to switch curves.
+ mOriginVolumeCurves.add(deviceCategory, volumeCurve);
+ return KeyedVector::add(deviceCategory, volumeCurve);
+ }
+ return index;
+ }
+
+ virtual float volIndexToDb(device_category deviceCat, int indexInUi) const
+ {
+ sp<VolumeCurve> vc = getCurvesFor(deviceCat);
+ if (vc != 0) {
+ return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
+ } else {
+ ALOGE("Invalid device category %d for Volume Curve", deviceCat);
+ return 0.0f;
+ }
+ }
+ void addAttributes(const audio_attributes_t &attr)
+ {
+ mAttributes.push_back(attr);
+ }
+ AttributesVector getAttributes() const override { return mAttributes; }
+ void addStreamType(audio_stream_type_t stream)
+ {
+ mStreams.push_back(stream);
+ }
+ StreamTypeVector getStreamTypes() const override { return mStreams; }
+
+ void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const override;
+
+private:
+ KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
+ std::map<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
+ int mIndexMin; /**< min volume index. */
+ int mIndexMax; /**< max volume index. */
+ const bool mCanBeMuted = true; /**< true if the stream can be muted. */
+
+ AttributesVector mAttributes;
+ StreamTypeVector mStreams; /**< Keep it for legacy. */
+};
+
+} // namespace android
diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h
new file mode 100644
index 0000000..c34b406
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/VolumeGroup.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioPolicyManagerInterface.h>
+#include <VolumeCurve.h>
+#include <system/audio.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <utils/Errors.h>
+
+namespace android {
+
+class VolumeGroup : public virtual RefBase, private HandleGenerator<uint32_t>
+{
+public:
+ VolumeGroup(const std::string &name, int indexMin, int indexMax);
+ std::string getName() const { return mName; }
+ volume_group_t getId() const { return mId; }
+
+ void add(const sp<VolumeCurve> &curve);
+
+ VolumeCurves *getVolumeCurves() { return &mGroupVolumeCurves; }
+
+ void addSupportedAttributes(const audio_attributes_t &attr);
+ AttributesVector getSupportedAttributes() const { return mGroupVolumeCurves.getAttributes(); }
+
+ void addSupportedStream(audio_stream_type_t stream);
+ StreamTypeVector getStreamTypes() const { return mGroupVolumeCurves.getStreamTypes(); }
+
+ void dump(String8 *dst, int spaces = 0) const;
+
+private:
+ const std::string mName;
+ const volume_group_t mId;
+ VolumeCurves mGroupVolumeCurves;
+};
+
+class VolumeGroupMap : public std::map<volume_group_t, sp<VolumeGroup> >
+{
+public:
+ void dump(String8 *dst, int spaces = 0) const;
+};
+
+} // namespace android
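
A minimal sketch of how a volume group gets its curves wired up (the values are invented for illustration; in this change the equivalent wiring happens in EngineBase::loadAudioPolicyEngineConfig):

    #include <VolumeGroup.h>

    namespace android {

    static sp<VolumeGroup> makeSpeakerMusicGroup()
    {
        sp<VolumeGroup> group = new VolumeGroup("music", 0 /*indexMin*/, 25 /*indexMax*/);
        sp<VolumeCurve> curve = new VolumeCurve(DEVICE_CATEGORY_SPEAKER);
        curve->add({1, -4950});    // UI index 1   -> -49.50 dB
        curve->add({100, 0});      // UI index 100 ->   0.00 dB
        group->add(curve);
        group->addSupportedStream(AUDIO_STREAM_MUSIC);
        return group;
    }

    }  // namespace android
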
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
new file mode 100644
index 0000000..4fe7b42
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/Base"
+//#define LOG_NDEBUG 0
+
+#include "EngineBase.h"
+#include "EngineDefaultConfig.h"
+#include <TypeConverter.h>
+
+namespace android {
+namespace audio_policy {
+
+void EngineBase::setObserver(AudioPolicyManagerObserver *observer)
+{
+ ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
+ mApmObserver = observer;
+}
+
+status_t EngineBase::initCheck()
+{
+ return (mApmObserver != nullptr)? NO_ERROR : NO_INIT;
+}
+
+status_t EngineBase::setPhoneState(audio_mode_t state)
+{
+ ALOGV("setPhoneState() state %d", state);
+
+ if (state < 0 || state >= AUDIO_MODE_CNT) {
+ ALOGW("setPhoneState() invalid state %d", state);
+ return BAD_VALUE;
+ }
+
+ if (state == mPhoneState ) {
+ ALOGW("setPhoneState() setting same state %d", state);
+ return BAD_VALUE;
+ }
+
+ // store previous phone state for management of sonification strategy below
+ int oldState = mPhoneState;
+ mPhoneState = state;
+
+ if (!is_state_in_call(oldState) && is_state_in_call(state)) {
+ ALOGV(" Entering call in setPhoneState()");
+ switchVolumeCurve(AUDIO_STREAM_VOICE_CALL, AUDIO_STREAM_DTMF);
+ } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
+ ALOGV(" Exiting call in setPhoneState()");
+ restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
+ }
+ return NO_ERROR;
+}
+
+product_strategy_t EngineBase::getProductStrategyForAttributes(const audio_attributes_t &attr) const
+{
+ return mProductStrategies.getProductStrategyForAttributes(attr);
+}
+
+audio_stream_type_t EngineBase::getStreamTypeForAttributes(const audio_attributes_t &attr) const
+{
+ return mProductStrategies.getStreamTypeForAttributes(attr);
+}
+
+audio_attributes_t EngineBase::getAttributesForStreamType(audio_stream_type_t stream) const
+{
+ return mProductStrategies.getAttributesForStreamType(stream);
+}
+
+product_strategy_t EngineBase::getProductStrategyForStream(audio_stream_type_t stream) const
+{
+ return mProductStrategies.getProductStrategyForStream(stream);
+}
+
+product_strategy_t EngineBase::getProductStrategyByName(const std::string &name) const
+{
+ for (const auto &iter : mProductStrategies) {
+ if (iter.second->getName() == name) {
+ return iter.second->getId();
+ }
+ }
+ return PRODUCT_STRATEGY_NONE;
+}
+
+engineConfig::ParsingResult EngineBase::loadAudioPolicyEngineConfig()
+{
+ auto loadProductStrategies =
+ [](auto& strategyConfigs, auto& productStrategies, auto& volumeGroups) {
+ for (auto& strategyConfig : strategyConfigs) {
+ sp<ProductStrategy> strategy = new ProductStrategy(strategyConfig.name);
+ for (const auto &group : strategyConfig.attributesGroups) {
+ const auto &iter = std::find_if(begin(volumeGroups), end(volumeGroups),
+ [&group](const auto &volumeGroup) {
+ return group.volumeGroup == volumeGroup.second->getName(); });
+ ALOG_ASSERT(iter != end(volumeGroups), "Invalid Volume Group Name %s",
+ group.volumeGroup.c_str());
+ if (group.stream != AUDIO_STREAM_DEFAULT) {
+ iter->second->addSupportedStream(group.stream);
+ }
+ for (const auto &attr : group.attributesVect) {
+ strategy->addAttributes({group.stream, iter->second->getId(), attr});
+ iter->second->addSupportedAttributes(attr);
+ }
+ }
+ product_strategy_t strategyId = strategy->getId();
+ productStrategies[strategyId] = strategy;
+ }
+ };
+ auto loadVolumeGroups = [](auto &volumeConfigs, auto &volumeGroups) {
+ for (auto &volumeConfig : volumeConfigs) {
+ sp<VolumeGroup> volumeGroup = new VolumeGroup(volumeConfig.name, volumeConfig.indexMin,
+ volumeConfig.indexMax);
+ volumeGroups[volumeGroup->getId()] = volumeGroup;
+
+ for (auto &configCurve : volumeConfig.volumeCurves) {
+ device_category deviceCat = DEVICE_CATEGORY_SPEAKER;
+ if (!DeviceCategoryConverter::fromString(configCurve.deviceCategory, deviceCat)) {
+ ALOGE("%s: Invalid %s", __FUNCTION__, configCurve.deviceCategory.c_str());
+ continue;
+ }
+ sp<VolumeCurve> curve = new VolumeCurve(deviceCat);
+ for (auto &point : configCurve.curvePoints) {
+ curve->add({point.index, point.attenuationInMb});
+ }
+ volumeGroup->add(curve);
+ }
+ }
+ };
+ auto result = engineConfig::parse();
+ if (result.parsedConfig == nullptr) {
+ ALOGW("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
+ engineConfig::Config config = gDefaultEngineConfig;
+ android::status_t ret = engineConfig::parseLegacyVolumes(config.volumeGroups);
+ result = {std::make_unique<engineConfig::Config>(config),
+ static_cast<size_t>(ret == NO_ERROR ? 0 : 1)};
+ }
+ ALOGE_IF(result.nbSkippedElement != 0, "skipped %zu elements", result.nbSkippedElement);
+ loadVolumeGroups(result.parsedConfig->volumeGroups, mVolumeGroups);
+ loadProductStrategies(result.parsedConfig->productStrategies, mProductStrategies,
+ mVolumeGroups);
+ mProductStrategies.initialize();
+ return result;
+}
+
+StrategyVector EngineBase::getOrderedProductStrategies() const
+{
+ auto findByFlag = [](const auto &productStrategies, auto flag) {
+ return std::find_if(begin(productStrategies), end(productStrategies),
+ [&](const auto &strategy) {
+ for (const auto &attributes : strategy.second->getAudioAttributes()) {
+ if ((attributes.flags & flag) == flag) {
+ return true;
+ }
+ }
+ return false;
+ });
+ };
+ auto strategies = mProductStrategies;
+ auto enforcedAudibleStrategyIter = findByFlag(strategies, AUDIO_FLAG_AUDIBILITY_ENFORCED);
+
+ if (getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED &&
+ enforcedAudibleStrategyIter != strategies.end()) {
+ auto enforcedAudibleStrategy = *enforcedAudibleStrategyIter;
+ strategies.erase(enforcedAudibleStrategyIter);
+ strategies.insert(begin(strategies), enforcedAudibleStrategy);
+ }
+ StrategyVector orderedStrategies;
+ for (const auto &iter : strategies) {
+ orderedStrategies.push_back(iter.second->getId());
+ }
+ return orderedStrategies;
+}
+
+StreamTypeVector EngineBase::getStreamTypesForProductStrategy(product_strategy_t ps) const
+{
+ // @TODO default music stream to control volume if no group?
+ return (mProductStrategies.find(ps) != end(mProductStrategies)) ?
+ mProductStrategies.at(ps)->getSupportedStreams() :
+ StreamTypeVector({AUDIO_STREAM_MUSIC});
+}
+
+AttributesVector EngineBase::getAllAttributesForProductStrategy(product_strategy_t ps) const
+{
+ return (mProductStrategies.find(ps) != end(mProductStrategies)) ?
+ mProductStrategies.at(ps)->getAudioAttributes() : AttributesVector();
+}
+
+status_t EngineBase::listAudioProductStrategies(AudioProductStrategyVector &strategies) const
+{
+ for (const auto &iter : mProductStrategies) {
+ const auto &productStrategy = iter.second;
+ strategies.push_back(
+ {productStrategy->getName(), productStrategy->listAudioAttributes(),
+ productStrategy->getId()});
+ }
+ return NO_ERROR;
+}
+
+VolumeCurves *EngineBase::getVolumeCurvesForAttributes(const audio_attributes_t &attr) const
+{
+ volume_group_t volGr = mProductStrategies.getVolumeGroupForAttributes(attr);
+ const auto &iter = mVolumeGroups.find(volGr);
+ LOG_ALWAYS_FATAL_IF(iter == std::end(mVolumeGroups), "No volume groups for %s", toString(attr).c_str());
+ return mVolumeGroups.at(volGr)->getVolumeCurves();
+}
+
+VolumeCurves *EngineBase::getVolumeCurvesForStreamType(audio_stream_type_t stream) const
+{
+ volume_group_t volGr = mProductStrategies.getVolumeGroupForStreamType(stream);
+ const auto &iter = mVolumeGroups.find(volGr);
+ LOG_ALWAYS_FATAL_IF(iter == std::end(mVolumeGroups), "No volume groups for %s",
+ toString(stream).c_str());
+ return mVolumeGroups.at(volGr)->getVolumeCurves();
+}
+
+status_t EngineBase::switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
+{
+ auto srcCurves = getVolumeCurvesForStreamType(streamSrc);
+ auto dstCurves = getVolumeCurvesForStreamType(streamDst);
+
+ if (srcCurves == nullptr || dstCurves == nullptr) {
+ return BAD_VALUE;
+ }
+ return dstCurves->switchCurvesFrom(*srcCurves);
+}
+
+status_t EngineBase::restoreOriginVolumeCurve(audio_stream_type_t stream)
+{
+ VolumeCurves *curves = getVolumeCurvesForStreamType(stream);
+ return curves != nullptr ? curves->switchCurvesFrom(*curves) : BAD_VALUE;
+}
+
+VolumeGroupVector EngineBase::getVolumeGroups() const
+{
+ VolumeGroupVector group;
+ for (const auto &iter : mVolumeGroups) {
+ group.push_back(iter.first);
+ }
+ return group;
+}
+
+volume_group_t EngineBase::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ return mProductStrategies.getVolumeGroupForAttributes(attr);
+}
+
+volume_group_t EngineBase::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ return mProductStrategies.getVolumeGroupForStreamType(stream);
+}
+
+StreamTypeVector EngineBase::getStreamTypesForVolumeGroup(volume_group_t volumeGroup) const
+{
+ // @TODO default music stream to control volume if no group?
+ return (mVolumeGroups.find(volumeGroup) != end(mVolumeGroups)) ?
+ mVolumeGroups.at(volumeGroup)->getStreamTypes() :
+ StreamTypeVector({AUDIO_STREAM_MUSIC});
+}
+
+AttributesVector EngineBase::getAllAttributesForVolumeGroup(volume_group_t volumeGroup) const
+{
+ return (mVolumeGroups.find(volumeGroup) != end(mVolumeGroups)) ?
+ mVolumeGroups.at(volumeGroup)->getSupportedAttributes() : AttributesVector();
+}
+
+status_t EngineBase::listAudioVolumeGroups(AudioVolumeGroupVector &groups) const
+{
+ for (const auto &iter : mVolumeGroups) {
+ groups.push_back({iter.second->getName(), iter.second->getId(),
+ iter.second->getSupportedAttributes(), iter.second->getStreamTypes()});
+ }
+ return NO_ERROR;
+}
+
+void EngineBase::dump(String8 *dst) const
+{
+ mProductStrategies.dump(dst, 2);
+ mVolumeGroups.dump(dst, 2);
+}
+
+} // namespace audio_policy
+} // namespace android
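
To make the curve switching in setPhoneState() concrete, a usage sketch (illustrative only, not test code from this change):

    #include <EngineBase.h>

    namespace android {
    namespace audio_policy {

    static void simulateCall(EngineBase &engine)
    {
        engine.setPhoneState(AUDIO_MODE_IN_CALL);  // DTMF now follows the VOICE_CALL curves
        // ... call in progress ...
        engine.setPhoneState(AUDIO_MODE_NORMAL);   // original DTMF curves are restored
    }

    }  // namespace audio_policy
    }  // namespace android
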
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
new file mode 100644
index 0000000..fede0d9
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+
+namespace android {
+/**
+ * @brief Hard-coded array of product strategies used to fulfill the new engine API contract.
+ */
+const engineConfig::ProductStrategies gOrderedStrategies = {
+ {"STRATEGY_PHONE",
+ {
+ {"phone", AUDIO_STREAM_VOICE_CALL, "AUDIO_STREAM_VOICE_CALL",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION, AUDIO_SOURCE_DEFAULT, 0,
+ ""}},
+ },
+ {"sco", AUDIO_STREAM_BLUETOOTH_SCO, "AUDIO_STREAM_BLUETOOTH_SCO",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_SCO,
+ ""}},
+ }
+ },
+ },
+ {"STRATEGY_SONIFICATION",
+ {
+ {"ring", AUDIO_STREAM_RING, "AUDIO_STREAM_RING",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
+ AUDIO_SOURCE_DEFAULT, 0, ""}}
+ },
+ {"alarm", AUDIO_STREAM_ALARM, "AUDIO_STREAM_ALARM",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, 0, ""}},
+ }
+ },
+ },
+ {"STRATEGY_ENFORCED_AUDIBLE",
+ {
+ {"", AUDIO_STREAM_ENFORCED_AUDIBLE, "AUDIO_STREAM_ENFORCED_AUDIBLE",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_AUDIBILITY_ENFORCED, ""}}
+ }
+ },
+ },
+ {"STRATEGY_ACCESSIBILITY",
+ {
+ {"", AUDIO_STREAM_ACCESSIBILITY, "AUDIO_STREAM_ACCESSIBILITY",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+ AUDIO_SOURCE_DEFAULT, 0, ""}}
+ }
+ },
+ },
+ {"STRATEGY_SONIFICATION_RESPECTFUL",
+ {
+ {"", AUDIO_STREAM_NOTIFICATION, "AUDIO_STREAM_NOTIFICATION",
+ {
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_EVENT,
+ AUDIO_SOURCE_DEFAULT, 0, ""}
+ }
+ }
+ },
+ },
+ {"STRATEGY_MEDIA",
+ {
+ {"music", AUDIO_STREAM_MUSIC, "AUDIO_STREAM_MUSIC",
+ {
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_GAME, AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANT, AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}
+ },
+ },
+ {"system", AUDIO_STREAM_SYSTEM, "AUDIO_STREAM_SYSTEM",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_SONIFICATION,
+ AUDIO_SOURCE_DEFAULT, 0, ""}}
+ }
+ },
+ },
+ {"STRATEGY_DTMF",
+ {
+ {"", AUDIO_STREAM_DTMF, "AUDIO_STREAM_DTMF",
+ {
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+ AUDIO_SOURCE_DEFAULT, 0, ""}
+ }
+ }
+ },
+ },
+ {"STRATEGY_TRANSMITTED_THROUGH_SPEAKER",
+ {
+ {"", AUDIO_STREAM_TTS, "AUDIO_STREAM_TTS",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_BEACON, ""}}
+ }
+ },
+ },
+ {"STRATEGY_REROUTING",
+ {
+ {"", AUDIO_STREAM_REROUTING, "AUDIO_STREAM_REROUTING",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
+ }
+ },
+ },
+ {"STRATEGY_PATCH",
+ {
+ {"", AUDIO_STREAM_PATCH, "AUDIO_STREAM_PATCH",
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
+ }
+ },
+ }
+};
+
+const engineConfig::Config gDefaultEngineConfig = {
+ 1.0,
+ gOrderedStrategies,
+ {},
+ {},
+ {}
+};
+} // namespace android
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
new file mode 100644
index 0000000..16e6690
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/ProductStrategy"
+//#define LOG_NDEBUG 0
+
+#include "ProductStrategy.h"
+
+#include <media/TypeConverter.h>
+#include <utils/String8.h>
+#include <cstdint>
+#include <string>
+
+#include <log/log.h>
+
+
+namespace android {
+
+ProductStrategy::ProductStrategy(const std::string &name) :
+ mName(name),
+ mId(static_cast<product_strategy_t>(HandleGenerator<uint32_t>::getNextHandle()))
+{
+}
+
+void ProductStrategy::addAttributes(const AudioAttributes &audioAttributes)
+{
+ mAttributesVector.push_back(audioAttributes);
+}
+
+std::vector<android::AudioAttributes> ProductStrategy::listAudioAttributes() const
+{
+ std::vector<android::AudioAttributes> androidAa;
+ for (const auto &attr : mAttributesVector) {
+ androidAa.push_back({attr.mVolumeGroup, attr.mStream, attr.mAttributes});
+ }
+ return androidAa;
+}
+
+AttributesVector ProductStrategy::getAudioAttributes() const
+{
+ AttributesVector attrVector;
+ for (const auto &attrGroup : mAttributesVector) {
+ attrVector.push_back(attrGroup.mAttributes);
+ }
+ if (not attrVector.empty()) {
+ return attrVector;
+ }
+ return { AUDIO_ATTRIBUTES_INITIALIZER };
+}
+
+bool ProductStrategy::matches(const audio_attributes_t attr) const
+{
+ return std::find_if(begin(mAttributesVector), end(mAttributesVector),
+ [&attr](const auto &supportedAttr) {
+ return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr);
+ }) != end(mAttributesVector);
+}
+
+audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(
+ const audio_attributes_t &attr) const
+{
+ const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
+ [&attr](const auto &supportedAttr) {
+ return AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr); });
+ return iter != end(mAttributesVector) ? iter->mStream : AUDIO_STREAM_DEFAULT;
+}
+
+audio_attributes_t ProductStrategy::getAttributesForStreamType(audio_stream_type_t streamType) const
+{
+ const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
+ [&streamType](const auto &supportedAttr) {
+ return supportedAttr.mStream == streamType; });
+ return iter != end(mAttributesVector) ? iter->mAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
+}
+
+bool ProductStrategy::isDefault() const
+{
+ return std::find_if(begin(mAttributesVector), end(mAttributesVector), [](const auto &attr) {
+ return attr.mAttributes == defaultAttr; }) != end(mAttributesVector);
+}
+
+StreamTypeVector ProductStrategy::getSupportedStreams() const
+{
+ StreamTypeVector streams;
+ for (const auto &supportedAttr : mAttributesVector) {
+ if (std::find(begin(streams), end(streams), supportedAttr.mStream) == end(streams) &&
+ supportedAttr.mStream != AUDIO_STREAM_DEFAULT) {
+ streams.push_back(supportedAttr.mStream);
+ }
+ }
+ return streams;
+}
+
+bool ProductStrategy::supportStreamType(const audio_stream_type_t &streamType) const
+{
+ return std::find_if(begin(mAttributesVector), end(mAttributesVector),
+ [&streamType](const auto &supportedAttr) {
+ return supportedAttr.mStream == streamType; }) != end(mAttributesVector);
+}
+
+volume_group_t ProductStrategy::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ for (const auto &supportedAttr : mAttributesVector) {
+ if (AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr)) {
+ return supportedAttr.mVolumeGroup;
+ }
+ }
+ return VOLUME_GROUP_NONE;
+}
+
+volume_group_t ProductStrategy::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ for (const auto &supportedAttr : mAttributesVector) {
+ if (supportedAttr.mStream == stream) {
+ return supportedAttr.mVolumeGroup;
+ }
+ }
+ return VOLUME_GROUP_NONE;
+}
+
+volume_group_t ProductStrategy::getDefaultVolumeGroup() const
+{
+ const auto &iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
+ [](const auto &attr) {return attr.mAttributes == defaultAttr;});
+ return iter != end(mAttributesVector) ? iter->mVolumeGroup : VOLUME_GROUP_NONE;
+}
+
+void ProductStrategy::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
+ std::string deviceLiteral;
+ if (!OutputDeviceConverter::toString(mApplicableDevices, deviceLiteral)) {
+ ALOGE("%s: failed to convert device %d", __FUNCTION__, mApplicableDevices);
+ }
+ dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
+ deviceLiteral.c_str(), mDeviceAddress.c_str());
+
+ for (const auto &attr : mAttributesVector) {
+ dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.mVolumeGroup,
+ android::toString(attr.mStream).c_str());
+ dst->appendFormat("%*s Attributes: ", spaces + 3, "");
+ std::string attStr =
+ attr.mAttributes == defaultAttr ? "{ Any }" : android::toString(attr.mAttributes);
+ dst->appendFormat("%s\n", attStr.c_str());
+ }
+}
+
+product_strategy_t ProductStrategyMap::getProductStrategyForAttributes(
+ const audio_attributes_t &attr) const
+{
+ for (const auto &iter : *this) {
+ if (iter.second->matches(attr)) {
+ return iter.second->getId();
+ }
+ }
+ ALOGV("%s: No matching product strategy for attributes %s, return default", __FUNCTION__,
+ toString(attr).c_str());
+ return getDefault();
+}
+
+audio_attributes_t ProductStrategyMap::getAttributesForStreamType(audio_stream_type_t stream) const
+{
+ for (const auto &iter : *this) {
+ const auto strategy = iter.second;
+ if (strategy->supportStreamType(stream)) {
+ return strategy->getAttributesForStreamType(stream);
+ }
+ }
+ ALOGV("%s: No product strategy for stream %s, using default", __FUNCTION__,
+ toString(stream).c_str());
+ return {};
+}
+
+audio_stream_type_t ProductStrategyMap::getStreamTypeForAttributes(
+ const audio_attributes_t &attr) const
+{
+ for (const auto &iter : *this) {
+ audio_stream_type_t stream = iter.second->getStreamTypeForAttributes(attr);
+ if (stream != AUDIO_STREAM_DEFAULT) {
+ return stream;
+ }
+ }
+ ALOGV("%s: No product strategy for attributes %s, using default (aka MUSIC)", __FUNCTION__,
+ toString(attr).c_str());
+ return AUDIO_STREAM_MUSIC;
+}
+
+product_strategy_t ProductStrategyMap::getDefault() const
+{
+ if (mDefaultStrategy != PRODUCT_STRATEGY_NONE) {
+ return mDefaultStrategy;
+ }
+ for (const auto &iter : *this) {
+ if (iter.second->isDefault()) {
+ ALOGV("%s: using default %s", __FUNCTION__, iter.second->getName().c_str());
+ return iter.second->getId();
+ }
+ }
+ ALOGE("%s: No default product strategy defined", __FUNCTION__);
+ return PRODUCT_STRATEGY_NONE;
+}
+
+audio_attributes_t ProductStrategyMap::getAttributesForProductStrategy(
+ product_strategy_t strategy) const
+{
+ if (find(strategy) == end()) {
+ ALOGE("Invalid %d strategy requested", strategy);
+ return AUDIO_ATTRIBUTES_INITIALIZER;
+ }
+ return at(strategy)->getAudioAttributes()[0];
+}
+
+product_strategy_t ProductStrategyMap::getProductStrategyForStream(audio_stream_type_t stream) const
+{
+ for (const auto &iter : *this) {
+ if (iter.second->supportStreamType(stream)) {
+ return iter.second->getId();
+ }
+ }
+ ALOGV("%s: No product strategy for stream %d, using default", __FUNCTION__, stream);
+ return getDefault();
+}
+
+
+audio_devices_t ProductStrategyMap::getDeviceTypesForProductStrategy(
+ product_strategy_t strategy) const
+{
+ if (find(strategy) == end()) {
+ ALOGE("Invalid %d strategy requested, returning device for default strategy", strategy);
+ product_strategy_t defaultStrategy = getDefault();
+ if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+ return AUDIO_DEVICE_NONE;
+ }
+ return at(getDefault())->getDeviceTypes();
+ }
+ return at(strategy)->getDeviceTypes();
+}
+
+std::string ProductStrategyMap::getDeviceAddressForProductStrategy(product_strategy_t psId) const
+{
+ if (find(psId) == end()) {
+ ALOGE("Invalid %d strategy requested, returning device for default strategy", psId);
+ product_strategy_t defaultStrategy = getDefault();
+ if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+ return {};
+ }
+ return at(getDefault())->getDeviceAddress();
+ }
+ return at(psId)->getDeviceAddress();
+}
+
+volume_group_t ProductStrategyMap::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ for (const auto &iter : *this) {
+ volume_group_t group = iter.second->getVolumeGroupForAttributes(attr);
+ if (group != VOLUME_GROUP_NONE) {
+ return group;
+ }
+ }
+ product_strategy_t defaultStrategy = getDefault();
+ if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+ return VOLUME_GROUP_NONE;
+ }
+ return at(defaultStrategy)->getDefaultVolumeGroup();
+}
+
+volume_group_t ProductStrategyMap::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ for (const auto &iter : *this) {
+ volume_group_t group = iter.second->getVolumeGroupForStreamType(stream);
+ if (group != VOLUME_GROUP_NONE) {
+ return group;
+ }
+ }
+ product_strategy_t defaultStrategy = getDefault();
+ if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+ return VOLUME_GROUP_NONE;
+ }
+ return at(defaultStrategy)->getDefaultVolumeGroup();
+}
+
+void ProductStrategyMap::initialize()
+{
+ mDefaultStrategy = getDefault();
+ ALOG_ASSERT(mDefaultStrategy != PRODUCT_STRATEGY_NONE, "No default product strategy found");
+}
+
+void ProductStrategyMap::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("%*sProduct Strategies dump:", spaces, "");
+ for (const auto &iter : *this) {
+ iter.second->dump(dst, spaces + 2);
+ }
+}
+
+} // namespace android
+
diff --git a/services/audiopolicy/engine/common/src/VolumeCurve.cpp b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
new file mode 100644
index 0000000..c352578
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::VolumeCurve"
+//#define LOG_NDEBUG 0
+
+#include "VolumeCurve.h"
+#include "TypeConverter.h"
+#include <media/TypeConverter.h>
+
+namespace android {
+
+float VolumeCurve::volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const
+{
+ ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
+ if (volIndexMin < 0 || volIndexMax < 0) {
+ // In order to let AudioService initialize the min and max, the convention is to use -1
+ return NAN;
+ }
+ if (indexInUi < volIndexMin) {
+ // an index of 0 means mute request when volIndexMin > 0
+ if (indexInUi == 0) {
+ ALOGV("VOLUME forcing mute for index 0 with min index %d", volIndexMin);
+ return VOLUME_MIN_DB;
+ }
+ ALOGV("VOLUME remapping index from %d to min index %d", indexInUi, volIndexMin);
+ indexInUi = volIndexMin;
+ } else if (indexInUi > volIndexMax) {
+ ALOGV("VOLUME remapping index from %d to max index %d", indexInUi, volIndexMax);
+ indexInUi = volIndexMax;
+ }
+
+ size_t nbCurvePoints = mCurvePoints.size();
+ // the volume index in the UI is relative to the min and max volume indices for this stream
+ int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
+ int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
+
+ // Find the position where this volume index would have been inserted in the curve point table
+ size_t indexInUiPosition = mCurvePoints.orderOf(CurvePoint(volIdx, 0));
+ if (indexInUiPosition >= nbCurvePoints) {
+ // use the last point of the table
+ return mCurvePoints[nbCurvePoints - 1].mAttenuationInMb / 100.0f;
+ }
+ if (indexInUiPosition == 0) {
+ if (indexInUiPosition != mCurvePoints[0].mIndex) {
+ return VOLUME_MIN_DB; // out of bounds
+ }
+ return mCurvePoints[0].mAttenuationInMb / 100.0f;
+ }
+ // linear interpolation in the attenuation table in dB
+ float decibels = (mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f) +
+ ((float)(volIdx - mCurvePoints[indexInUiPosition - 1].mIndex)) *
+ ( ((mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f) -
+ (mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f)) /
+ ((float)(mCurvePoints[indexInUiPosition].mIndex -
+ mCurvePoints[indexInUiPosition - 1].mIndex)) );
+
+ ALOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
+ mCurvePoints[indexInUiPosition - 1].mIndex, volIdx,
+ mCurvePoints[indexInUiPosition].mIndex,
+ ((float)mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f), decibels,
+ ((float)mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f));
+
+ return decibels;
+}
+
+void VolumeCurve::dump(String8 *dst, int spaces, bool curvePoints) const
+{
+ if (!curvePoints) {
+ return;
+ }
+ dst->append(" {");
+ for (size_t i = 0; i < mCurvePoints.size(); i++) {
+ dst->appendFormat("%*s(%3d, %5d)", spaces, "", mCurvePoints[i].mIndex,
+ mCurvePoints[i].mAttenuationInMb);
+ dst->appendFormat(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
+ }
+}
+
+void VolumeCurves::dump(String8 *dst, int spaces, bool curvePoints) const
+{
+ if (!curvePoints) {
+// dst->appendFormat("%*s%02d %s %03d %03d ", spaces, "",
+// mStream, mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+ dst->appendFormat("%*s Can be muted Index Min Index Max Index Cur [device : index]...\n",
+ spaces + 1, "");
+ dst->appendFormat("%*s %s %02d %02d ", spaces + 1, "",
+ mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+ for (const auto &pair : mIndexCur) {
+ dst->appendFormat("%04x : %02d, ", pair.first, pair.second);
+ }
+ dst->appendFormat("\n");
+ return;
+ }
+ std::string streamNames;
+ for (const auto &stream : mStreams) {
+ streamNames += android::toString(stream) + "("+std::to_string(stream)+") ";
+ }
+ dst->appendFormat("%*sVolume Curves Streams/Attributes, Curve points Streams for device"
+ " category (index, attenuation in millibel)\n", spaces, "");
+ dst->appendFormat("%*s Streams: %s \n", spaces, "", streamNames.c_str());
+ if (!mAttributes.empty()) dst->appendFormat("%*s Attributes:", spaces, "");
+ for (const auto &attributes : mAttributes) {
+ std::string attStr = attributes == defaultAttr ? "{ Any }" : android::toString(attributes);
+ dst->appendFormat("%*s %s\n", attributes == mAttributes.front() ? 0 : spaces + 13, "",
+ attStr.c_str());
+ }
+ for (size_t i = 0; i < size(); i++) {
+ std::string deviceCatLiteral;
+ DeviceCategoryConverter::toString(keyAt(i), deviceCatLiteral);
+ dst->appendFormat("%*s %s :", spaces, "", deviceCatLiteral.c_str());
+ valueAt(i)->dump(dst, 1, true);
+ }
+}
+
+} // namespace android
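
A quick worked example of the interpolation in volIndexToDb(), with assumed values that are not taken from this change: for a curve with the two points (1, -4950 mB) and (100, 0 mB), volIndexMin = 0, volIndexMax = 25 and a UI index of 13, nbSteps = 1 + 100 - 1 = 100, volIdx = (100 * 13) / 25 = 52, and the linear interpolation between the two points gives -49.5 dB + (52 - 1) * (0 - (-49.5)) / (100 - 1) = -24.0 dB.
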
diff --git a/services/audiopolicy/engine/common/src/VolumeGroup.cpp b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
new file mode 100644
index 0000000..e189807
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/VolumeGroup"
+//#define LOG_NDEBUG 0
+
+#include "VolumeGroup.h"
+#include <media/TypeConverter.h>
+#include <utils/String8.h>
+#include <cstdint>
+#include <string>
+
+#include <log/log.h>
+
+
+namespace android {
+
+//
+// VolumeGroup implementation
+//
+VolumeGroup::VolumeGroup(const std::string &name, int indexMin, int indexMax) :
+ mName(name), mId(static_cast<volume_group_t>(HandleGenerator<uint32_t>::getNextHandle())),
+ mGroupVolumeCurves(VolumeCurves(indexMin, indexMax))
+{
+}
+
+void VolumeGroup::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
+ mGroupVolumeCurves.dump(dst, spaces + 2, true);
+ mGroupVolumeCurves.dump(dst, spaces + 2, false);
+ dst->appendFormat("\n");
+}
+
+void VolumeGroup::add(const sp<VolumeCurve> &curve)
+{
+ mGroupVolumeCurves.add(curve);
+}
+
+void VolumeGroup::addSupportedAttributes(const audio_attributes_t &attr)
+{
+ mGroupVolumeCurves.addAttributes(attr);
+}
+
+void VolumeGroup::addSupportedStream(audio_stream_type_t stream)
+{
+ mGroupVolumeCurves.addStreamType(stream);
+}
+
+//
+// VolumeGroupMap implementation
+//
+void VolumeGroupMap::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*sVolume Groups dump:", spaces, "");
+ for (const auto &iter : *this) {
+ iter.second->dump(dst, spaces + 2);
+ }
+}
+
+} // namespace android
+
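For context, a minimal caller-side sketch of the VolumeGroup class defined above (illustrative only, not part of the patch). It assumes the matching VolumeGroup.h declares the constructor, addSupportedStream() and dump() used here and that VolumeGroup derives from RefBase so it can be held in an sp<>; the group name, index range and log tag are made up:

    #define LOG_TAG "VolumeGroupExample"
    #include <log/log.h>
    #include <system/audio.h>
    #include <utils/String8.h>
    #include "VolumeGroup.h"

    void dumpExampleVolumeGroup() {
        // Hypothetical group covering the media use case, index range 0..40.
        android::sp<android::VolumeGroup> group =
                new android::VolumeGroup("media", 0 /*indexMin*/, 40 /*indexMax*/);
        group->addSupportedStream(AUDIO_STREAM_MUSIC);

        android::String8 result;
        group->dump(&result, 2 /*spaces*/); // prints name, id, then curve points and index dump
        ALOGV("%s", result.string());
    }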
diff --git a/services/audiopolicy/engine/config/Android.mk b/services/audiopolicy/engine/config/Android.mk
new file mode 100644
index 0000000..0b292a5
--- /dev/null
+++ b/services/audiopolicy/engine/config/Android.mk
@@ -0,0 +1,42 @@
+LOCAL_PATH := $(call my-dir)
+
+##################################################################
+# Component build
+##################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_EXPORT_C_INCLUDE_DIRS) \
+ external/libxml2/include \
+ external/icu/icu4c/source/common
+
+LOCAL_SRC_FILES := \
+ src/EngineConfig.cpp
+
+LOCAL_CFLAGS += -Wall -Werror -Wextra
+
+LOCAL_SHARED_LIBRARIES := \
+ libmedia_helper \
+ libandroidicu \
+ libxml2 \
+ libutils \
+ liblog \
+ libcutils
+
+LOCAL_STATIC_LIBRARIES := \
+ libaudiopolicycomponents
+
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+LOCAL_MODULE := libaudiopolicyengineconfig
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_HEADER_LIBRARIES := \
+ libaudio_system_headers \
+ libaudiopolicycommon
+
+include $(BUILD_SHARED_LIBRARY)
+
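For readers more used to Soong, an Android.bp module roughly equivalent to the Android.mk above might look as follows (illustrative only, not part of the patch; the module and library names are taken from the makefile, everything else is an assumption):

    cc_library_shared {
        name: "libaudiopolicyengineconfig",
        export_include_dirs: ["include"],
        srcs: ["src/EngineConfig.cpp"],
        cflags: ["-Wall", "-Werror", "-Wextra"],
        shared_libs: [
            "libmedia_helper",
            "libandroidicu",
            "libxml2",
            "libutils",
            "liblog",
            "libcutils",
        ],
        static_libs: ["libaudiopolicycomponents"],
        header_libs: [
            "libaudio_system_headers",
            "libaudiopolicycommon",
        ],
    }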
diff --git a/services/audiopolicy/engine/config/include/EngineConfig.h b/services/audiopolicy/engine/config/include/EngineConfig.h
new file mode 100644
index 0000000..7f5ed5e
--- /dev/null
+++ b/services/audiopolicy/engine/config/include/EngineConfig.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+#include <utils/Errors.h>
+
+struct _xmlNode;
+struct _xmlDoc;
+
+namespace android {
+namespace engineConfig {
+
+/** Default path of the audio policy engine configuration file. */
+constexpr char DEFAULT_PATH[] = "/vendor/etc/audio_policy_engine_configuration.xml";
+
+/** Directories in which the audio policy engine configuration file is searched for. */
+constexpr const char* POLICY_USAGE_LIBRARY_PATH[] = {"/odm/etc/", "/vendor/etc/", "/system/etc/"};
+
+using AttributesVector = std::vector<audio_attributes_t>;
+using StreamVector = std::vector<audio_stream_type_t>;
+
+struct AttributesGroup {
+ std::string name;
+ audio_stream_type_t stream;
+ std::string volumeGroup;
+ AttributesVector attributesVect;
+};
+
+using AttributesGroups = std::vector<AttributesGroup>;
+
+struct CurvePoint {
+ int index;
+ int attenuationInMb;
+};
+using CurvePoints = std::vector<CurvePoint>;
+
+struct VolumeCurve {
+ std::string deviceCategory;
+ CurvePoints curvePoints;
+};
+using VolumeCurves = std::vector<VolumeCurve>;
+
+struct VolumeGroup {
+ std::string name;
+ int indexMin;
+ int indexMax;
+ VolumeCurves volumeCurves;
+};
+using VolumeGroups = std::vector<VolumeGroup>;
+
+struct ProductStrategy {
+ std::string name;
+ AttributesGroups attributesGroups;
+};
+
+using ProductStrategies = std::vector<ProductStrategy>;
+
+using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePairs = std::vector<ValuePair>;
+
+struct CriterionType
+{
+ std::string name;
+ bool isInclusive;
+ ValuePairs valuePairs;
+};
+
+using CriterionTypes = std::vector<CriterionType>;
+
+struct Criterion
+{
+ std::string name;
+ std::string typeName;
+ std::string defaultLiteralValue;
+};
+
+using Criteria = std::vector<Criterion>;
+
+struct Config {
+ float version;
+ ProductStrategies productStrategies;
+ Criteria criteria;
+ CriterionTypes criterionTypes;
+ VolumeGroups volumeGroups;
+};
+
+/** Result of `parse(const char*)` */
+struct ParsingResult {
+ /** Parsed config, nullptr if the xml lib could not load the file */
+ std::unique_ptr<Config> parsedConfig;
+ size_t nbSkippedElement; //!< Number of invalid elements that were skipped while parsing
+};
+
+/** Parses the audio policy engine configuration file at the given path.
+ * @return parsing result, @see ParsingResult and Config
+ */
+ParsingResult parse(const char* path = DEFAULT_PATH);
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups);
+
+} // namespace engineConfig
+} // namespace android
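A short caller-side sketch of the parse() API declared above (illustrative only, not part of the patch; it relies only on the declarations in this header, the log tag is an assumption):

    #define LOG_TAG "EngineConfigExample"
    #include <log/log.h>
    #include <EngineConfig.h>

    void loadEngineConfigExample() {
        // Parse the default configuration file (DEFAULT_PATH).
        auto result = android::engineConfig::parse();
        if (result.parsedConfig == nullptr) {
            ALOGE("engine configuration could not be parsed");
            return;
        }
        ALOGI("version %.1f, %zu skipped element(s)",
              result.parsedConfig->version, result.nbSkippedElement);
        for (const auto &strategy : result.parsedConfig->productStrategies) {
            ALOGI("strategy '%s' with %zu attributes group(s)",
                  strategy.name.c_str(), strategy.attributesGroups.size());
        }
        for (const auto &group : result.parsedConfig->volumeGroups) {
            ALOGI("volume group '%s' [%d..%d] with %zu curve(s)",
                  group.name.c_str(), group.indexMin, group.indexMax,
                  group.volumeCurves.size());
        }
    }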
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
new file mode 100644
index 0000000..1ad7739
--- /dev/null
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -0,0 +1,699 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/Config"
+//#define LOG_NDEBUG 0
+
+#include "EngineConfig.h"
+#include <policy.h>
+#include <cutils/properties.h>
+#include <media/TypeConverter.h>
+#include <media/convert.h>
+#include <utils/Log.h>
+#include <libxml/parser.h>
+#include <libxml/xinclude.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <sstream>
+#include <istream>
+
+#include <cstdint>
+#include <cstring>
+
+
+namespace android {
+
+using utilities::convertTo;
+
+namespace engineConfig {
+
+static constexpr const char *gVersionAttribute = "version";
+static const char *const gReferenceElementName = "reference";
+static const char *const gReferenceAttributeName = "name";
+
+template<typename E, typename C>
+struct BaseSerializerTraits {
+ typedef E Element;
+ typedef C Collection;
+ typedef void* PtrSerializingCtx;
+};
+
+struct AttributesGroupTraits : public BaseSerializerTraits<AttributesGroup, AttributesGroups> {
+ static constexpr const char *tag = "AttributesGroup";
+ static constexpr const char *collectionTag = "AttributesGroups";
+
+ struct Attributes {
+ static constexpr const char *name = "name";
+ static constexpr const char *streamType = "streamType";
+ static constexpr const char *volumeGroup = "volumeGroup";
+ };
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &ps);
+};
+
+struct ProductStrategyTraits : public BaseSerializerTraits<ProductStrategy, ProductStrategies> {
+ static constexpr const char *tag = "ProductStrategy";
+ static constexpr const char *collectionTag = "ProductStrategies";
+
+ struct Attributes {
+ static constexpr const char *name = "name";
+ };
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &ps);
+};
+struct ValueTraits : public BaseSerializerTraits<ValuePair, ValuePairs> {
+ static constexpr const char *tag = "value";
+ static constexpr const char *collectionTag = "values";
+
+ struct Attributes {
+ static constexpr const char *literal = "literal";
+ static constexpr const char *numerical = "numerical";
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
+struct CriterionTypeTraits : public BaseSerializerTraits<CriterionType, CriterionTypes> {
+ static constexpr const char *tag = "criterion_type";
+ static constexpr const char *collectionTag = "criterion_types";
+
+ struct Attributes {
+ static constexpr const char *name = "name";
+ static constexpr const char *type = "type";
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
+struct CriterionTraits : public BaseSerializerTraits<Criterion, Criteria> {
+ static constexpr const char *tag = "criterion";
+ static constexpr const char *collectionTag = "criteria";
+
+ struct Attributes {
+ static constexpr const char *name = "name";
+ static constexpr const char *type = "type";
+ static constexpr const char *defaultVal = "default";
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
+struct VolumeTraits : public BaseSerializerTraits<VolumeCurve, VolumeCurves> {
+ static constexpr const char *tag = "volume";
+ static constexpr const char *collectionTag = "volumes";
+ static constexpr const char *volumePointTag = "point";
+
+ struct Attributes {
+ static constexpr const char *deviceCategory = "deviceCategory";
+ static constexpr const char *stream = "stream"; // For legacy volume curves
+ static constexpr const char *reference = "ref"; /**< For volume curves factorization. */
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
+struct VolumeGroupTraits : public BaseSerializerTraits<VolumeGroup, VolumeGroups> {
+ static constexpr const char *tag = "volumeGroup";
+ static constexpr const char *collectionTag = "volumeGroups";
+
+ struct Attributes {
+ static constexpr const char *name = "name";
+ static constexpr const char *stream = "stream"; // For legacy volume curves
+ static constexpr const char *indexMin = "indexMin";
+ static constexpr const char *indexMax = "indexMax";
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
+
+using xmlCharUnique = std::unique_ptr<xmlChar, decltype(xmlFree)>;
+
+std::string getXmlAttribute(const xmlNode *cur, const char *attribute)
+{
+ xmlCharUnique charPtr(xmlGetProp(cur, reinterpret_cast<const xmlChar *>(attribute)), xmlFree);
+ if (charPtr == NULL) {
+ return "";
+ }
+ std::string value(reinterpret_cast<const char*>(charPtr.get()));
+ return value;
+}
+
+static void getReference(const _xmlNode *root, const _xmlNode *&refNode, const std::string &refName,
+ const char *collectionTag)
+{
+ for (root = root->xmlChildrenNode; root != NULL; root = root->next) {
+ if (!xmlStrcmp(root->name, (const xmlChar *)collectionTag)) {
+ for (xmlNode *cur = root->xmlChildrenNode; cur != NULL; cur = cur->next) {
+ if ((!xmlStrcmp(cur->name, (const xmlChar *)gReferenceElementName))) {
+ std::string name = getXmlAttribute(cur, gReferenceAttributeName);
+ if (refName == name) {
+ refNode = cur;
+ return;
+ }
+ }
+ }
+ }
+ }
+ return;
+}
+
+template <class Trait>
+static status_t deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
+ typename Trait::Collection &collection,
+ size_t &nbSkippedElement)
+{
+ for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+ if (xmlStrcmp(cur->name, (const xmlChar *)Trait::collectionTag) &&
+ xmlStrcmp(cur->name, (const xmlChar *)Trait::tag)) {
+ continue;
+ }
+ const xmlNode *child = cur;
+ if (!xmlStrcmp(child->name, (const xmlChar *)Trait::collectionTag)) {
+ child = child->xmlChildrenNode;
+ }
+ for (; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)Trait::tag)) {
+ status_t status = Trait::deserialize(doc, child, collection);
+ if (status != NO_ERROR) {
+ nbSkippedElement += 1;
+ }
+ }
+ }
+ if (!xmlStrcmp(cur->name, (const xmlChar *)Trait::tag)) {
+ return NO_ERROR;
+ }
+ }
+ return NO_ERROR;
+}
+
+static constexpr const char *attributesAttributeRef = "attributesRef"; /**< for factorization. */
+
+static status_t parseAttributes(const _xmlNode *cur, audio_attributes_t &attributes)
+{
+ for (; cur != NULL; cur = cur->next) {
+ if (!xmlStrcmp(cur->name, (const xmlChar *)("ContentType"))) {
+ std::string contentTypeXml = getXmlAttribute(cur, "value");
+ audio_content_type_t contentType;
+ if (not AudioContentTypeConverter::fromString(contentTypeXml.c_str(), contentType)) {
+ ALOGE("Invalid content type %s", contentTypeXml.c_str());
+ return BAD_VALUE;
+ }
+ attributes.content_type = contentType;
+ ALOGV("%s content type %s", __FUNCTION__, contentTypeXml.c_str());
+ }
+ if (!xmlStrcmp(cur->name, (const xmlChar *)("Usage"))) {
+ std::string usageXml = getXmlAttribute(cur, "value");
+ audio_usage_t usage;
+ if (not UsageTypeConverter::fromString(usageXml.c_str(), usage)) {
+ ALOGE("Invalid usage %s", usageXml.c_str());
+ return BAD_VALUE;
+ }
+ attributes.usage = usage;
+ ALOGV("%s usage %s", __FUNCTION__, usageXml.c_str());
+ }
+ if (!xmlStrcmp(cur->name, (const xmlChar *)("Flags"))) {
+ std::string flags = getXmlAttribute(cur, "value");
+
+ ALOGV("%s flags %s", __FUNCTION__, flags.c_str());
+ attributes.flags = AudioFlagConverter::maskFromString(flags, " ");
+ }
+ if (!xmlStrcmp(cur->name, (const xmlChar *)("Bundle"))) {
+ std::string bundleKey = getXmlAttribute(cur, "key");
+ std::string bundleValue = getXmlAttribute(cur, "value");
+
+ ALOGV("%s Bundle %s %s", __FUNCTION__, bundleKey.c_str(), bundleValue.c_str());
+
+ std::string tags(bundleKey + "=" + bundleValue);
+ std::strncpy(attributes.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ }
+ }
+ return NO_ERROR;
+}
+
+static status_t deserializeAttributes(_xmlDoc *doc, const _xmlNode *cur,
+ audio_attributes_t &attributes) {
+ // Retrieve content type, usage, flags, and bundle from xml
+ for (; cur != NULL; cur = cur->next) {
+ if (not xmlStrcmp(cur->name, (const xmlChar *)("Attributes"))) {
+ const xmlNode *attrNode = cur;
+ std::string attrRef = getXmlAttribute(cur, attributesAttributeRef);
+ if (!attrRef.empty()) {
+ getReference(xmlDocGetRootElement(doc), attrNode, attrRef, attributesAttributeRef);
+ if (attrNode == NULL) {
+ ALOGE("%s: No reference found for %s", __FUNCTION__, attrRef.c_str());
+ return BAD_VALUE;
+ }
+ return deserializeAttributes(doc, attrNode->xmlChildrenNode, attributes);
+ }
+ return parseAttributes(attrNode->xmlChildrenNode, attributes);
+ }
+ if (not xmlStrcmp(cur->name, (const xmlChar *)("ContentType")) ||
+ not xmlStrcmp(cur->name, (const xmlChar *)("Usage")) ||
+ not xmlStrcmp(cur->name, (const xmlChar *)("Flags")) ||
+ not xmlStrcmp(cur->name, (const xmlChar *)("Bundle"))) {
+ return parseAttributes(cur, attributes);
+ }
+ }
+ return BAD_VALUE;
+}
+
+static status_t deserializeAttributesCollection(_xmlDoc *doc, const _xmlNode *cur,
+ AttributesVector &collection)
+{
+ status_t ret = BAD_VALUE;
+ // Either we do provide only one attributes or a collection of supported attributes
+ for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+ if (not xmlStrcmp(cur->name, (const xmlChar *)("Attributes")) ||
+ not xmlStrcmp(cur->name, (const xmlChar *)("ContentType")) ||
+ not xmlStrcmp(cur->name, (const xmlChar *)("Usage")) ||
+ not xmlStrcmp(cur->name, (const xmlChar *)("Flags")) ||
+ not xmlStrcmp(cur->name, (const xmlChar *)("Bundle"))) {
+ audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ ret = deserializeAttributes(doc, cur, attributes);
+ if (ret == NO_ERROR) {
+ collection.push_back(attributes);
+ // We are done if the "Attributes" tag is omitted: only one Attributes element is allowed
+ if (xmlStrcmp(cur->name, (const xmlChar *)("Attributes"))) {
+ return ret;
+ }
+ }
+ }
+ }
+ return ret;
+}
+
+status_t AttributesGroupTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
+ Collection &attributesGroup)
+{
+ std::string name = getXmlAttribute(child, Attributes::name);
+ if (name.empty()) {
+ ALOGV("AttributesGroupTraits No attribute %s found", Attributes::name);
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+
+ std::string volumeGroup = getXmlAttribute(child, Attributes::volumeGroup);
+ if (volumeGroup.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::volumeGroup);
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::volumeGroup, volumeGroup.c_str());
+
+ audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
+ std::string streamTypeXml = getXmlAttribute(child, Attributes::streamType);
+ if (streamTypeXml.empty()) {
+ ALOGV("%s: No attribute %s found", __FUNCTION__, Attributes::streamType);
+ } else {
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::streamType, streamTypeXml.c_str());
+ if (not StreamTypeConverter::fromString(streamTypeXml.c_str(), streamType)) {
+ ALOGE("Invalid stream type %s", streamTypeXml.c_str());
+ return BAD_VALUE;
+ }
+ }
+ AttributesVector attributesVect;
+ deserializeAttributesCollection(doc, child, attributesVect);
+
+ attributesGroup.push_back({name, streamType, volumeGroup, attributesVect});
+ return NO_ERROR;
+}
+
+status_t ValueTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child, Collection &values)
+{
+ std::string literal = getXmlAttribute(child, Attributes::literal);
+ if (literal.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
+ return BAD_VALUE;
+ }
+ uint32_t numerical = 0;
+ std::string numericalTag = getXmlAttribute(child, Attributes::numerical);
+ if (numericalTag.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
+ return BAD_VALUE;
+ }
+ if (!convertTo(numericalTag, numerical)) {
+ ALOGE("%s: : Invalid value(%s)", __FUNCTION__, numericalTag.c_str());
+ return BAD_VALUE;
+ }
+ values.push_back({numerical, literal});
+ return NO_ERROR;
+}
+
+status_t CriterionTypeTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
+ Collection &criterionTypes)
+{
+ std::string name = getXmlAttribute(child, Attributes::name);
+ if (name.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::name, name.c_str());
+
+ std::string type = getXmlAttribute(child, Attributes::type);
+ if (type.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::type);
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::type, type.c_str());
+ bool isInclusive(type == "inclusive");
+
+ ValuePairs pairs;
+ size_t nbSkippedElements = 0;
+ deserializeCollection<ValueTraits>(doc, child, pairs, nbSkippedElements);
+ criterionTypes.push_back({name, isInclusive, pairs});
+ return NO_ERROR;
+}
+
+status_t CriterionTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child,
+ Collection &criteria)
+{
+ std::string name = getXmlAttribute(child, Attributes::name);
+ if (name.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+
+ std::string defaultValue = getXmlAttribute(child, Attributes::defaultVal);
+ if (defaultValue.empty()) {
+ // Providing a default value for a criterion is not mandatory, even though it is recommended.
+ ALOGV("%s: No attribute %s found (but recommended)", __FUNCTION__, Attributes::defaultVal);
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::defaultVal, defaultValue.c_str());
+
+ std::string typeName = getXmlAttribute(child, Attributes::type);
+ if (typeName.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::type, typeName.c_str());
+
+ criteria.push_back({name, typeName, defaultValue});
+ return NO_ERROR;
+}
+
+status_t ProductStrategyTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
+ Collection &strategies)
+{
+ std::string name = getXmlAttribute(child, Attributes::name);
+ if (name.empty()) {
+ ALOGE("ProductStrategyTraits No attribute %s found", Attributes::name);
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+
+ size_t skipped = 0;
+ AttributesGroups attrGroups;
+ deserializeCollection<AttributesGroupTraits>(doc, child, attrGroups, skipped);
+
+ strategies.push_back({name, attrGroups});
+ return NO_ERROR;
+}
+
+status_t VolumeTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+ std::string deviceCategory = getXmlAttribute(root, Attributes::deviceCategory);
+ if (deviceCategory.empty()) {
+ ALOGW("%s: No %s found", __FUNCTION__, Attributes::deviceCategory);
+ }
+ std::string referenceName = getXmlAttribute(root, Attributes::reference);
+ const _xmlNode *ref = NULL;
+ if (!referenceName.empty()) {
+ getReference(xmlDocGetRootElement(doc), ref, referenceName, collectionTag);
+ if (ref == NULL) {
+ ALOGE("%s: No reference Ptr found for %s", __FUNCTION__, referenceName.c_str());
+ return BAD_VALUE;
+ }
+ }
+ // Retrieve curve point from reference element if found or directly from current curve
+ CurvePoints curvePoints;
+ for (const xmlNode *child = referenceName.empty() ?
+ root->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)volumePointTag)) {
+ xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (pointXml == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s=%s", __func__, tag, reinterpret_cast<const char*>(pointXml.get()));
+ std::vector<int> point;
+ collectionFromString<DefaultTraits<int>>(
+ reinterpret_cast<const char*>(pointXml.get()), point, ",");
+ if (point.size() != 2) {
+ ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ return BAD_VALUE;
+ }
+ curvePoints.push_back({point[0], point[1]});
+ }
+ }
+ volumes.push_back({ deviceCategory, curvePoints });
+ return NO_ERROR;
+}
+
+status_t VolumeGroupTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+ std::string name;
+ int indexMin = 0;
+ int indexMax = 0;
+ StreamVector streams = {};
+ AttributesVector attributesVect = {};
+
+ for (const xmlNode *child = root->xmlChildrenNode; child != NULL; child = child->next) {
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::name)) {
+ xmlCharUnique nameXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (nameXml == nullptr) {
+ return BAD_VALUE;
+ }
+ name = reinterpret_cast<const char*>(nameXml.get());
+ }
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMin)) {
+ xmlCharUnique indexMinXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (indexMinXml == nullptr) {
+ return BAD_VALUE;
+ }
+ std::string indexMinLiteral(reinterpret_cast<const char*>(indexMinXml.get()));
+ if (!convertTo(indexMinLiteral, indexMin)) {
+ return BAD_VALUE;
+ }
+ }
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMax)) {
+ xmlCharUnique indexMaxXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (indexMaxXml == nullptr) {
+ return BAD_VALUE;
+ }
+ std::string indexMaxLiteral(reinterpret_cast<const char*>(indexMaxXml.get()));
+ if (!convertTo(indexMaxLiteral, indexMax)) {
+ return BAD_VALUE;
+ }
+ }
+ }
+ deserializeAttributesCollection(doc, root, attributesVect);
+
+ std::string streamNames;
+ for (const auto &stream : streams) {
+ streamNames += android::toString(stream) + " ";
+ }
+ std::string attrNames;
+ for (const auto &attr : attributesVect) {
+ attrNames += android::toString(attr) + "\n";
+ }
+ ALOGV("%s: group=%s indexMin=%d, indexMax=%d streams=%s attributes=%s",
+ __func__, name.c_str(), indexMin, indexMax, streamNames.c_str(), attrNames.c_str());
+
+ VolumeCurves groupVolumeCurves;
+ size_t skipped = 0;
+ deserializeCollection<VolumeTraits>(doc, root, groupVolumeCurves, skipped);
+ volumes.push_back({ name, indexMin, indexMax, groupVolumeCurves });
+ return NO_ERROR;
+}
+
+static constexpr const char *legacyVolumecollectionTag = "volumes";
+static constexpr const char *legacyVolumeTag = "volume";
+
+status_t deserializeLegacyVolume(_xmlDoc *doc, const _xmlNode *cur,
+ std::map<std::string, VolumeCurves> &legacyVolumes)
+{
+ std::string streamTypeLiteral = getXmlAttribute(cur, "stream");
+ if (streamTypeLiteral.empty()) {
+ ALOGE("%s: No attribute stream found", __func__);
+ return BAD_VALUE;
+ }
+ std::string deviceCategoryLiteral = getXmlAttribute(cur, "deviceCategory");
+ if (deviceCategoryLiteral.empty()) {
+ ALOGE("%s: No attribute deviceCategory found", __func__);
+ return BAD_VALUE;
+ }
+ std::string referenceName = getXmlAttribute(cur, "ref");
+ const xmlNode *ref = NULL;
+ if (!referenceName.empty()) {
+ getReference(xmlDocGetRootElement(doc), ref, referenceName, legacyVolumecollectionTag);
+ if (ref == NULL) {
+ ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
+ return BAD_VALUE;
+ }
+ ALOGV("%s: reference found for %s", __func__, referenceName.c_str());
+ }
+ CurvePoints curvePoints;
+ for (const xmlNode *child = referenceName.empty() ?
+ cur->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)VolumeTraits::volumePointTag)) {
+ xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (pointXml == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s=%s", __func__, legacyVolumeTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ std::vector<int> point;
+ collectionFromString<DefaultTraits<int>>(
+ reinterpret_cast<const char*>(pointXml.get()), point, ",");
+ if (point.size() != 2) {
+ ALOGE("%s: Invalid %s: %s", __func__, VolumeTraits::volumePointTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ return BAD_VALUE;
+ }
+ curvePoints.push_back({point[0], point[1]});
+ }
+ }
+ legacyVolumes[streamTypeLiteral].push_back({ deviceCategoryLiteral, curvePoints });
+ return NO_ERROR;
+}
+
+static status_t deserializeLegacyVolumeCollection(_xmlDoc *doc, const _xmlNode *cur,
+ VolumeGroups &volumeGroups,
+ size_t &nbSkippedElement)
+{
+ std::map<std::string, VolumeCurves> legacyVolumeMap;
+ for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+ if (xmlStrcmp(cur->name, (const xmlChar *)legacyVolumecollectionTag)) {
+ continue;
+ }
+ const xmlNode *child = cur->xmlChildrenNode;
+ for (; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)legacyVolumeTag)) {
+
+ status_t status = deserializeLegacyVolume(doc, child, legacyVolumeMap);
+ if (status != NO_ERROR) {
+ nbSkippedElement += 1;
+ }
+ }
+ }
+ }
+ for (const auto &volumeMapIter : legacyVolumeMap) {
+ // In order to let AudioService set the min and max (for compatibility), set indexMin and
+ // indexMax to -1, except for private streams
+ audio_stream_type_t streamType;
+ if (!StreamTypeConverter::fromString(volumeMapIter.first, streamType)) {
+ ALOGE("%s: Invalid stream %s", __func__, volumeMapIter.first.c_str());
+ return BAD_VALUE;
+ }
+ int indexMin = streamType >= AUDIO_STREAM_PUBLIC_CNT ? 0 : -1;
+ int indexMax = streamType >= AUDIO_STREAM_PUBLIC_CNT ? 100 : -1;
+ volumeGroups.push_back({ volumeMapIter.first, indexMin, indexMax, volumeMapIter.second });
+ }
+ return NO_ERROR;
+}
+
+ParsingResult parse(const char* path) {
+ xmlDocPtr doc;
+ doc = xmlParseFile(path);
+ if (doc == NULL) {
+ ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+ return {nullptr, 0};
+ }
+ xmlNodePtr cur = xmlDocGetRootElement(doc);
+ if (cur == NULL) {
+ ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
+ xmlFreeDoc(doc);
+ return {nullptr, 0};
+ }
+ if (xmlXIncludeProcess(doc) < 0) {
+ ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
+ xmlFreeDoc(doc);
+ return {nullptr, 0};
+ }
+ std::string version = getXmlAttribute(cur, gVersionAttribute);
+ if (version.empty()) {
+ ALOGE("%s: No version found", __func__);
+ xmlFreeDoc(doc);
+ return {nullptr, 0};
+ }
+ size_t nbSkippedElements = 0;
+ auto config = std::make_unique<Config>();
+ config->version = std::stof(version);
+ deserializeCollection<ProductStrategyTraits>(
+ doc, cur, config->productStrategies, nbSkippedElements);
+ deserializeCollection<CriterionTraits>(
+ doc, cur, config->criteria, nbSkippedElements);
+ deserializeCollection<CriterionTypeTraits>(
+ doc, cur, config->criterionTypes, nbSkippedElements);
+ deserializeCollection<VolumeGroupTraits>(
+ doc, cur, config->volumeGroups, nbSkippedElements);
+
+ xmlFreeDoc(doc);
+ return {std::move(config), nbSkippedElements};
+}
+
+android::status_t parseLegacyVolumeFile(const char* path, VolumeGroups &volumeGroups) {
+ xmlDocPtr doc;
+ doc = xmlParseFile(path);
+ if (doc == NULL) {
+ ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+ return BAD_VALUE;
+ }
+ xmlNodePtr cur = xmlDocGetRootElement(doc);
+ if (cur == NULL) {
+ ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
+ xmlFreeDoc(doc);
+ return BAD_VALUE;
+ }
+ if (xmlXIncludeProcess(doc) < 0) {
+ ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
+ xmlFreeDoc(doc);
+ return BAD_VALUE;
+ }
+ size_t nbSkippedElements = 0;
+ status_t status = deserializeLegacyVolumeCollection(doc, cur, volumeGroups, nbSkippedElements);
+ xmlFreeDoc(doc);
+ return status;
+}
+
+static const char *kConfigLocationList[] = {"/odm/etc", "/vendor/etc", "/system/etc"};
+static const int kConfigLocationListSize =
+ (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+static const int gApmXmlConfigFilePathMaxLength = 128;
+
+static constexpr const char *apmXmlConfigFileName = "audio_policy_configuration.xml";
+static constexpr const char *apmA2dpOffloadDisabledXmlConfigFileName =
+ "audio_policy_configuration_a2dp_offload_disabled.xml";
+
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups) {
+ char audioPolicyXmlConfigFile[gApmXmlConfigFilePathMaxLength];
+ std::vector<const char *> fileNames;
+ status_t ret;
+
+ if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
+ property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // A2DP offload supported but disabled: try to use special XML file
+ fileNames.push_back(apmA2dpOffloadDisabledXmlConfigFileName);
+ }
+ fileNames.push_back(apmXmlConfigFileName);
+
+ for (const char* fileName : fileNames) {
+ for (int i = 0; i < kConfigLocationListSize; i++) {
+ snprintf(audioPolicyXmlConfigFile, sizeof(audioPolicyXmlConfigFile),
+ "%s/%s", kConfigLocationList[i], fileName);
+ ret = parseLegacyVolumeFile(audioPolicyXmlConfigFile, volumeGroups);
+ if (ret == NO_ERROR) {
+ return ret;
+ }
+ }
+ }
+ return BAD_VALUE;
+}
+
+} // namespace engineConfig
+} // namespace android
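For reference, the element and attribute names handled by VolumeGroupTraits and VolumeTraits above correspond to XML of roughly the following shape (the names come from the traits, the values are illustrative and not taken from any shipped configuration file):

    <volumeGroups>
        <volumeGroup>
            <name>media</name>
            <indexMin>0</indexMin>
            <indexMax>40</indexMax>
            <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
                <!-- each point is "index,attenuation in millibel" -->
                <point>0,-4200</point>
                <point>40,0</point>
            </volume>
        </volumeGroup>
    </volumeGroups>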
diff --git a/services/audiopolicy/engine/interface/Android.bp b/services/audiopolicy/engine/interface/Android.bp
new file mode 100644
index 0000000..2ea42b6
--- /dev/null
+++ b/services/audiopolicy/engine/interface/Android.bp
@@ -0,0 +1,19 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_headers {
+ name: "libaudiopolicyengine_interface_headers",
+ host_supported: true,
+ export_include_dirs: ["."],
+}
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
index 04594f5..38f3401 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -17,7 +17,10 @@
#pragma once
#include <AudioPolicyManagerObserver.h>
-#include <RoutingStrategy.h>
+#include <media/AudioProductStrategy.h>
+#include <media/AudioVolumeGroup.h>
+#include <IVolumeCurves.h>
+#include <policy.h>
#include <Volume.h>
#include <HwModule.h>
#include <DeviceDescriptor.h>
@@ -28,6 +31,10 @@
namespace android {
+using DeviceStrategyMap = std::map<product_strategy_t, DeviceVector>;
+using StrategyVector = std::vector<product_strategy_t>;
+using VolumeGroupVector = std::vector<volume_group_t>;
+
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
*/
@@ -50,42 +57,6 @@
virtual void setObserver(AudioPolicyManagerObserver *observer) = 0;
/**
- * Get the input device selected for a given input source.
- *
- * @param[in] inputSource to get the selected input device associated to
- *
- * @return selected input device for the given input source, may be none if error.
- */
- virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const = 0;
-
- /**
- * Get the output device associated to a given strategy.
- *
- * @param[in] stream type for which the selected ouput device is requested.
- *
- * @return selected ouput device for the given strategy, may be none if error.
- */
- virtual audio_devices_t getDeviceForStrategy(routing_strategy stategy) const = 0;
-
- /**
- * Get the strategy selected for a given stream type.
- *
- * @param[in] stream: for which the selected strategy followed by is requested.
- *
- * @return strategy to be followed.
- */
- virtual routing_strategy getStrategyForStream(audio_stream_type_t stream) = 0;
-
- /**
- * Get the strategy selected for a given usage.
- *
- * @param[in] usage to get the selected strategy followed by.
- *
- * @return strategy to be followed.
- */
- virtual routing_strategy getStrategyForUsage(audio_usage_t usage) = 0;
-
- /**
* Set the Telephony Mode.
*
* @param[in] mode: Android Phone state (normal, ringtone, csv, in communication)
@@ -133,6 +104,200 @@
virtual status_t setDeviceConnectionState(const android::sp<android::DeviceDescriptor> devDesc,
audio_policy_dev_state_t state) = 0;
+ /**
+ * Get the strategy selected for a given audio attributes.
+ *
+ * @param[in] audio attributes to get the selected @product_strategy_t followed by.
+ *
+ * @return @product_strategy_t to be followed.
+ */
+ virtual product_strategy_t getProductStrategyForAttributes(
+ const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getOutputDevicesForAttributes retrieves the devices to be used for given
+ * audio attributes.
+ * @param attributes of the output requesting Device(s) selection
+ * @param preferedDevice valid reference if a preferred device is requested, nullptr otherwise.
+ * @param fromCache if true, the devices are returned from the internal cache,
+ * otherwise they are determined from the current state (connected devices, phone state,
+ * forced usage, a2dp output...)
+ * @return vector of descriptors of the devices appropriate for the given audio attributes
+ * according to the current phone state, forced usages, connected devices...
+ * The fromCache parameter allows to:
+ * 1 speed up the process when the state is stable (when starting or stopping an output)
+ * 2 access either the current device selection (fromCache == true) or the
+ * "future" device selection (fromCache == false) when called from a context
+ * where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
+ * before the manager updates its outputs.
+ */
+ virtual DeviceVector getOutputDevicesForAttributes(
+ const audio_attributes_t &attributes,
+ const sp<DeviceDescriptor> &preferedDevice = nullptr,
+ bool fromCache = false) const = 0;
+
+ /**
+ * @brief getOutputDevicesForStream Legacy function retrieving devices from a stream type.
+ * @param stream type of the output requesting Device(s) selection
+ * @param fromCache if true, the devices are returned from the internal cache,
+ * otherwise they are determined from the current state (connected devices, phone state,
+ * forced usage, a2dp output...)
+ * @return devices appropriate for the given stream type according to the current phone
+ * state, forced usages, connected devices...
+ * The fromCache parameter allows to:
+ * 1 speed up the process when the state is stable (when starting or stopping an output)
+ * 2 access either the current device selection (fromCache == true) or the
+ * "future" device selection (fromCache == false) when called from a context
+ * where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
+ * before the manager updates its outputs.
+ */
+ virtual DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
+ bool fromCache = false) const = 0;
+
+ /**
+ * Get the input device selected for given audio attributes.
+ *
+ * @param[in] attr audio attributes to consider
+ * @param[out] mix to be used if a mix has been installed for the given audio attributes.
+ * @return selected input device for the audio attributes, may be null if error.
+ */
+ virtual sp<DeviceDescriptor> getInputDeviceForAttributes(
+ const audio_attributes_t &attr, AudioMix **mix = nullptr) const = 0;
+
+ /**
+ * Get the legacy stream type for a given audio attributes.
+ *
+ * @param[in] audio attributes to get the associated audio_stream_type_t.
+ *
+ * @return audio_stream_type_t associated to the attributes.
+ */
+ virtual audio_stream_type_t getStreamTypeForAttributes(
+ const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getAttributesForStream get the audio attributes from legacy stream type
+ * Attributes returned might only be used to check upon routing decision, not volume decisions.
+ * @param stream to consider
+ * @return audio attributes matching the legacy stream type
+ */
+ virtual audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const = 0;
+
+ /**
+ * @brief getStreamTypesForProductStrategy retrieves the list of legacy stream types following
+ * the given product strategy
+ * @param ps product strategy to consider
+ * @return vector of legacy stream types associated with the given product strategy
+ */
+ virtual StreamTypeVector getStreamTypesForProductStrategy(product_strategy_t ps) const = 0;
+
+ /**
+ * @brief getAllAttributesForProductStrategy retrieves all the audio attributes following the
+ * given product strategy. Any attributes that "match" one of these reference attributes will
+ * follow the product strategy.
+ * "Matching" means the usage shall match if the reference attributes define a usage, AND
+ * the content type shall match if the reference attributes define a content type, AND
+ * the flags shall match if the reference attributes define flags, AND
+ * the tags shall match if the reference attributes define tags.
+ * @param ps product strategy to consider
+ * @return vector of audio attributes following the given product strategy, empty if the
+ * strategy is unknown.
+ */
+ virtual AttributesVector getAllAttributesForProductStrategy(product_strategy_t ps) const = 0;
+
+ /**
+ * @brief getOrderedProductStrategies
+ * @return priority-ordered product strategies to help the AudioPolicyManager evaluate the
+ * device selection per output according to the prioritized strategies.
+ */
+ virtual StrategyVector getOrderedProductStrategies() const = 0;
+
+ /**
+ * @brief updateDeviceSelectionCache. Device selection for AudioAttributes / Streams is cached
+ * in the engine in order to speed up the process when the audio system is stable.
+ * When a device is connected or the android mode is changed, the engine is notified and can
+ * update the cache.
+ * When starting / stopping an output with a stream that can affect notification, the engine
+ * needs to update the cache upon this function call.
+ */
+ virtual void updateDeviceSelectionCache() = 0;
+
+ /**
+ * @brief listAudioProductStrategies. Introspection API to retrieve a collection of
+ * AudioProductStrategy objects that allows building AudioAttributes according to a
+ * product_strategy, which is just an index. Each strategy also has a human-readable name to
+ * help the Car/Oem/AudioManager identify the use case.
+ * @param strategies collection.
+ * @return OK if the list has been retrieved, error code otherwise
+ */
+ virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const = 0;
+
+ /**
+ * @brief getVolumeCurvesForAttributes retrieves the Volume Curves interface for the
+ * requested Audio Attributes.
+ * @param attr to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getVolumeCurvesForStreamType retrieves the Volume Curves interface for the stream
+ * @param stream to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) const = 0;
+
+ /**
+ * @brief getVolumeCurvesForVolumeGroup retrieves the Volume Curves interface for volume group
+ * @param group to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForVolumeGroup(volume_group_t group) const = 0;
+
+ /**
+ * @brief getVolumeGroups retrieves the collection of volume groups.
+ * @return vector of volume groups
+ */
+ virtual VolumeGroupVector getVolumeGroups() const = 0;
+
+ /**
+ * @brief getVolumeGroupForAttributes gets the appropriate volume group to be used for a given
+ * Audio Attributes.
+ * @param attr to be considered
+ * @return volume group associated to the given audio attributes, default group if none
+ * applicable, VOLUME_GROUP_NONE if no default group defined.
+ */
+ virtual volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getVolumeGroupForStreamType gets the appropriate volume group to be used for a given
+ * legacy stream type
+ * @param stream type to be considered
+ * @return volume group associated to the given stream type, default group if none applicable,
+ * VOLUME_GROUP_NONE if no default group defined.
+ */
+ virtual volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const = 0;
+
+ virtual StreamTypeVector getStreamTypesForVolumeGroup(volume_group_t volumeGroup) const = 0;
+
+ virtual AttributesVector getAllAttributesForVolumeGroup(volume_group_t volumeGroup) const = 0;
+
+ /**
+ * @brief listAudioVolumeGroups introspection API to get the Audio Volume Groups, aka
+ * former stream aliases in Audio Service, defining volume curves attached to one or more
+ * Audio Attributes.
+ * @param groups collection of volume groups to be filled in.
+ * @return NO_ERROR if the volume groups were retrieved successfully, error code otherwise
+ */
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) const = 0;
+
+ virtual void dump(String8 *dst) const = 0;
+
protected:
virtual ~AudioPolicyManagerInterface() {}
};
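To illustrate the caller side of this interface, a hedged sketch of how the policy manager might query the engine for a media track (illustrative only, not part of the patch; the engine pointer and the attribute values are assumptions, and auto is used to avoid committing to type spellings outside this header):

    void routeMediaExample(android::AudioPolicyManagerInterface *engine) {
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_MEDIA;
        attr.content_type = AUDIO_CONTENT_TYPE_MUSIC;

        const auto strategy = engine->getProductStrategyForAttributes(attr);
        const auto devices = engine->getOutputDevicesForAttributes(attr); // defaults: no preferred device, no cache
        auto *curves = engine->getVolumeCurvesForAttributes(attr);
        const auto group = engine->getVolumeGroupForAttributes(attr);

        // strategy, devices, curves and group would then drive output selection
        // and volume computation in the policy manager.
        (void)strategy; (void)devices; (void)curves; (void)group;
    }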
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index b7902cf..43ba625 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,7 +16,6 @@
#pragma once
-#include <IVolumeCurvesCollection.h>
#include <AudioGain.h>
#include <AudioPort.h>
#include <AudioPatch.h>
@@ -51,8 +50,6 @@
virtual const DeviceVector &getAvailableInputDevices() const = 0;
- virtual IVolumeCurvesCollection &getVolumeCurves() = 0;
-
virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
protected:
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index c2105e9..4eff6e6 100644
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -11,14 +11,15 @@
src/Engine.cpp \
src/EngineInstance.cpp \
src/Stream.cpp \
- src/Strategy.cpp \
- src/Usage.cpp \
src/InputSource.cpp \
+ ../engine/common/src/VolumeCurve.cpp \
+ ../engine/common/src/VolumeGroup.cpp \
+ ../engine/common/src/ProductStrategy.cpp \
+ ../engine/common/src/EngineBase.cpp
audio_policy_engine_includes_common := \
frameworks/av/services/audiopolicy/engineconfigurable/include \
- frameworks/av/services/audiopolicy/engineconfigurable/interface \
- frameworks/av/services/audiopolicy/engine/interface
+ frameworks/av/services/audiopolicy/engineconfigurable/interface
LOCAL_CFLAGS += \
-Wall \
@@ -32,8 +33,12 @@
$(audio_policy_engine_includes_common) \
$(TARGET_OUT_HEADERS)/hw \
$(call include-path-for, frameworks-av) \
- $(call include-path-for, audio-utils) \
- frameworks/av/services/audiopolicy/common/include
+ $(call include-path-for, audio-utils)
+
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon \
+ libaudiopolicyengine_common_headers \
+ libaudiopolicyengine_interface_headers
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -45,13 +50,14 @@
libaudiopolicycomponents
LOCAL_SHARED_LIBRARIES := \
+ libaudiopolicyengineconfig \
liblog \
- libcutils \
libutils \
liblog \
libaudioutils \
libparameter \
libmedia_helper \
+ libaudiopolicy \
libxml2
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/engineconfigurable/config/Android.mk b/services/audiopolicy/engineconfigurable/config/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/Android.mk b/services/audiopolicy/engineconfigurable/config/example/Android.mk
new file mode 100644
index 0000000..45419f0
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/Android.mk
@@ -0,0 +1,126 @@
+LOCAL_PATH := $(call my-dir)
+
+TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
+PROVISION_CRITERION_TYPES := $(TOOLS)/provision_criterion_types_from_android_headers.mk
+
+##################################################################
+# CONFIGURATION TOP FILE
+##################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_configuration_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_configuration.xml
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+
+LOCAL_REQUIRED_MODULES := \
+ audio_policy_engine_product_strategies_phone.xml \
+ audio_policy_engine_stream_volumes.xml \
+ audio_policy_engine_default_stream_volumes.xml \
+ audio_policy_engine_criteria.xml \
+ audio_policy_engine_criterion_types.xml
+
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_product_strategies_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_product_strategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
+
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
+
+##################################################################
+# AUTOMOTIVE CONFIGURATION TOP FILE
+##################################################################
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_configuration_automotive.xml
+LOCAL_MODULE_STEM := audio_policy_engine_configuration.xml
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE_STEM)
+
+LOCAL_REQUIRED_MODULES := \
+ audio_policy_engine_product_strategies_automotive.xml \
+ audio_policy_engine_criteria.xml \
+ audio_policy_engine_criterion_types.xml \
+ audio_policy_engine_volumes.xml
+
+include $(BUILD_PREBUILT)
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_product_strategies_automotive.xml
+LOCAL_MODULE_STEM := audio_policy_engine_product_strategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_criteria.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := common/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_criterion_types.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+ $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
+
+ANDROID_AUDIO_BASE_HEADER_FILE := system/media/audio/include/system/audio-base.h
+AUDIO_POLICY_CONFIGURATION_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
+CRITERION_TYPES_FILE := $(LOCAL_PATH)/common/$(LOCAL_MODULE).in
+
+include $(PROVISION_CRITERION_TYPES)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
similarity index 67%
rename from services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
rename to services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
index 5d9193b..28a140a 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
@@ -12,14 +12,14 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--->
-<!--
- These are the minimum required criteria to be used by Audio HAL to ensure a basic
- user experience on an Android device
--->
-<configuration name="audio_policy_wrapper_configuration" xmlns:xi="http://www.w3.org/2001/XInclude">
+ -->
- <xi:include href="policy_criterion_types.xml"/>
- <xi:include href="policy_criteria.xml"/>
+<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+ <xi:include href="audio_policy_engine_product_strategies.xml"/>
+ <xi:include href="audio_policy_engine_criterion_types.xml"/>
+ <xi:include href="audio_policy_engine_criteria.xml"/>
+ <xi:include href="audio_policy_engine_volumes.xml"/>
</configuration>
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
new file mode 100644
index 0000000..c487da9
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
@@ -0,0 +1,170 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<ProductStrategies>
+ <!-- OEM Usages -->
+ <!-- product_strategy will be defined according to this order
+ product_strategy is oem_traffic_anouncement if all the conditions are satisfied for
+ AudioAttributes aa
+
+ int type = 0;
+ if (bundle != null) {
+ type = bundle.getInt(KEY_OEM_TYPE, 0);
+ }
+ if(
+ ( aa.mContentType == AudioAttributes.AUDIO_CONTENT_TYPE_SPEECH ) &&
+ ( aa.mUsage == AudioAttributes.AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE ) &&
+ ( type == 1 ) )
+ -->
+
+ <ProductStrategy name="oem_traffic_anouncement">
+ <AttributesGroup volumeGroup="oem_traffic_anouncement">
+ <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+ <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+ <!-- traffic_annoucement = 1 -->
+ <Bundle key="oem" value="1"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="oem_strategy_1">
+ <AttributesGroup volumeGroup="oem_adas_2">
+ <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+ <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+ <Bundle key="oem" value="2"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="oem_strategy_2">
+ <AttributesGroup volumeGroup="oem_adas_3">
+ <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+ <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+ <Bundle key="oem" value="3"/>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- Car Usages -->
+ <!-- Keep those lines only for car -->
+ <!-- Check car conditions if any OEM conditions matched -->
+ <!-- As defined by CarAudioAttributesUtil.java -->
+ <!-- product_strategy will be defined according to this order
+ product_strategy is radio if all the conditions are satisfied for AudioAttributes aa
+
+ int type = CAR_AUDIO_TYPE_DEFAULT;
+ if (bundle != null) {
+ type = bundle.getInt(KEY_CAR_AUDIO_TYPE, CAR_AUDIO_TYPE_DEFAULT);
+ }
+ if(
+ ( aa.mContentType == AudioAttributes.AUDIO_CONTENT_TYPE_SPEECH ) &&
+ ( aa.mUsage == AudioAttributes.AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE ) &&
+ ( type == CAR_AUDIO_TYPE_RADIO ) )
+ -->
+ <ProductStrategy name="radio">
+ <AttributesGroup volumeGroup="media_car_audio_type_3">
+ <ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
+ <Usage value="AUDIO_USAGE_MEDIA"/>
+ <Bundle key="car_audio_type" value="3"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="ext_audio_source">
+ <AttributesGroup volumeGroup="media_car_audio_type_7">
+ <ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
+ <Usage value="AUDIO_USAGE_MEDIA"/>
+ <Bundle key="car_audio_type" value="7"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="voice_command">
+ <AttributesGroup volumeGroup="speech">
+ <Attributes>
+ <ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
+ <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+ <!-- CAR_AUDIO_TYPE_VOICE_COMMAND = 1 -->
+ <Bundle key="car_audio_type" value="1"/>
+ </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="safety_alert">
+ <AttributesGroup volumeGroup="system">
+ <ContentType value="AUDIO_CONTENT_TYPE_SONIFICATION"/>
+ <Usage value="AUDIO_USAGE_NOTIFICATION"/>
+ <!-- CAR_AUDIO_TYPE_SAFETY_ALERT = 2 -->
+ <Bundle key="car_audio_type" value="2"/>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- To be checked
+ CAR_AUDIO_TYPE_CARSERVICE_BOTTOM
+ CAR_AUDIO_TYPE_CARSERVICE_CAR_PROXY
+ CAR_AUDIO_TYPE_CARSERVICE_MEDIA_MUTE
+ -->
+
+ <!-- Generic Usages -->
+ <ProductStrategy name="music">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="media">
+ <Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
+ <!-- Default product strategy has empty attributes -->
+ <Attributes></Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="nav_guidance">
+ <AttributesGroup volumeGroup="speech">
+ <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="voice_call">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="phone">
+ <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="phone">
+ <Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="alarm">
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="ring">
+ <Usage value="AUDIO_USAGE_ALARM"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="ring">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
+ <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="notification">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="ring">
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="system">
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
+ <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/>
+ </AttributesGroup>
+ </ProductStrategy>
+ <ProductStrategy name="tts">
+ <!-- The TTS stream MUST be managed outside the default product strategy when there is NO
+ DEDICATED OUTPUT for TTS; otherwise, when a beacon plays, the whole default strategy gets
+ muted. If that strategy is media, this is very noticeable... -->
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
+ <Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+</ProductStrategies>
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml
new file mode 100644
index 0000000..b326b50
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml
@@ -0,0 +1,192 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<!-- Volume Groups Tables included by Audio Policy Configuration file -->
+<!-- Note:
+     It is VALID to declare a volume group without attributes when a product strategy
+     references this group for all of its attributes.
+     Otherwise, attributes must be specified.
+-->
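+<!-- Explanatory note (added): each <point> is "index,attenuation". The first value is a
+     percentage (0..100) of the group's index range (indexMin..indexMax); the second is the
+     attenuation in millibels (1/100 dB) applied at that position, e.g. <point>33,-2800</point>
+     means -28 dB at a third of the range. -->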
+
+<volumeGroups>
+ <volumeGroup>
+ <name>oem_traffic_anouncement</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+<!-- OEM ADAS is a volume group that has a single port gain (this is why it is a group)
+     but may host different streams.
+     A priority must be defined among them: either they are mutually exclusive, so the applied
+     volume is the one of the currently active stream, or a priority must be given by some
+     other means. -->
+ <volumeGroup>
+ <name>oem_adas_2</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>oem_adas_3</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+<!-- MEDIA is a volume group that has a single port gain (this is why it is a group)
+     but may host different streams.
+     A priority must be defined among them: either they are mutually exclusive, so the applied
+     volume is the one of the active stream with the highest priority (ORDER MATTERS), or the
+     curves followed will be the curves for the requested attributes. -->
+ <volumeGroup>
+ <name>media_car_audio_type_3</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>media_car_audio_type_7</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>media</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>speech</name>
+ <indexMin>1</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>phone</name>
+ <indexMin>1</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-0</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+</volumeGroups>
+
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criteria.xml b/services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criteria.xml
similarity index 100%
rename from services/audiopolicy/engineconfigurable/wrapper/config/policy_criteria.xml
rename to services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criteria.xml
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in b/services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criterion_types.xml.in
similarity index 95%
rename from services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
rename to services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criterion_types.xml.in
index 6cb799f..fe17369 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_criterion_types.xml.in
+++ b/services/audiopolicy/engineconfigurable/config/example/common/audio_policy_engine_criterion_types.xml.in
@@ -16,7 +16,12 @@
<criterion_types>
<criterion_type name="OutputDevicesMaskType" type="inclusive"/>
<criterion_type name="InputDevicesMaskType" type="inclusive"/>
- <criterion_type name="OutputDevicesAddressesType" type="inclusive"/>
+ <criterion_type name="OutputDevicesAddressesType" type="inclusive">
+ <values>
+ <!-- legacy remote submix -->
+ <value literal="0" numerical="1"/>
+ </values>
+ </criterion_type>
<criterion_type name="InputDevicesAddressesType" type="inclusive"/>
<criterion_type name="AndroidModeType" type="exclusive"/>
<criterion_type name="BooleanType" type="exclusive">
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
similarity index 67%
copy from services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
copy to services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
index 5d9193b..4ca33b4 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
@@ -12,14 +12,13 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--->
-<!--
- These are the minimum required criteria to be used by Audio HAL to ensure a basic
- user experience on an Android device
--->
-<configuration name="audio_policy_wrapper_configuration" xmlns:xi="http://www.w3.org/2001/XInclude">
+ -->
- <xi:include href="policy_criterion_types.xml"/>
- <xi:include href="policy_criteria.xml"/>
+<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+ <xi:include href="audio_policy_engine_product_strategies.xml"/>
+ <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+ <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
</configuration>
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full default volume table for all device categories -->
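+<!-- Worked example (added for readability): the engine maps the current volume index to the
+     curve's 0..100 scale and interpolates the attenuation linearly between the two surrounding
+     points. With DEFAULT_MEDIA_VOLUME_CURVE below, 40% of the index range falls halfway between
+     the points (20,-4000) and (60,-1700), yielding roughly -2850 mB, i.e. about -28.5 dB. -->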
+<volumes>
+ <reference name="FULL_SCALE_VOLUME_CURVE">
+ <!-- Full Scale reference Volume Curve -->
+ <point>0,0</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="SILENT_VOLUME_CURVE">
+ <point>0,-9600</point>
+ <point>100,-9600</point>
+ </reference>
+ <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+ <!-- Default System reference Volume Curve -->
+ <point>1,-2400</point>
+ <point>33,-1800</point>
+ <point>66,-1200</point>
+ <point>100,-600</point>
+ </reference>
+ <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+ <!-- Default Media reference Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+ <!-- Default is Speaker Media Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+ <!-- Default is Speaker System Volume Curve -->
+ <point>1,-4680</point>
+ <point>42,-2070</point>
+ <point>85,-540</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+ <!-- Default is Ext Media System Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+</volumes>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
new file mode 100644
index 0000000..9398743
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<ProductStrategies>
+
+ <!-- "hidden strategies" like TTS, enforced audible:
+ Shall we expose them here or keep it hard coded -->
+
+ <!-- Used to identify the volume of audio streams for enforced system sounds in certain
+ countries (e.g. the camera shutter sound in Japan).
+ This strategy only has higher priority than the phone strategy if the force use for
+ system is set to enforced. -->
+
+ <ProductStrategy name="STRATEGY_PHONE">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="voice_call">
+ <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="bluetooth_sco">
+ <Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_SONIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="alarm">
+ <Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
+ <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE" volumeGroup="enforced_audible">
+ <Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_ACCESSIBILITY">
+ <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY" volumeGroup="accessibility">
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_MEDIA">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="music">
+ <Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
+ <Attributes></Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_DTMF">
+ <AttributesGroup streamType="AUDIO_STREAM_DTMF" volumeGroup="dtmf">
+ <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- Used to identify the volume of audio streams exclusively transmitted through the speaker
+ (TTS) of the device -->
+ <ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
+ <Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- The rerouting strategy may possibly be removed, since it follows media??? -->
+ <ProductStrategy name="STRATEGY_REROUTING">
+ <AttributesGroup streamType="AUDIO_STREAM_REROUTING" volumeGroup="rerouting">
+ <Attributes></Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- Default product strategy has empty attributes -->
+ <ProductStrategy name="STRATEGY_PATCH">
+ <AttributesGroup streamType="AUDIO_STREAM_PATCH" volumeGroup="patch">
+ <Attributes></Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+
+</ProductStrategies>
+
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..707a184
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- A volume section defines a volume curve for a given use case and device category.
+It contains a list of points of this curve expressing the attenuation in millibels for a given
+volume index from 0 to 100, e.g.:
+<volume deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>voice_call</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2700</point>
+ <point>33,-1800</point>
+ <point>66,-900</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-5100</point>
+ <point>57,-2800</point>
+ <point>71,-2500</point>
+ <point>85,-2300</point>
+ <point>100,-2100</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>music</name>
+ <indexMin>0</indexMin>
+ <indexMax>25</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>alarm</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>notification</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>bluetooth_sco</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>enforced_audible</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-3400</point>
+ <point>71,-2400</point>
+ <point>100,-2000</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>dtmf</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-4000</point>
+ <point>71,-2400</point>
+ <point>100,-1400</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>accessibility</name>
+ <indexMin>1</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>rerouting</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>patch</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+</volumeGroups>
+
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
index 2e29a9b..1fc2264 100644
--- a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
+++ b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
@@ -16,10 +16,11 @@
#pragma once
-#include <RoutingStrategy.h>
+#include <policy.h>
#include <EngineDefinition.h>
#include <Volume.h>
#include <system/audio.h>
+#include <media/AudioCommonTypes.h>
#include <utils/Errors.h>
#include <string>
#include <vector>
@@ -36,19 +37,6 @@
{
public:
/**
- * Add a strategy to the engine
- *
- * @param[in] name of the strategy to add
- * @param[in] identifier: the numerical value associated to this member. It MUST match either
- * system/audio.h or system/audio_policy.h enumration value in order to link the
- * parameter controled by the PFW and the policy manager component.
- *
- * @return NO_ERROR if the strategy has been added successfully, error code otherwise.
- *
- */
- virtual android::status_t addStrategy(const std::string &name, routing_strategy id) = 0;
-
- /**
* Add a streams to the engine.
*
* @param[in] name of the stream to add
@@ -62,19 +50,6 @@
virtual android::status_t addStream(const std::string &name, audio_stream_type_t id) = 0;
/**
- * Add a usage to the engine
- *
- * @param[in] name of the usage to add
- * @param[in] identifier: the numerical value associated to this member. It MUST match either
- * system/audio.h or system/audio_policy.h enumration value in order to link the
- * parameter controled by the PFW and the policy manager component.
- *
- * @return NO_ERROR if the usage has been added successfully, error code otherwise.
- *
- */
- virtual android::status_t addUsage(const std::string &name, audio_usage_t id) = 0;
-
- /**
* Add an input source to the engine
*
* @param[in] name of the input source to add
@@ -88,26 +63,6 @@
virtual android::status_t addInputSource(const std::string &name, audio_source_t id) = 0;
/**
- * Set the device to be used by a strategy.
- *
- * @param[in] strategy: name of the strategy for which the device to use has to be set
- * @param[in] devices; mask of devices to be used for the given strategy.
- *
- * @return true if the devices were set correclty for this strategy, false otherwise.
- */
- virtual bool setDeviceForStrategy(const routing_strategy &strategy, audio_devices_t devices) = 0;
-
- /**
- * Set the strategy to be followed by a stream.
- *
- * @param[in] stream: name of the stream for which the strategy to use has to be set
- * @param[in] strategy to follow for the given stream.
- *
- * @return true if the strategy were set correclty for this stream, false otherwise.
- */
- virtual bool setStrategyForStream(const audio_stream_type_t &stream, routing_strategy strategy) = 0;
-
- /**
* Set the strategy to be followed by a stream.
*
* @param[in] stream: name of the stream for which the strategy to use has to be set
@@ -119,16 +74,6 @@
const audio_stream_type_t &volumeProfile) = 0;
/**
- * Set the strategy to be followed by a usage.
- *
- * @param[in] usage: name of the usage for which the strategy to use has to be set
- * @param[in] strategy to follow for the given usage.
- *
- * @return true if the strategy were set correclty for this usage, false otherwise.
- */
- virtual bool setStrategyForUsage(const audio_usage_t &usage, routing_strategy strategy) = 0;
-
- /**
* Set the input device to be used by an input source.
*
* @param[in] inputSource: name of the input source for which the device to use has to be set
@@ -139,6 +84,22 @@
virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
audio_devices_t device) = 0;
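+
+ /**
+ * Set the output device address to be used by a product strategy.
+ * (Descriptive comment added; behaviour inferred from the declaration and from its use with
+ * the bus addresses in the example settings files.)
+ *
+ * @param[in] strategy: identifier of the product strategy to configure.
+ * @param[in] address: output device address (e.g. a car audio bus address) for this strategy.
+ */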
+ virtual void setDeviceAddressForProductStrategy(product_strategy_t strategy,
+ const std::string &address) = 0;
+
+ /**
+ * Set the devices to be used by a product strategy.
+ *
+ * @param[in] strategy: identifier of the product strategy for which the devices have to be set
+ * @param[in] devices: mask of devices to be used for the given strategy.
+ *
+ * @return true if the devices were set correctly for this strategy, false otherwise.
+ */
+ virtual bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
+ audio_devices_t devices) = 0;
+
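+ /**
+ * Retrieve a product strategy identifier from its name.
+ * (Descriptive comment added; behaviour inferred from the method name. Note the parameter
+ * carries the strategy name even though it is declared as "address" here.)
+ *
+ * @param[in] address: name of the product strategy to look up.
+ *
+ * @return identifier of the product strategy matching the given name.
+ */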
+ virtual product_strategy_t getProductStrategyByName(const std::string &address) = 0;
+
protected:
virtual ~AudioPolicyPluginInterface() {}
};
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
index 7631976..060830b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
@@ -9,7 +9,7 @@
LOCAL_PATH := $(call my-dir)
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 1)
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable no-output_configurable no-input_configurable))
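+# Note: the $(filter ...) form makes this ifeq succeed when the variable matches any of the
+# listed example configurations, i.e. it acts as an "is one of" test.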
PFW_CORE := external/parameter-framework
#@TODO: upstream new domain generator
@@ -20,116 +20,79 @@
TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
+endif
+
##################################################################
# CONFIGURATION FILES
##################################################################
######### Policy PFW top level file #########
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
+
include $(CLEAR_VARS)
LOCAL_MODULE := ParameterFrameworkConfigurationPolicy.xml
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework
LOCAL_SRC_FILES := $(LOCAL_MODULE).in
+# external/parameter-framework does not allow using the debug interface, so tuning is kept
+# disabled regardless of the build variant.
AUDIO_PATTERN = @TUNING_ALLOWED@
-ifeq ($(TARGET_BUILD_VARIANT),user)
+#ifeq ($(TARGET_BUILD_VARIANT),user)
AUDIO_VALUE = false
-else
-AUDIO_VALUE = true
-endif
+#else
+#AUDIO_VALUE = true
+#endif
-LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(TARGET_OUT_VENDOR_ETC)/$(LOCAL_MODULE_RELATIVE_PATH)/$(LOCAL_MODULE)
include $(BUILD_PREBUILT)
-
-########## Policy PFW Structures #########
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicyClass.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_REQUIRED_MODULES := \
- PolicySubsystem-CommonTypes.xml \
- libpolicy-subsystem
-
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
+########## Policy PFW Common Structures #########
include $(CLEAR_VARS)
LOCAL_MODULE := PolicySubsystem-CommonTypes.xml
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/Structure/Policy
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
include $(BUILD_PREBUILT)
-######### Policy PFW Settings #########
include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE := PolicyClass.xml
+LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-LOCAL_REQUIRED_MODULES := \
- policy_criteria.xml \
- policy_criterion_types.xml \
- PolicySubsystem.xml \
- PolicyClass.xml \
- ParameterFrameworkConfigurationPolicy.xml
-
-ifeq ($(pfw_rebuild_settings),true)
-PFW_EDD_FILES := \
- $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
- $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
- $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
- $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
- $(LOCAL_PATH)/Settings/volumes.pfw
-
-LOCAL_ADDITIONAL_DEPENDENCIES := \
- $(PFW_EDD_FILES)
-
-
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criteria.xml
-
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-include $(BUILD_PFW_SETTINGS)
-else
-# Use the existing file
-LOCAL_SRC_FILES := Settings/$(LOCAL_MODULE_STEM)
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
include $(BUILD_PREBUILT)
-endif # pfw_rebuild_settings
-endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 0)
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
+
+########## Policy PFW Example Structures #########
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable no-input_configurable))
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml.common
+LOCAL_MODULE_STEM := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_REQUIRED_MODULES := \
+ PolicySubsystem-CommonTypes.xml \
+ PolicySubsystem-Volume.xml \
+ libpolicy-subsystem \
+
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable no-input_configurable))
######### Policy PFW Settings - No Output #########
-ifeq (0, 1)
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable)
include $(CLEAR_VARS)
LOCAL_MODULE := parameter-framework.policy.no-output
@@ -138,26 +101,24 @@
LOCAL_VENDOR_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
LOCAL_REQUIRED_MODULES := \
- policy_criteria.xml \
- policy_criterion_types.xml \
- PolicySubsystem.xml \
+ audio_policy_engine_criteria.xml \
+ audio_policy_engine_criterion_types.xml \
+ PolicySubsystem.xml.common \
PolicyClass.xml \
ParameterFrameworkConfigurationPolicy.xml
PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criteria.xml
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
PFW_EDD_FILES := \
$(LOCAL_PATH)/SettingsNoOutput/device_for_strategies.pfw \
- $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
- $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
$(LOCAL_PATH)/Settings/device_for_input_source.pfw \
$(LOCAL_PATH)/Settings/volumes.pfw
include $(BUILD_PFW_SETTINGS)
-endif # ifeq (0, 1)
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable)
######### Policy PFW Settings - No Input #########
-ifeq (0, 1)
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-input_configurable)
include $(CLEAR_VARS)
LOCAL_MODULE := parameter-framework.policy.no-input
@@ -166,36 +127,26 @@
LOCAL_VENDOR_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
LOCAL_REQUIRED_MODULES := \
- policy_criteria.xml \
- policy_criterion_types.xml \
- PolicySubsystem.xml \
+ audio_policy_engine_criteria.xml \
+ audio_policy_engine_criterion_types.xml \
+ PolicySubsystem.xml.common \
PolicyClass.xml \
ParameterFrameworkConfigurationPolicy.xml
PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/policy_criteria.xml
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
PFW_EDD_FILES := \
- $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
- $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
- $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
- $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
$(LOCAL_PATH)/SettingsNoInput/device_for_input_source.pfw \
$(LOCAL_PATH)/Settings/volumes.pfw
include $(BUILD_PFW_SETTINGS)
-endif # ifeq (1, 0)
-
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-input_configurable)
#######################################################################
# Recursive call sub-folder Android.mk
#######################################################################
include $(call all-makefiles-under,$(LOCAL_PATH))
+
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk
new file mode 100644
index 0000000..ea4a58f
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk
@@ -0,0 +1,86 @@
+################################################################################################
+#
+# @NOTE:
+# Audio Policy Engine configurable example for a generic automotive device build
+#
+# Any vendor shall provide its own configuration within the corresponding device folder.
+#
+################################################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
+LOCAL_PATH := $(call my-dir)
+
+PFW_CORE := external/parameter-framework
+PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
+BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
+
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+
+########## Policy PFW Structures #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml.car
+LOCAL_MODULE_STEM := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_REQUIRED_MODULES := \
+ ProductStrategies.xml.car \
+ PolicySubsystem-Volume.xml \
+ PolicySubsystem-CommonTypes.xml \
+ libpolicy-subsystem
+
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := ProductStrategies.xml.car
+LOCAL_MODULE_STEM := ProductStrategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+######### Policy PFW Settings #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy.car
+LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+
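+# Note added: the .pfw files listed below are Extended Domain Description (EDD) settings; the
+# BUILD_PFW_SETTINGS rules feed them to the parameter-framework domain generator to produce the
+# PolicyConfigurableDomains.xml settings file declared above.
+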
+PFW_EDD_FILES := \
+ $(LOCAL_PATH)/Settings/device_for_product_strategies.pfw \
+ $(LOCAL_PATH)/../Settings/device_for_input_source.pfw \
+ $(LOCAL_PATH)/../Settings/volumes.pfw
+
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+ $(PFW_EDD_FILES)
+
+LOCAL_REQUIRED_MODULES := \
+ PolicySubsystem.xml.car \
+ PolicyClass.xml \
+ audio_policy_engine_criteria.xml \
+ audio_policy_engine_criterion_types.xml \
+ ParameterFrameworkConfigurationPolicy.xml
+
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
+
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+include $(BUILD_PFW_SETTINGS)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Settings/device_for_product_strategies.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Settings/device_for_product_strategies.pfw
new file mode 100644
index 0000000..196d82c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Settings/device_for_product_strategies.pfw
@@ -0,0 +1,717 @@
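+# Layout reminder (note added): a supDomain groups related domains; each domain holds
+# configurations (conf:). A conf first lists its selection rules (e.g.
+# "AvailableOutputDevicesAddresses Includes BUS08_OEM1") and then the parameter values applied
+# when those rules match; a conf with no rule, such as "conf: calibration", acts as the
+# always-applicable configuration of its domain.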
+supDomain: DeviceForProductStrategies
+ supDomain: OemTrafficAnouncement
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/oem_traffic_anouncement/selected_output_devices/mask
+ earpiece = 0
+ speaker = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/oem_traffic_anouncement/device_address = BUS08_OEM1
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS08_OEM1
+
+ component: /Policy/policy/product_strategies/oem_traffic_anouncement/selected_output_devices/mask
+ bus = 1
+
+ conf: Default
+ component: /Policy/policy/product_strategies/oem_traffic_anouncement/selected_output_devices/mask
+ bus = 0
+
+ supDomain: OemStrategy1
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/oem_strategy_1/selected_output_devices/mask
+ earpiece = 0
+ speaker = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/oem_strategy_1/device_address = BUS08_OEM1
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS08_OEM1
+
+ component: /Policy/policy/product_strategies/oem_strategy_1/selected_output_devices/mask
+ bus = 1
+
+ conf: Default
+ component: /Policy/policy/product_strategies/oem_strategy_1/selected_output_devices/mask
+ bus = 0
+
+
+
+ supDomain: OemStrategy2
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/oem_strategy_2/selected_output_devices/mask
+ earpiece = 0
+ speaker = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/oem_strategy_2/device_address = BUS08_OEM1
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS08_OEM1
+
+ component: /Policy/policy/product_strategies/oem_strategy_2/selected_output_devices/mask
+ bus = 1
+
+ conf: Default
+ component: /Policy/policy/product_strategies/oem_strategy_2/selected_output_devices/mask
+ bus = 0
+
+
+
+ supDomain: Radio
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/radio/device_address = BUS09_OEM2
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS09_OEM2
+
+ component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/radio/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+ supDomain: ExtAudioSource
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/ext_audio_source/selected_output_devices/mask
+ earpiece = 0
+ speaker = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/ext_audio_source/device_address = BUS09_OEM2
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS09_OEM2
+
+ component: /Policy/policy/product_strategies/ext_audio_source/selected_output_devices/mask
+ bus = 1
+
+ conf: Default
+ component: /Policy/policy/product_strategies/ext_audio_source/selected_output_devices/mask
+ bus = 0
+
+
+
+ supDomain: VoiceCommand
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/voice_command/device_address = BUS02_VOICE_COMMAND
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS02_VOICE_COMMAND
+
+ component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/voice_command/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+ supDomain: SafetyAlert
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/safety_alert/device_address = BUS00_MEDIA
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS00_MEDIA
+
+ component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/safety_alert/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+ supDomain: Music
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/music/device_address = BUS00_MEDIA
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS00_MEDIA
+
+ component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/music/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+
+ supDomain: NavGuidance
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/nav_guidance/device_address = BUS01_NAV_GUIDANCE
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS01_NAV_GUIDANCE
+
+ component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/nav_guidance/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+ supDomain: VoiceCall
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/voice_call/device_address = BUS04_CALL
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS04_CALL
+
+ component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/voice_call/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+ supDomain: Alarm
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/alarm/device_address = BUS05_ALARM
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS05_ALARM
+
+ component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/alarm/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+ supDomain: Ring
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/ring/device_address = BUS03_CALL_RING
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS03_CALL_RING
+
+ component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/ring/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+ supDomain: Notification
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/notification/device_address = BUS06_NOTIFICATION
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS06_NOTIFICATION
+
+ component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/notification/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
+ supDomain: System
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+ earpiece = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/system/device_address = BUS07_SYSTEM_SOUND
+
+ domain: SelectedDevice
+ conf: Bus
+ AvailableOutputDevices Includes Bus
+ AvailableOutputDevicesAddresses Includes BUS07_SYSTEM_SOUND
+
+ component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+ speaker = 0
+ bus = 1
+
+ conf: Speaker
+ AvailableOutputDevices Includes Speaker
+ component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+ speaker = 1
+ bus = 0
+
+ conf: Default
+ component: /Policy/policy/product_strategies/system/selected_output_devices/mask
+ speaker = 0
+ bus = 0
+
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/PolicySubsystem.xml
new file mode 100644
index 0000000..b55ce2c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/PolicySubsystem.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<Subsystem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xi="http://www.w3.org/2001/XInclude"
+ xsi:noNamespaceSchemaLocation="Schemas/Subsystem.xsd"
+ Name="policy" Type="Policy">
+
+ <ComponentLibrary>
+ <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
+ <!-- Common Types definition -->
+ <xi:include href="PolicySubsystem-CommonTypes.xml"/>
+ <xi:include href="ProductStrategies.xml"/>
+
+
+ <!--#################### GLOBAL COMPONENTS END ####################-->
+
+ <!--#################### STREAM BEGIN ####################-->
+
+ <ComponentType Name="Streams" Description="associated to audio_stream_type_t definition">
+ <Component Name="voice_call" Type="Stream" Mapping="Name:AUDIO_STREAM_VOICE_CALL"/>
+ <Component Name="system" Type="Stream" Mapping="Name:AUDIO_STREAM_SYSTEM"/>
+ <Component Name="ring" Type="Stream" Mapping="Name:AUDIO_STREAM_RING"/>
+ <Component Name="music" Type="Stream" Mapping="Name:AUDIO_STREAM_MUSIC"/>
+ <Component Name="alarm" Type="Stream" Mapping="Name:AUDIO_STREAM_ALARM"/>
+ <Component Name="notification" Type="Stream" Mapping="Name:AUDIO_STREAM_NOTIFICATION"/>
+ <Component Name="bluetooth_sco" Type="Stream" Mapping="Name:AUDIO_STREAM_BLUETOOTH_SCO"/>
+ <Component Name="enforced_audible" Type="Stream" Mapping="Name:AUDIO_STREAM_ENFORCED_AUDIBLE"
+ Description="Sounds that cannot be muted by user and must be routed to speaker"/>
+ <Component Name="dtmf" Type="Stream" Mapping="Name:AUDIO_STREAM_DTMF"/>
+ <Component Name="tts" Type="Stream" Mapping="Name:AUDIO_STREAM_TTS"
+ Description="Transmitted Through Speaker. Plays over speaker only, silent on other devices"/>
+ <Component Name="accessibility" Type="Stream" Mapping="Name:AUDIO_STREAM_ACCESSIBILITY"
+ Description="For accessibility talk back prompts"/>
+ <Component Name="rerouting" Type="Stream" Mapping="Name:AUDIO_STREAM_REROUTING"
+ Description="For dynamic policy output mixes"/>
+ <Component Name="patch" Type="Stream" Mapping="Name:AUDIO_STREAM_PATCH"
+ Description="For internal audio flinger tracks. Fixed volume"/>
+ </ComponentType>
+
+ <!--#################### STREAM END ####################-->
+
+ <!--#################### INPUT SOURCE BEGIN ####################-->
+
+ <ComponentType Name="InputSources" Description="associated to audio_source_t definition,
+ identifier mapping must match the value of the enum">
+ <Component Name="default" Type="InputSource" Mapping="Name:AUDIO_SOURCE_DEFAULT"/>
+ <Component Name="mic" Type="InputSource" Mapping="Name:AUDIO_SOURCE_MIC"/>
+ <Component Name="voice_uplink" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_UPLINK"/>
+ <Component Name="voice_downlink" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_DOWNLINK"/>
+ <Component Name="voice_call" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_CALL"/>
+ <Component Name="camcorder" Type="InputSource" Mapping="Name:AUDIO_SOURCE_CAMCORDER"/>
+ <Component Name="voice_recognition" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_RECOGNITION"/>
+ <Component Name="voice_communication" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_COMMUNICATION"/>
+ <Component Name="remote_submix" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_REMOTE_SUBMIX"/>
+ <Component Name="unprocessed" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_UNPROCESSED"/>
+ <Component Name="fm_tuner" Type="InputSource" Mapping="Name:AUDIO_SOURCE_FM_TUNER"/>
+ <Component Name="hotword" Type="InputSource" Mapping="Name:AUDIO_SOURCE_HOTWORD"/>
+ </ComponentType>
+
+ <!--#################### INPUT SOURCE END ####################-->
+ </ComponentLibrary>
+
+ <InstanceDefinition>
+ <Component Name="streams" Type="Streams"/>
+ <Component Name="input_sources" Type="InputSources"/>
+ <Component Name="product_strategies" Type="ProductStrategies"/>
+ </InstanceDefinition>
+</Subsystem>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/ProductStrategies.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/ProductStrategies.xml
new file mode 100644
index 0000000..53bba03
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Structure/ProductStrategies.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<ComponentTypeSet xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xi="http://www.w3.org/2001/XInclude"
+ xsi:noNamespaceSchemaLocation="Schemas/ComponentTypeSet.xsd">
+
+ <ComponentType Name="ProductStrategies" Description="">
+ <Component Name="oem_traffic_anouncement" Type="ProductStrategy"/>
+ <Component Name="oem_strategy_1" Type="ProductStrategy"/>
+ <Component Name="oem_strategy_2" Type="ProductStrategy"/>
+
+ <Component Name="radio" Type="ProductStrategy"/>
+ <Component Name="ext_audio_source" Type="ProductStrategy"/>
+ <Component Name="voice_command" Type="ProductStrategy"/>
+ <Component Name="safety_alert" Type="ProductStrategy"/>
+
+ <Component Name="music" Type="ProductStrategy"/>
+ <Component Name="nav_guidance" Type="ProductStrategy"/>
+ <Component Name="voice_call" Type="ProductStrategy"/>
+ <Component Name="alarm" Type="ProductStrategy"/>
+ <Component Name="ring" Type="ProductStrategy"/>
+ <Component Name="notification" Type="ProductStrategy"/>
+ <Component Name="system" Type="ProductStrategy"/>
+ </ComponentType>
+
+</ComponentTypeSet>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk
new file mode 100644
index 0000000..e9d67e9
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk
@@ -0,0 +1,94 @@
+################################################################################################
+#
+# @NOTE:
+# Audio Policy Engine configurable example for generic device build
+#
+# Any vendor shall have its own configuration within the corresponding device folder
+#
+################################################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
+
+LOCAL_PATH := $(call my-dir)
+
+PFW_CORE := external/parameter-framework
+PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
+BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+########## Policy PFW Structures #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml.phone
+LOCAL_MODULE_STEM := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_REQUIRED_MODULES := \
+ PolicySubsystem-CommonTypes.xml \
+ ProductStrategies.xml.phone \
+ PolicySubsystem-Volume.xml \
+ libpolicy-subsystem \
+
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := ProductStrategies.xml.phone
+LOCAL_MODULE_STEM := ProductStrategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+######### Policy PFW Settings #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy.phone
+LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+
+PFW_EDD_FILES := \
+ $(LOCAL_PATH)/../Settings/device_for_input_source.pfw \
+ $(LOCAL_PATH)/../Settings/volumes.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_media.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_accessibility.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_dtmf.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_enforced_audible.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_phone.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_sonification.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_sonification_respectful.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_rerouting.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_transmitted_through_speaker.pfw \
+ $(LOCAL_PATH)/Settings/device_for_product_strategy_unknown.pfw
+
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+ $(PFW_EDD_FILES)
+
+LOCAL_REQUIRED_MODULES := \
+ PolicySubsystem.xml.phone \
+ PolicyClass.xml \
+ audio_policy_engine_criteria.xml \
+ audio_policy_engine_criterion_types.xml \
+ ParameterFrameworkConfigurationPolicy.xml
+
+PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
+PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
+
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+include $(BUILD_PFW_SETTINGS)
+
+endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_accessibility.pfw
similarity index 87%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_accessibility.pfw
index eb11980..53e93de 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_accessibility.pfw
@@ -1,4 +1,4 @@
-supDomain: DeviceForStrategy
+supDomain: DeviceForProductStrategy
supDomain: Accessibility
#
@@ -9,7 +9,7 @@
#
domain: UnreachableDevices
conf: Calibration
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
hdmi_arc = 0
spdif = 0
aux_line = 0
@@ -18,6 +18,8 @@
telephony_tx = 0
ip = 0
bus = 0
+ proxy = 0
+ usb_headset = 0
stub = 0
domain: Device
@@ -28,8 +30,9 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes RemoteSubmix
+ AvailableOutputDevicesAddresses Includes 0
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 1
earpiece = 0
bluetooth_a2dp = 0
@@ -57,7 +60,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dp
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 1
@@ -85,7 +88,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpHeadphones
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -113,7 +116,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes BluetoothA2dpSpeaker
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -141,7 +144,7 @@
ForceUseForMedia Is ForceSpeaker
AvailableOutputDevices Includes Speaker
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -170,7 +173,7 @@
AvailableOutputDevices Includes BluetoothScoCarkit
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -199,7 +202,7 @@
AvailableOutputDevices Includes BluetoothScoHeadset
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -228,7 +231,7 @@
AvailableOutputDevices Includes BluetoothSco
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -265,7 +268,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes WiredHeadphone
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -301,7 +304,7 @@
#
AvailableOutputDevices Includes Line
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -338,7 +341,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes WiredHeadset
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -378,7 +381,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes UsbDevice
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -409,7 +412,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes UsbAccessory
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -439,7 +442,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes DgtlDockHeadset
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -469,7 +472,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes Hdmi
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -501,7 +504,7 @@
AvailableOutputDevices Includes AnlgDockHeadset
ForceUseForDock Is ForceAnalogDock
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -530,7 +533,7 @@
AvailableOutputDevices Includes Earpiece
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 1
bluetooth_a2dp = 0
@@ -552,7 +555,7 @@
conf: Speaker
AvailableOutputDevices Includes Speaker
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
@@ -572,7 +575,7 @@
hdmi = 0
conf: Default
- component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/accessibility/selected_output_devices/mask
remote_submix = 0
earpiece = 0
bluetooth_a2dp = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_dtmf.pfw
similarity index 85%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_dtmf.pfw
index 883c741..b8426c6 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_dtmf.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_dtmf.pfw
@@ -1,16 +1,17 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategies
supDomain: Dtmf
-
domain: UnreachableDevices
- conf: Calibration
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ conf: calibration
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
fm = 0
speaker_safe = 0
bluetooth_sco_carkit = 0
ip = 0
bus = 0
+ proxy = 0
+ usb_headset = 0
stub = 0
+ /Policy/policy/product_strategies/dtmf/device_address =
domain: Device2
conf: RemoteSubmix
@@ -20,8 +21,9 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes RemoteSubmix
+ AvailableOutputDevicesAddresses Includes 0
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 1
earpiece = 0
wired_headset = 0
@@ -49,7 +51,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dp
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -77,7 +79,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpHeadphones
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -105,7 +107,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpSpeaker
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -134,7 +136,7 @@
ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
AvailableOutputDevices Includes Speaker
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -163,7 +165,7 @@
AvailableOutputDevices Includes BluetoothScoHeadset
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -192,7 +194,7 @@
AvailableOutputDevices Includes BluetoothSco
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -229,7 +231,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes WiredHeadphone
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -268,7 +270,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes Line
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -305,7 +307,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes WiredHeadset
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 1
@@ -345,7 +347,7 @@
ForceUseForCommunication Is ForceSpeaker
AvailableOutputDevices Includes UsbDevice
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -376,7 +378,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes UsbAccessory
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -406,7 +408,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes DgtlDockHeadset
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -436,7 +438,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes Hdmi
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -468,7 +470,7 @@
ForceUseForDock Is ForceAnalogDock
AvailableOutputDevices Includes AnlgDockHeadset
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -497,7 +499,7 @@
AvailableOutputDevices Includes Earpiece
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 1
wired_headset = 0
@@ -535,7 +537,7 @@
ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
AvailableOutputDevices Includes Speaker
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -555,7 +557,7 @@
speaker = 1
conf: Default
- component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/dtmf/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -585,10 +587,10 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes HdmiArc
- /Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 1
+ /Policy/policy/product_strategies/dtmf/selected_output_devices/mask/hdmi_arc = 1
conf: NotSelected
- /Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 0
+ /Policy/policy/product_strategies/dtmf/selected_output_devices/mask/hdmi_arc = 0
domain: Spdif
#
@@ -601,10 +603,10 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes Spdif
- /Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 1
+ /Policy/policy/product_strategies/dtmf/selected_output_devices/mask/spdif = 1
conf: NotSelected
- /Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 0
+ /Policy/policy/product_strategies/dtmf/selected_output_devices/mask/spdif = 0
domain: AuxLine
#
@@ -617,7 +619,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes AuxLine
- /Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 1
+ /Policy/policy/product_strategies/dtmf/selected_output_devices/mask/aux_line = 1
conf: NotSelected
- /Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 0
+ /Policy/policy/product_strategies/dtmf/selected_output_devices/mask/aux_line = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_enforced_audible.pfw
similarity index 83%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_enforced_audible.pfw
index f504631..2daa9ac 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_enforced_audible.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_enforced_audible.pfw
@@ -1,10 +1,10 @@
-supDomain: DeviceForStrategy
+supDomain: DeviceForProductStrategy
supDomain: EnforcedAudible
domain: UnreachableDevices
conf: Calibration
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
# no enforced_audible on remote submix (e.g. WFD)
hdmi_arc = 0
spdif = 0
@@ -13,6 +13,8 @@
ip = 0
bus = 0
fm = 0
+ proxy = 0
+ usb_headset = 0
stub = 0
domain: Speaker
@@ -51,18 +53,19 @@
AvailableOutputDevices Excludes AnlgDockHeadset
ForceUseForDock IsNot ForceAnalogDock
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
speaker = 1
conf: NotSelected
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
speaker = 0
domain: Device2
conf: RemoteSubmix
AvailableOutputDevices Includes RemoteSubmix
+ AvailableOutputDevicesAddresses Includes 0
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 1
earpiece = 0
wired_headset = 0
@@ -85,7 +88,7 @@
AvailableOutputDevices Includes BluetoothA2dp
ForceUseForMedia IsNot ForceNoBtA2dp
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -108,7 +111,7 @@
AvailableOutputDevices Includes BluetoothA2dpHeadphones
ForceUseForMedia IsNot ForceNoBtA2dp
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -131,7 +134,7 @@
AvailableOutputDevices Includes BluetoothA2dpSpeaker
ForceUseForMedia IsNot ForceNoBtA2dp
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -154,7 +157,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes WiredHeadphone
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -177,7 +180,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes Line
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -200,7 +203,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes WiredHeadset
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 1
@@ -223,7 +226,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes UsbAccessory
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -246,7 +249,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes UsbDevice
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -269,7 +272,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes DgtlDockHeadset
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -292,7 +295,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes Hdmi
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -316,7 +319,7 @@
ForceUseForDock Is ForceAnalogDock
AvailableOutputDevices Includes AnlgDockHeadset
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
@@ -336,7 +339,7 @@
line = 0
conf: NoDevice
- component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/enforced_audible/selected_output_devices/mask
remote_submix = 0
earpiece = 0
wired_headset = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_media.pfw
similarity index 77%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_media.pfw
index bdb6ae0..d6d355c 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_media.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_media.pfw
@@ -1,10 +1,8 @@
-domainGroup: DeviceForStrategy
-
- domainGroup: Media
-
+supDomain: DeviceForProductStrategy
+ supDomain: Media
domain: UnreachableDevices
- conf: Calibration
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ conf: calibration
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
fm = 0
speaker_safe = 0
earpiece = 0
@@ -13,14 +11,18 @@
bluetooth_sco_carkit = 0
telephony_tx = 0
ip = 0
+ proxy = 0
+ usb_headset = 0
bus = 0
stub = 0
+ /Policy/policy/product_strategies/media/device_address =
domain: Device2
conf: RemoteSubmix
AvailableOutputDevices Includes RemoteSubmix
+ AvailableOutputDevicesAddresses Includes 0
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -40,7 +42,7 @@
ForceUseForCommunication IsNot ForceBtSco
AvailableOutputDevices Includes BluetoothA2dp
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -60,7 +62,7 @@
ForceUseForCommunication IsNot ForceBtSco
AvailableOutputDevices Includes BluetoothA2dpHeadphones
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -80,7 +82,7 @@
ForceUseForCommunication IsNot ForceBtSco
AvailableOutputDevices Includes BluetoothA2dpSpeaker
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -103,7 +105,7 @@
#
ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 1
hdmi = 0
dgtl_dock_headset = 0
@@ -121,7 +123,7 @@
conf: WiredHeadphone
AvailableOutputDevices Includes WiredHeadphone
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -139,7 +141,7 @@
conf: Line
AvailableOutputDevices Includes Line
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -157,7 +159,7 @@
conf: WiredHeadset
AvailableOutputDevices Includes WiredHeadset
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -175,7 +177,7 @@
conf: UsbAccessory
AvailableOutputDevices Includes UsbAccessory
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -193,7 +195,7 @@
conf: UsbDevice
AvailableOutputDevices Includes UsbDevice
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -211,7 +213,7 @@
conf: DgtlDockHeadset
AvailableOutputDevices Includes DgtlDockHeadset
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 1
@@ -229,7 +231,7 @@
conf: AuxDigital
AvailableOutputDevices Includes Hdmi
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 1
dgtl_dock_headset = 0
@@ -248,7 +250,7 @@
AvailableOutputDevices Includes AnlgDockHeadset
ForceUseForDock Is ForceAnalogDock
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -271,7 +273,7 @@
ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
ForceUseForCommunication IsNot ForceBtSco
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 1
hdmi = 0
dgtl_dock_headset = 0
@@ -287,7 +289,7 @@
line = 0
conf: Default
- component: /Policy/policy/strategies/media/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/media/selected_output_devices/mask
speaker = 0
hdmi = 0
dgtl_dock_headset = 0
@@ -310,10 +312,10 @@
conf: Selected
AvailableOutputDevices Includes HdmiArc
- /Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 1
+ /Policy/policy/product_strategies/media/selected_output_devices/mask/hdmi_arc = 1
conf: NotSelected
- /Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 0
+ /Policy/policy/product_strategies/media/selected_output_devices/mask/hdmi_arc = 0
domain: Spdif
#
@@ -323,16 +325,16 @@
conf: Selected
AvailableOutputDevices Includes Spdif
- /Policy/policy/strategies/media/selected_output_devices/mask/spdif = 1
+ /Policy/policy/product_strategies/media/selected_output_devices/mask/spdif = 1
conf: NotSelected
- /Policy/policy/strategies/media/selected_output_devices/mask/spdif = 0
+ /Policy/policy/product_strategies/media/selected_output_devices/mask/spdif = 0
domain: AuxLine
conf: Selected
AvailableOutputDevices Includes AuxLine
- /Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 1
+ /Policy/policy/product_strategies/media/selected_output_devices/mask/aux_line = 1
conf: NotSelected
- /Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 0
+ /Policy/policy/product_strategies/media/selected_output_devices/mask/aux_line = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_phone.pfw
similarity index 86%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_phone.pfw
index d371ad9..5693d4e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_phone.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_phone.pfw
@@ -1,10 +1,8 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategy
supDomain: Phone
-
domain: UnreachableDevices
- conf: Calibration
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ conf: calibration
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
# no sonification on remote submix (e.g. WFD)
remote_submix = 0
hdmi_arc = 0
@@ -12,16 +10,18 @@
spdif = 0
fm = 0
speaker_safe = 0
- ip = 0
bus = 0
+ proxy = 0
+ usb_headset = 0
stub = 0
+ /Policy/policy/product_strategies/phone/device_address =
domain: Device
conf: ScoCarkit
AvailableOutputDevices Includes BluetoothScoCarkit
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -44,7 +44,7 @@
AvailableOutputDevices Includes BluetoothScoHeadset
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -67,7 +67,7 @@
AvailableOutputDevices Includes BluetoothSco
ForceUseForCommunication Is ForceBtSco
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -97,7 +97,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
ForceUseForCommunication Is ForceNone
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -127,7 +127,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
ForceUseForCommunication Is ForceNone
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -157,7 +157,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
ForceUseForCommunication Is ForceSpeaker
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -184,7 +184,7 @@
AvailableOutputDevices Includes WiredHeadphone
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 1
@@ -211,7 +211,7 @@
AvailableOutputDevices Includes WiredHeadset
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 1
wired_headphone = 0
@@ -234,7 +234,7 @@
AvailableOutputDevices Includes Line
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -272,7 +272,7 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -300,7 +300,7 @@
TelephonyMode IsNot InCommunication
TelephonyMode IsNot InCall
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -328,7 +328,7 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -356,7 +356,7 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -384,7 +384,7 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -411,7 +411,7 @@
AvailableOutputDevices Includes Earpiece
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 1
wired_headset = 0
wired_headphone = 0
@@ -438,7 +438,7 @@
AvailableOutputDevices Includes Speaker
ForceUseForCommunication Is ForceSpeaker
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -461,7 +461,7 @@
#
# Fallback on default output device which can be speaker for example
#
- component: /Policy/policy/strategies/phone/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/phone/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_rerouting.pfw
new file mode 100644
index 0000000..c064c18
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_rerouting.pfw
@@ -0,0 +1,43 @@
+supDomain: DeviceForProductStrategy
+ supDomain: Rerouting
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/rerouting/selected_output_devices/mask
+ earpiece = 0
+ speaker = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ stub = 0
+ /Policy/policy/product_strategies/rerouting/device_address =
+
+ domain: SelectedDevice
+ conf: Bus
+ component: /Policy/policy/product_strategies/rerouting/selected_output_devices/mask
+ bus = 1
+
+ conf: Default
+ component: /Policy/policy/product_strategies/rerouting/selected_output_devices/mask
+ bus = 0
+
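For reference, the bit names used in these settings files (earpiece, speaker, bus, stub, and so on) correspond to output device types from system/audio.h; the exact bit packing is defined by the OutputDevicesMask component type and is not repeated here. A minimal sketch of that correspondence, with the helper name and the subset of entries chosen for illustration only:

    // Illustrative only: map a few of the mask bit names used above to
    // audio_devices_t values. The authoritative bit layout is defined by the
    // OutputDevicesMask component type in PolicySubsystem-CommonTypes.xml.
    #include <map>
    #include <string>
    #include <system/audio.h>

    static audio_devices_t deviceTypeForBitName(const std::string &name)
    {
        static const std::map<std::string, audio_devices_t> kTable = {
            {"earpiece",      AUDIO_DEVICE_OUT_EARPIECE},
            {"speaker",       AUDIO_DEVICE_OUT_SPEAKER},
            {"wired_headset", AUDIO_DEVICE_OUT_WIRED_HEADSET},
            {"bus",           AUDIO_DEVICE_OUT_BUS},
            {"stub",          AUDIO_DEVICE_OUT_STUB},
        };
        const auto it = kTable.find(name);
        return it != kTable.end() ? it->second : AUDIO_DEVICE_NONE;
    }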
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification.pfw
similarity index 86%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification.pfw
index 70740d1..c4edeeb 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification.pfw
@@ -1,11 +1,8 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategy
supDomain: Sonification
-
domain: UnreachableDevices
- conf: Calibration
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
- # no sonification on remote submix (e.g. WFD)
+ conf: calibration
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
remote_submix = 0
hdmi_arc = 0
spdif = 0
@@ -16,9 +13,12 @@
# Sonification follows phone strategy if in call but HDMI is not reachable
#
hdmi = 0
- ip = 0
bus = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
stub = 0
+ /Policy/policy/product_strategies/sonification/device_address =
domain: Speaker
@@ -41,11 +41,11 @@
TelephonyMode Is InCommunication
AvailableOutputDevices Excludes Line
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
speaker = 1
conf: NotSelected
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
speaker = 0
domain: Device2
@@ -59,7 +59,7 @@
TelephonyMode IsNot InCommunication
ForceUseForMedia IsNot ForceNoBtA2dp
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -85,7 +85,7 @@
TelephonyMode IsNot InCommunication
ForceUseForMedia IsNot ForceNoBtA2dp
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -111,7 +111,7 @@
TelephonyMode IsNot InCommunication
ForceUseForMedia IsNot ForceNoBtA2dp
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -138,7 +138,7 @@
ForceUseForCommunication Is ForceBtSco
AvailableOutputDevices Includes BluetoothScoCarkit
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -165,7 +165,7 @@
ForceUseForCommunication Is ForceBtSco
AvailableOutputDevices Includes BluetoothScoHeadset
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -192,7 +192,7 @@
ForceUseForCommunication Is ForceBtSco
AvailableOutputDevices Includes BluetoothSco
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -228,7 +228,7 @@
TelephonyMode Is InCommunication
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 1
@@ -266,7 +266,7 @@
TelephonyMode IsNot InCommunication
ForceUseForMedia IsNot ForceSpeaker
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -302,7 +302,7 @@
TelephonyMode Is InCommunication
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 1
wired_headphone = 0
@@ -339,7 +339,7 @@
TelephonyMode Is InCommunication
ForceUseForCommunication IsNot ForceSpeaker
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -368,7 +368,7 @@
TelephonyMode IsNot InCommunication
ForceUseForMedia IsNot ForceSpeaker
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -397,7 +397,7 @@
TelephonyMode IsNot InCommunication
ForceUseForMedia IsNot ForceSpeaker
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -427,7 +427,7 @@
ForceUseForMedia IsNot ForceSpeaker
ForceUseForDock Is ForceAnalogDock
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
@@ -454,7 +454,7 @@
ForceUseForCommunication IsNot ForceSpeaker
AvailableOutputDevices Includes Earpiece
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 1
wired_headset = 0
wired_headphone = 0
@@ -472,7 +472,7 @@
line = 0
conf: None
- component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification/selected_output_devices/mask
earpiece = 0
wired_headset = 0
wired_headphone = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification_respectful.pfw
similarity index 85%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification_respectful.pfw
index b673c4f..0a3dd5f 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_sonification_respectful.pfw
@@ -1,6 +1,5 @@
-domainGroup: DeviceForStrategy
-
- domainGroup: SonificationRespectful
+supDomain: DeviceForProductStrategy
+ supDomain: SonificationRespectful
#
# Sonification Respectful follows:
# - If in call: Strategy sonification (that follows phone strategy in call also...)
@@ -12,10 +11,9 @@
# selected.
#
# Case of stream active handled programmatically
-
domain: UnreachableDevices
- conf: Calibration
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ conf: calibration
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
remote_submix = 0
hdmi_arc = 0
aux_line = 0
@@ -23,8 +21,10 @@
fm = 0
telephony_tx = 0
ip = 0
- bus = 0
+ proxy = 0
+ usb_headset = 0
stub = 0
+ /Policy/policy/product_strategies/sonification_respectful/device_address =
domain: Speakers
@@ -38,7 +38,7 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
speaker_safe = 1
speaker = 0
@@ -61,12 +61,12 @@
TelephonyMode Is InCommunication
AvailableOutputDevices Excludes Line
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
speaker_safe = 0
speaker = 1
conf: None
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
speaker_safe = 0
speaker = 0
@@ -81,7 +81,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dp
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -108,7 +108,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpHeadphones
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -135,7 +135,7 @@
ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpSpeaker
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -162,7 +162,7 @@
ForceUseForCommunication Is ForceBtSco
AvailableOutputDevices Includes BluetoothScoCarkit
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -189,7 +189,7 @@
ForceUseForCommunication Is ForceBtSco
AvailableOutputDevices Includes BluetoothScoHeadset
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 1
@@ -216,7 +216,7 @@
ForceUseForCommunication Is ForceBtSco
AvailableOutputDevices Includes BluetoothSco
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 1
bluetooth_sco_headset = 0
@@ -253,7 +253,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes WiredHeadphone
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -294,7 +294,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes Line
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -331,7 +331,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes WiredHeadset
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -369,7 +369,7 @@
AvailableOutputDevices Excludes UsbAccessory
AvailableOutputDevices Includes UsbDevice
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -399,7 +399,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes UsbAccessory
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -429,7 +429,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes DgtlDockHeadset
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -459,7 +459,7 @@
ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes Hdmi
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -490,7 +490,7 @@
ForceUseForDock Is ForceAnalogDock
AvailableOutputDevices Includes AnlgDockHeadset
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -517,7 +517,7 @@
ForceUseForCommunication IsNot ForceSpeaker
AvailableOutputDevices Includes Earpiece
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/sonification_respectful/selected_output_devices/mask
earpiece = 1
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -534,20 +534,3 @@
usb_device = 0
hdmi = 0
- conf: None
- component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
- earpiece = 0
- bluetooth_sco = 0
- bluetooth_sco_headset = 0
- bluetooth_sco_carkit = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- bluetooth_a2dp = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_transmitted_through_speaker.pfw
similarity index 61%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_transmitted_through_speaker.pfw
index 9f9c211..3fc7670 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_transmitted_through_speaker.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_transmitted_through_speaker.pfw
@@ -1,9 +1,8 @@
-supDomain: DeviceForStrategy
-
+supDomain: DeviceForProductStrategy
supDomain: TransmittedThroughSpeaker
domain: UnreacheableDevices
conf: Calibration
- component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/transmitted_through_speaker/selected_output_devices/mask
remote_submix = 0
hdmi_arc = 0
spdif = 0
@@ -11,7 +10,7 @@
fm = 0
speaker_safe = 0
earpiece = 0
- wired_headset = 1
+ wired_headset = 0
wired_headphone = 0
bluetooth_sco = 0
bluetooth_sco_headset = 0
@@ -29,15 +28,16 @@
ip = 0
bus = 0
stub = 0
+ /Policy/policy/product_strategies/transmitted_through_speaker/device_address =
domain: Speaker
conf: Selected
AvailableOutputDevices Includes Speaker
- component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/transmitted_through_speaker/selected_output_devices/mask
speaker = 1
conf: NotSelected
- component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+ component: /Policy/policy/product_strategies/transmitted_through_speaker/selected_output_devices/mask
speaker = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_unknown.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_unknown.pfw
new file mode 100644
index 0000000..c46cf56
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Settings/device_for_product_strategy_unknown.pfw
@@ -0,0 +1,36 @@
+supDomain: DeviceForProductStrategy
+ supDomain: Unknown
+ domain: UnreachableDevices
+ conf: calibration
+ component: /Policy/policy/product_strategies/unknown/selected_output_devices/mask
+ earpiece = 0
+ speaker = 0
+ wired_headset = 0
+ wired_headphone = 0
+ bluetooth_sco = 0
+ bluetooth_sco_headset = 0
+ bluetooth_sco_carkit = 0
+ bluetooth_a2dp = 0
+ bluetooth_a2dp_headphones = 0
+ bluetooth_a2dp_speaker = 0
+ hdmi = 0
+ angl_dock_headset = 0
+ dgtl_dock_headset = 0
+ usb_accessory = 0
+ usb_device = 0
+ remote_submix = 0
+ telephony_tx = 0
+ line = 0
+ hdmi_arc = 0
+ spdif = 0
+ fm = 0
+ aux_line = 0
+ speaker_safe = 0
+ ip = 0
+ proxy = 0
+ usb_headset = 0
+ bus = 0
+ stub = 0
+ /Policy/policy/product_strategies/unknown/device_address =
+
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/PolicySubsystem.xml
new file mode 100644
index 0000000..b55ce2c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/PolicySubsystem.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<Subsystem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xi="http://www.w3.org/2001/XInclude"
+ xsi:noNamespaceSchemaLocation="Schemas/Subsystem.xsd"
+ Name="policy" Type="Policy">
+
+ <ComponentLibrary>
+ <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
+ <!-- Common Types definition -->
+ <xi:include href="PolicySubsystem-CommonTypes.xml"/>
+ <xi:include href="ProductStrategies.xml"/>
+
+
+ <!--#################### GLOBAL COMPONENTS END ####################-->
+
+ <!--#################### STREAM BEGIN ####################-->
+
+ <ComponentType Name="Streams" Description="associated to audio_stream_type_t definition">
+ <Component Name="voice_call" Type="Stream" Mapping="Name:AUDIO_STREAM_VOICE_CALL"/>
+ <Component Name="system" Type="Stream" Mapping="Name:AUDIO_STREAM_SYSTEM"/>
+ <Component Name="ring" Type="Stream" Mapping="Name:AUDIO_STREAM_RING"/>
+ <Component Name="music" Type="Stream" Mapping="Name:AUDIO_STREAM_MUSIC"/>
+ <Component Name="alarm" Type="Stream" Mapping="Name:AUDIO_STREAM_ALARM"/>
+ <Component Name="notification" Type="Stream" Mapping="Name:AUDIO_STREAM_NOTIFICATION"/>
+ <Component Name="bluetooth_sco" Type="Stream" Mapping="Name:AUDIO_STREAM_BLUETOOTH_SCO"/>
+ <Component Name="enforced_audible" Type="Stream" Mapping="Name:AUDIO_STREAM_ENFORCED_AUDIBLE"
+ Description="Sounds that cannot be muted by user and must be routed to speaker"/>
+ <Component Name="dtmf" Type="Stream" Mapping="Name:AUDIO_STREAM_DTMF"/>
+ <Component Name="tts" Type="Stream" Mapping="Name:AUDIO_STREAM_TTS"
+ Description="Transmitted Through Speaker. Plays over speaker only, silent on other devices"/>
+ <Component Name="accessibility" Type="Stream" Mapping="Name:AUDIO_STREAM_ACCESSIBILITY"
+ Description="For accessibility talk back prompts"/>
+ <Component Name="rerouting" Type="Stream" Mapping="Name:AUDIO_STREAM_REROUTING"
+ Description="For dynamic policy output mixes"/>
+ <Component Name="patch" Type="Stream" Mapping="Name:AUDIO_STREAM_PATCH"
+ Description="For internal audio flinger tracks. Fixed volume"/>
+ </ComponentType>
+
+ <!--#################### STREAM END ####################-->
+
+ <!--#################### INPUT SOURCE BEGIN ####################-->
+
+ <ComponentType Name="InputSources" Description="associated to audio_source_t definition,
+ identifier mapping must match the value of the enum">
+ <Component Name="default" Type="InputSource" Mapping="Name:AUDIO_SOURCE_DEFAULT"/>
+ <Component Name="mic" Type="InputSource" Mapping="Name:AUDIO_SOURCE_MIC"/>
+ <Component Name="voice_uplink" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_UPLINK"/>
+ <Component Name="voice_downlink" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_DOWNLINK"/>
+ <Component Name="voice_call" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_CALL"/>
+ <Component Name="camcorder" Type="InputSource" Mapping="Name:AUDIO_SOURCE_CAMCORDER"/>
+ <Component Name="voice_recognition" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_RECOGNITION"/>
+ <Component Name="voice_communication" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_COMMUNICATION"/>
+ <Component Name="remote_submix" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_REMOTE_SUBMIX"/>
+ <Component Name="unprocessed" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_UNPROCESSED"/>
+ <Component Name="fm_tuner" Type="InputSource" Mapping="Name:AUDIO_SOURCE_FM_TUNER"/>
+ <Component Name="hotword" Type="InputSource" Mapping="Name:AUDIO_SOURCE_HOTWORD"/>
+ </ComponentType>
+
+ <!--#################### INPUT SOURCE END ####################-->
+ </ComponentLibrary>
+
+ <InstanceDefinition>
+ <Component Name="streams" Type="Streams"/>
+ <Component Name="input_sources" Type="InputSources"/>
+ <Component Name="product_strategies" Type="ProductStrategies"/>
+ </InstanceDefinition>
+</Subsystem>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/ProductStrategies.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/ProductStrategies.xml
new file mode 100644
index 0000000..4cbb3da
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Structure/ProductStrategies.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<ComponentTypeSet xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xi="http://www.w3.org/2001/XInclude"
+ xsi:noNamespaceSchemaLocation="Schemas/ComponentTypeSet.xsd">
+
+ <ComponentType Name="ProductStrategies" Description="">
+ <Component Name="accessibility" Type="ProductStrategy"/>
+ <Component Name="enforced_audible" Type="ProductStrategy"/>
+ <Component Name="transmitted_through_speaker" Type="ProductStrategy"/>
+
+ <Component Name="media" Type="ProductStrategy"/>
+ <Component Name="phone" Type="ProductStrategy"/>
+ <Component Name="dtmf" Type="ProductStrategy"/>
+
+ <Component Name="sonification" Type="ProductStrategy"/>
+ <Component Name="sonification_respectful" Type="ProductStrategy"/>
+ <Component Name="rerouting" Type="ProductStrategy"/>
+ <Component Name="unknown" Type="ProductStrategy"/>
+ </ComponentType>
+
+</ComponentTypeSet>
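Each Component name listed above (media, phone, sonification, ..., unknown) has to match a product strategy the engine already knows about: the ProductStrategy plugin object added later in this change resolves its identifier by name at construction time and asserts if the lookup fails. A condensed sketch of that resolution, reusing getProductStrategyByName() and PRODUCT_STRATEGY_INVALID from the plugin sources below; the wrapper itself is illustrative:

    // Sketch of the name-based lookup done for every ProductStrategy instance.
    #include <string>
    #include <utils/Log.h>
    #include <AudioPolicyPluginInterface.h>
    #include <policy.h>

    android::product_strategy_t resolveStrategy(android::AudioPolicyPluginInterface *engine,
                                                const std::string &name)
    {
        android::product_strategy_t id = engine->getProductStrategyByName(name);
        ALOG_ASSERT(id != PRODUCT_STRATEGY_INVALID, "Product Strategy %s not found", name.c_str());
        return id;
    }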
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
deleted file mode 100644
index 04e62f7..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_rerouting.pfw
+++ /dev/null
@@ -1,300 +0,0 @@
-domainGroup: DeviceForStrategy
-
- domainGroup: Rerouting
- #
- # Falls through media strategy
- #
- domain: UnreachableDevices
- conf: Calibration
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- hdmi_arc = 0
- spdif = 0
- aux_line = 0
- fm = 0
- speaker_safe = 0
- earpiece = 0
- bluetooth_sco = 0
- bluetooth_sco_headset = 0
- bluetooth_sco_carkit = 0
- telephony_tx = 0
- ip = 0
- bus = 0
- stub = 0
-
- domain: Device2
- conf: RemoteSubmix
- AvailableOutputDevices Includes RemoteSubmix
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 1
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: BluetoothA2dp
- ForceUseForMedia IsNot ForceNoBtA2dp
- AvailableOutputDevices Includes BluetoothA2dp
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 1
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: BluetoothA2dpHeadphone
- ForceUseForMedia IsNot ForceNoBtA2dp
- AvailableOutputDevices Includes BluetoothA2dpHeadphones
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 1
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: BluetoothA2dpSpeaker
- ForceUseForMedia IsNot ForceNoBtA2dp
- AvailableOutputDevices Includes BluetoothA2dpSpeaker
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 1
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: ForceSpeaker
- ForceUseForMedia Is ForceSpeaker
- AvailableOutputDevices Includes Speaker
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 1
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: WiredHeadphone
- AvailableOutputDevices Includes WiredHeadphone
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 1
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: Line
- AvailableOutputDevices Includes Line
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 1
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: WiredHeadset
- AvailableOutputDevices Includes WiredHeadset
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 1
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: UsbAccessory
- AvailableOutputDevices Includes UsbAccessory
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 1
- usb_device = 0
- hdmi = 0
-
- conf: UsbDevice
- AvailableOutputDevices Includes UsbDevice
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 1
- hdmi = 0
-
- conf: DgtlDockHeadset
- AvailableOutputDevices Includes DgtlDockHeadset
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 1
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: AuxDigital
- #
- # Rerouting is similar to media and sonification (exept here: sonification is not allowed on HDMI)
- #
- AvailableOutputDevices Includes Hdmi
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 1
-
- conf: AnlgDockHeadset
- AvailableOutputDevices Includes AnlgDockHeadset
- ForceUseForDock Is ForceAnalogDock
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 1
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: Speaker
- AvailableOutputDevices Includes Speaker
-
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 1
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
- conf: Default
- component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
- remote_submix = 0
- bluetooth_a2dp = 0
- bluetooth_a2dp_headphones = 0
- bluetooth_a2dp_speaker = 0
- speaker = 0
- wired_headset = 0
- wired_headphone = 0
- line = 0
- angl_dock_headset = 0
- dgtl_dock_headset = 0
- usb_accessory = 0
- usb_device = 0
- hdmi = 0
-
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
deleted file mode 100644
index 3940b9d..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_stream.pfw
+++ /dev/null
@@ -1,20 +0,0 @@
-domain: StrategyForStream
-
- conf: Calibration
- /Policy/policy/streams/voice_call/applicable_strategy/strategy = phone
- #
- # NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
- # while key clicks are played produces a poor result
- #
- /Policy/policy/streams/system/applicable_strategy/strategy = media
- /Policy/policy/streams/ring/applicable_strategy/strategy = sonification
- /Policy/policy/streams/music/applicable_strategy/strategy = media
- /Policy/policy/streams/alarm/applicable_strategy/strategy = sonification
- /Policy/policy/streams/notification/applicable_strategy/strategy = sonification_respectful
- /Policy/policy/streams/bluetooth_sco/applicable_strategy/strategy = phone
- /Policy/policy/streams/enforced_audible/applicable_strategy/strategy = enforced_audible
- /Policy/policy/streams/dtmf/applicable_strategy/strategy = dtmf
- /Policy/policy/streams/tts/applicable_strategy/strategy = transmitted_through_speaker
- /Policy/policy/streams/accessibility/applicable_strategy/strategy = accessibility
- /Policy/policy/streams/rerouting/applicable_strategy/strategy = rerouting
-
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
index daa7f68..56c5ed3 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem-CommonTypes.xml
@@ -133,30 +133,6 @@
</BitParameterBlock>
</ComponentType>
- <!-- Routing Strategy definition as an enumeration. Numerical value must match the value
- of the routing strategy in policy header file. -->
- <ComponentType Name="Strategy">
- <EnumParameter Name="strategy" Size="32">
- <ValuePair Literal="media" Numerical="0"/>
- <ValuePair Literal="phone" Numerical="1"/>
- <ValuePair Literal="sonification" Numerical="2"/>
- <ValuePair Literal="sonification_respectful" Numerical="3"/>
- <ValuePair Literal="dtmf" Numerical="4"/>
- <ValuePair Literal="enforced_audible" Numerical="5"/>
- <ValuePair Literal="transmitted_through_speaker" Numerical="6"/>
- <ValuePair Literal="accessibility" Numerical="7"/>
- <ValuePair Literal="rerouting" Numerical="8"/>
- </EnumParameter>
- </ComponentType>
-
- <!--#################### STRATEGY COMMON TYPES BEGIN ####################-->
-
- <ComponentType Name="StrategyConfig" Mapping="Strategy">
- <Component Name="selected_output_devices" Type="OutputDevicesMask"/>
- </ComponentType>
-
- <!--#################### STRATEGY COMMON TYPES END ####################-->
-
<!--#################### STREAM COMMON TYPES BEGIN ####################-->
<ComponentType Name="VolumeProfileType">
@@ -178,21 +154,12 @@
</ComponentType>
<ComponentType Name="Stream" Mapping="Stream">
- <Component Name="applicable_strategy" Type="Strategy"/>
<Component Name="applicable_volume_profile" Type="VolumeProfileType"
Description="Volume profile followed by a given stream type."/>
</ComponentType>
<!--#################### STREAM COMMON TYPES END ####################-->
- <!--#################### USAGE COMMON TYPES BEGIN ####################-->
-
- <ComponentType Name="Usage">
- <Component Name="applicable_strategy" Type="Strategy" Mapping="Usage"/>
- </ComponentType>
-
- <!--#################### USAGE COMMON TYPES END ####################-->
-
<!--#################### INPUT SOURCE COMMON TYPES BEGIN ####################-->
<ComponentType Name="InputSource">
@@ -202,4 +169,14 @@
<!--#################### INPUT SOURCE COMMON TYPES END ####################-->
+ <!--#################### PRODUCT STRATEGY COMMON TYPES BEGIN ####################-->
+
+ <ComponentType Name="ProductStrategy" Mapping="ProductStrategy">
+ <Component Name="selected_output_devices" Type="OutputDevicesMask"/>
+ <StringParameter Name="device_address" MaxLength="256"
+ Description="if any, device address associated"/>
+ </ComponentType>
+
+ <!--#################### PRODUCT STRATEGY COMMON TYPES END ####################-->
+
</ComponentTypeSet>
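The new ProductStrategy component type pairs the output device mask with a 256-character device_address string; on the plugin side the same data is read back from the PFW blackboard as a single packed record (see ProductStrategy.h further down in this change), so the XML sizes and the C++ layout must stay in agreement. A minimal consistency sketch, assuming the 32-bit mask used by the plugin:

    // Packed record mirrored from ProductStrategy.h: a 32-bit device mask
    // followed by the device address (256 characters plus the terminating NUL).
    #include <cstdint>

    static const uint32_t kMaxStringSize = 257;

    struct Device {
        uint32_t applicableDevice;
        char     deviceAddress[kMaxStringSize];
    } __attribute__((packed));

    // The blackboard image produced by the PFW settings must match this layout
    // byte for byte.
    static_assert(sizeof(Device) == sizeof(uint32_t) + kMaxStringSize,
                  "PFW blackboard layout mismatch");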
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
index 45d1e8a..a4e7537 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
@@ -11,22 +11,6 @@
<!--#################### GLOBAL COMPONENTS END ####################-->
- <!--#################### STRATEGY BEGIN ####################-->
-
- <ComponentType Name="Strategies">
- <Component Name="media" Type="StrategyConfig" Mapping="Name:STRATEGY_MEDIA"/>
- <Component Name="phone" Type="StrategyConfig" Mapping="Name:STRATEGY_PHONE"/>
- <Component Name="sonification" Type="StrategyConfig" Mapping="Name:STRATEGY_SONIFICATION"/>
- <Component Name="sonification_respectful" Type="StrategyConfig" Mapping="Name:STRATEGY_SONIFICATION_RESPECTFUL"/>
- <Component Name="dtmf" Type="StrategyConfig" Mapping="Name:STRATEGY_DTMF"/>
- <Component Name="enforced_audible" Type="StrategyConfig" Mapping="Name:STRATEGY_ENFORCED_AUDIBLE"/>
- <Component Name="transmitted_through_speaker" Type="StrategyConfig" Mapping="Name:STRATEGY_TRANSMITTED_THROUGH_SPEAKER"/>
- <Component Name="accessibility" Type="StrategyConfig" Mapping="Name:STRATEGY_ACCESSIBILITY"/>
- <Component Name="rerouting" Type="StrategyConfig" Mapping=",Name:STRATEGY_REROUTING"/>
- </ComponentType>
-
- <!--#################### STRATEGY END ####################-->
-
<!--#################### STREAM BEGIN ####################-->
<ComponentType Name="Streams" Description="associated to audio_stream_type_t definition">
@@ -52,40 +36,6 @@
<!--#################### STREAM END ####################-->
- <!--#################### USAGE BEGIN ####################-->
-
- <ComponentType Name="Usages" Description="associated to audio_usage_t definition">
- <Component Name="unknown" Type="Usage" Mapping="Name:AUDIO_USAGE_UNKNOWN"/>
- <Component Name="media" Type="Usage" Mapping="Name:AUDIO_USAGE_MEDIA"/>
- <Component Name="voice_communication" Type="Usage"
- Mapping="Name:AUDIO_USAGE_VOICE_COMMUNICATION"/>
- <Component Name="voice_communication_signalling" Type="Usage"
- Mapping="Name:AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/>
- <Component Name="alarm" Type="Usage" Mapping="Name:AUDIO_USAGE_ALARM"/>
- <Component Name="notification" Type="Usage" Mapping="Name:AUDIO_USAGE_NOTIFICATION"/>
- <Component Name="notification_telephony_ringtone" Type="Usage"
- Mapping="Name:AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/>
- <Component Name="notification_communication_request" Type="Usage"
- Mapping="Name:AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/>
- <Component Name="notification_communication_instant" Type="Usage"
- Mapping="Name:AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/>
- <Component Name="notification_communication_delayed" Type="Usage"
- Mapping="Name:AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/>
- <Component Name="notification_event" Type="Usage"
- Mapping="Name:AUDIO_USAGE_NOTIFICATION_EVENT"/>
- <Component Name="assistance_accessibility" Type="Usage"
- Mapping="Name:AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/>
- <Component Name="assistance_navigation_guidance" Type="Usage"
- Mapping="Name:AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
- <Component Name="assistance_sonification" Type="Usage"
- Mapping="Name:AUDIO_USAGE_ASSISTANCE_SONIFICATION"/>
- <Component Name="game" Type="Usage" Mapping="Name:AUDIO_USAGE_GAME"/>
- <Component Name="virtual_source" Type="Usage" Mapping="Name:AUDIO_USAGE_VIRTUAL_SOURCE"/>
- <Component Name="assistant" Type="Usage" Mapping="Name:AUDIO_USAGE_ASSISTANT"/>
- </ComponentType>
-
- <!--#################### USAGE END ####################-->
-
<!--#################### INPUT SOURCE BEGIN ####################-->
<ComponentType Name="InputSources" Description="associated to audio_source_t definition,
@@ -117,8 +67,6 @@
<InstanceDefinition>
<Component Name="streams" Type="Streams"/>
- <Component Name="strategies" Type="Strategies"/>
<Component Name="input_sources" Type="InputSources"/>
- <Component Name="usages" Type="Usages"/>
</InstanceDefinition>
</Subsystem>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
index db1f038..65dc9af 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -8,10 +8,9 @@
LOCAL_SRC_FILES := \
PolicySubsystemBuilder.cpp \
PolicySubsystem.cpp \
- Strategy.cpp \
InputSource.cpp \
Stream.cpp \
- Usage.cpp
+ ProductStrategy.cpp
LOCAL_CFLAGS += \
-Wall \
@@ -21,9 +20,8 @@
-fvisibility=hidden
LOCAL_C_INCLUDES := \
- frameworks/av/services/audiopolicy/common/include \
frameworks/av/services/audiopolicy/engineconfigurable/include \
- frameworks/av/services/audiopolicy/engineconfigurable/interface \
+ frameworks/av/services/audiopolicy/engineconfigurable/interface
LOCAL_SHARED_LIBRARIES := \
libaudiopolicyengineconfigurable \
@@ -31,6 +29,11 @@
libmedia_helper \
liblog \
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon \
+ libaudioclient_headers \
+ libbase_headers
+
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
LOCAL_STATIC_LIBRARIES := libpfw_utility
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
index 7374fc3..bfc1bca 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
@@ -17,10 +17,9 @@
#include "PolicySubsystem.h"
#include "SubsystemObjectFactory.h"
#include "PolicyMappingKeys.h"
-#include "Strategy.h"
#include "Stream.h"
#include "InputSource.h"
-#include "Usage.h"
+#include "ProductStrategy.h"
#include <AudioPolicyPluginInterface.h>
#include <AudioPolicyEngineInstance.h>
#include <utils/Log.h>
@@ -36,9 +35,8 @@
const char *const PolicySubsystem::mStreamComponentName = "Stream";
-const char *const PolicySubsystem::mStrategyComponentName = "Strategy";
const char *const PolicySubsystem::mInputSourceComponentName = "InputSource";
-const char *const PolicySubsystem::mUsageComponentName = "Usage";
+const char *const PolicySubsystem::mProductStrategyComponentName = "ProductStrategy";
PolicySubsystem::PolicySubsystem(const std::string &name, core::log::Logger &logger)
: CSubsystem(name, logger),
@@ -68,20 +66,14 @@
(1 << MappingKeyName))
);
addSubsystemObjectFactory(
- new TSubsystemObjectFactory<Strategy>(
- mStrategyComponentName,
- 0)
- );
- addSubsystemObjectFactory(
- new TSubsystemObjectFactory<Usage>(
- mUsageComponentName,
- (1 << MappingKeyName))
- );
- addSubsystemObjectFactory(
new TSubsystemObjectFactory<InputSource>(
mInputSourceComponentName,
(1 << MappingKeyName))
);
+ addSubsystemObjectFactory(
+ new TSubsystemObjectFactory<ProductStrategy>(
+ mProductStrategyComponentName, 0)
+ );
}
// Retrieve Route interface
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
index 822eeb9..9bf1c23 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
@@ -53,7 +53,6 @@
static const char *const mKeyAmend3; /**< amend3 key mapping string. */
static const char *const mStreamComponentName;
- static const char *const mStrategyComponentName;
static const char *const mInputSourceComponentName;
- static const char *const mUsageComponentName;
+ static const char *const mProductStrategyComponentName;
};
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.cpp
new file mode 100644
index 0000000..bb29ef1
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.cpp
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ProductStrategy.h"
+#include "PolicyMappingKeys.h"
+#include "PolicySubsystem.h"
+
+using std::string;
+using android::product_strategy_t;
+
+ProductStrategy::ProductStrategy(const string &mappingValue,
+ CInstanceConfigurableElement *instanceConfigurableElement,
+ const CMappingContext &context,
+ core::log::Logger& logger)
+ : CFormattedSubsystemObject(instanceConfigurableElement,
+ logger,
+ mappingValue,
+ MappingKeyAmend1,
+ (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
+ context)
+{
+ ALOG_ASSERT(instanceConfigurableElement != nullptr, "Invalid Configurable Element");
+ mPolicySubsystem = static_cast<const PolicySubsystem *>(
+ instanceConfigurableElement->getBelongingSubsystem());
+ ALOG_ASSERT(mPolicySubsystem != nullptr, "Invalid Policy Subsystem");
+
+ mPolicyPluginInterface = mPolicySubsystem->getPolicyPluginInterface();
+ ALOG_ASSERT(mPolicyPluginInterface != nullptr, "Invalid Policy Plugin Interface");
+
+ std::string name(instanceConfigurableElement->getName());
+ mId = mPolicyPluginInterface->getProductStrategyByName(name);
+
+ ALOG_ASSERT(mId != PRODUCT_STRATEGY_INVALID, "Product Strategy %s not found", name.c_str());
+
+ ALOGE("Product Strategy %s added", name.c_str());
+}
+
+bool ProductStrategy::sendToHW(string & /*error*/)
+{
+ Device deviceParams;
+ blackboardRead(&deviceParams, sizeof(deviceParams));
+
+ mPolicyPluginInterface->setDeviceTypesForProductStrategy(mId, deviceParams.applicableDevice);
+ mPolicyPluginInterface->setDeviceAddressForProductStrategy(mId, deviceParams.deviceAddress);
+ return true;
+}
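The new plugin object depends on three engine-side entry points. Their real declarations live in AudioPolicyPluginInterface.h, which is not part of this hunk, so the prototypes below are only inferred from the call sites above; the stand-in type name and the use of int/uint32_t are assumptions:

    // Sketch of the engine calls exercised by the ProductStrategy constructor and
    // sendToHW(); signatures inferred from the call sites, not authoritative.
    #include <cstdint>
    #include <string>

    struct ProductStrategyEngineCalls {   // illustrative stand-in for AudioPolicyPluginInterface
        virtual ~ProductStrategyEngineCalls() = default;
        // Resolve a strategy declared in ProductStrategies.xml by its name.
        virtual int  getProductStrategyByName(const std::string &name) = 0;
        // Push the selected output device type mask for the strategy.
        virtual void setDeviceTypesForProductStrategy(int strategy, uint32_t devices) = 0;
        // Push the (possibly empty) device address for the strategy.
        virtual void setDeviceAddressForProductStrategy(int strategy, const std::string &address) = 0;
    };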
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
similarity index 68%
rename from services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
rename to services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
index c02b82c..244f082 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,14 +20,24 @@
#include "InstanceConfigurableElement.h"
#include "MappingContext.h"
#include <AudioPolicyPluginInterface.h>
+#include <policy.h>
#include <string>
class PolicySubsystem;
-class Strategy : public CFormattedSubsystemObject
+class ProductStrategy : public CFormattedSubsystemObject
{
+private:
+ static const uint32_t mMaxStringSize = 257; /**< Max string size (including the terminating NUL). */
+
+ struct Device
+ {
+ uint32_t applicableDevice; /**< applicable device for this strategy. */
+ char deviceAddress[mMaxStringSize]; /**< device address associated with this strategy. */
+ } __attribute__((packed));
+
public:
- Strategy(const std::string &mappingValue,
+ ProductStrategy(const std::string &mappingValue,
CInstanceConfigurableElement *instanceConfigurableElement,
const CMappingContext &context,
core::log::Logger& logger);
@@ -38,10 +48,10 @@
private:
const PolicySubsystem *mPolicySubsystem; /**< Policy subsystem plugin. */
+ android::product_strategy_t mId;
+
/**
* Interface to communicate with Audio Policy Engine.
*/
android::AudioPolicyPluginInterface *mPolicyPluginInterface;
-
- android::routing_strategy mId; /**< strategy identifier to link with audio.h.*/
};
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
deleted file mode 100644
index 876bcb0..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Strategy.h"
-#include "PolicyMappingKeys.h"
-#include "PolicySubsystem.h"
-#include <RoutingStrategy.h>
-
-using std::string;
-using android::routing_strategy;
-
-namespace detail {
-
-constexpr std::pair<routing_strategy, const char*> routingStrategyMap[] = {
- {android::STRATEGY_MEDIA, "STRATEGY_MEDIA"},
- {android::STRATEGY_PHONE, "STRATEGY_PHONE"},
- {android::STRATEGY_SONIFICATION, "STRATEGY_SONIFICATION"},
- {android::STRATEGY_SONIFICATION_RESPECTFUL, "STRATEGY_SONIFICATION_RESPECTFUL"},
- {android::STRATEGY_DTMF, "STRATEGY_DTMF"},
- {android::STRATEGY_ENFORCED_AUDIBLE, "STRATEGY_ENFORCED_AUDIBLE"},
- {android::STRATEGY_TRANSMITTED_THROUGH_SPEAKER, "STRATEGY_TRANSMITTED_THROUGH_SPEAKER"},
- {android::STRATEGY_ACCESSIBILITY, "STRATEGY_ACCESSIBILITY"},
- {android::STRATEGY_REROUTING, "STRATEGY_REROUTING"},
-};
-
-bool fromString(const char *literalName, routing_strategy &type)
-{
- for (auto& pair : routingStrategyMap) {
- if (strcmp(pair.second, literalName) == 0) {
- type = pair.first;
- return true;
- }
- }
- return false;
-}
-
-}
-
-Strategy::Strategy(const string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context,
- core::log::Logger& logger)
- : CFormattedSubsystemObject(instanceConfigurableElement,
- logger,
- mappingValue,
- MappingKeyAmend1,
- (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
- context),
- mPolicySubsystem(static_cast<const PolicySubsystem *>(
- instanceConfigurableElement->getBelongingSubsystem())),
- mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
-{
- std::string name(context.getItem(MappingKeyName));
- if (not detail::fromString(name.c_str(), mId)) {
- LOG_ALWAYS_FATAL("Invalid Strategy %s, invalid XML structure file", name.c_str());
- }
- // Declares the strategy to audio policy engine
- mPolicyPluginInterface->addStrategy(instanceConfigurableElement->getName(), mId);
-}
-
-bool Strategy::sendToHW(string & /*error*/)
-{
- uint32_t applicableOutputDevice;
- blackboardRead(&applicableOutputDevice, sizeof(applicableOutputDevice));
- return mPolicyPluginInterface->setDeviceForStrategy(mId, applicableOutputDevice);
-}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
index 46c9e1c..5230e0e 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
@@ -20,7 +20,6 @@
#include <media/TypeConverter.h>
using std::string;
-using android::routing_strategy;
Stream::Stream(const string &/*mappingValue*/,
CInstanceConfigurableElement *instanceConfigurableElement,
@@ -45,11 +44,8 @@
Applicable params;
blackboardRead(¶ms, sizeof(params));
- mPolicyPluginInterface->setStrategyForStream(mId,
- static_cast<routing_strategy>(params.strategy));
-
- mPolicyPluginInterface->setVolumeProfileForStream(mId,
- static_cast<audio_stream_type_t>(params.volumeProfile));
+ mPolicyPluginInterface->setVolumeProfileForStream(
+ mId, static_cast<audio_stream_type_t>(params.volumeProfile));
return true;
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
index 4a875db..e0ce2fa 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
@@ -29,7 +29,6 @@
private:
struct Applicable
{
- uint32_t strategy; /**< applicable strategy for this stream. */
uint32_t volumeProfile; /**< applicable volume profile for this stream. */
} __attribute__((packed));
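With the strategy field gone, the per-stream blackboard record shrinks to the volume profile alone, and it has to keep matching the Stream component type in PolicySubsystem-CommonTypes.xml. A minimal sketch of the reduced layout, assuming the 32-bit field shown above:

    // Reduced Stream blackboard record after this change: only the applicable
    // volume profile remains; the per-stream strategy field is removed.
    #include <cstdint>

    struct Applicable {
        uint32_t volumeProfile;
    } __attribute__((packed));

    static_assert(sizeof(Applicable) == sizeof(uint32_t),
                  "Stream blackboard record is now a single 32-bit field");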
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
deleted file mode 100644
index 925d631..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "Usage.h"
-#include "PolicyMappingKeys.h"
-#include "PolicySubsystem.h"
-#include <media/TypeConverter.h>
-
-using std::string;
-using android::routing_strategy;
-
-Usage::Usage(const string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context, core::log::Logger &logger)
- : CFormattedSubsystemObject(instanceConfigurableElement,
- logger,
- mappingValue,
- MappingKeyAmend1,
- (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
- context),
- mPolicySubsystem(static_cast<const PolicySubsystem *>(
- instanceConfigurableElement->getBelongingSubsystem())),
- mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
-{
- std::string name(context.getItem(MappingKeyName));
-
- if (not android::UsageTypeConverter::fromString(name, mId)) {
- LOG_ALWAYS_FATAL("Invalid Usage name: %s, invalid XML structure file", name.c_str());
- }
- // Declares the strategy to audio policy engine
- mPolicyPluginInterface->addUsage(name, mId);
-}
-
-bool Usage::sendToHW(string & /*error*/)
-{
- uint32_t applicableStrategy;
- blackboardRead(&applicableStrategy, sizeof(applicableStrategy));
- return mPolicyPluginInterface->setStrategyForUsage(mId,
- static_cast<routing_strategy>(applicableStrategy));
-}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
deleted file mode 100644
index 860204f..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "FormattedSubsystemObject.h"
-#include "InstanceConfigurableElement.h"
-#include "MappingContext.h"
-#include <AudioPolicyPluginInterface.h>
-#include <string>
-
-class PolicySubsystem;
-
-class Usage : public CFormattedSubsystemObject
-{
-public:
- Usage(const std::string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context,
- core::log::Logger& logger);
-
-protected:
- virtual bool sendToHW(std::string &error);
-
-private:
- const PolicySubsystem *mPolicySubsystem; /**< Route subsytem plugin. */
-
- /**
- * Interface to communicate with Audio Policy Engine.
- */
- android::AudioPolicyPluginInterface *mPolicyPluginInterface;
-
- audio_usage_t mId; /**< usage identifier to link with audio.h. */
-};
diff --git a/services/audiopolicy/engineconfigurable/src/Collection.h b/services/audiopolicy/engineconfigurable/src/Collection.h
index 1f8ed8d..02b41cb 100644
--- a/services/audiopolicy/engineconfigurable/src/Collection.h
+++ b/services/audiopolicy/engineconfigurable/src/Collection.h
@@ -18,8 +18,6 @@
#include "Element.h"
#include "Stream.h"
-#include "Strategy.h"
-#include "Usage.h"
#include "InputSource.h"
#include <utils/Errors.h>
#include <system/audio.h>
@@ -147,15 +145,9 @@
template <>
struct Collection<std::string>::collectionSupported {};
template <>
-struct Collection<audio_usage_t>::collectionSupported {};
-template <>
struct Collection<audio_source_t>::collectionSupported {};
-template <>
-struct Collection<routing_strategy>::collectionSupported {};
-typedef Collection<routing_strategy> StrategyCollection;
typedef Collection<audio_stream_type_t> StreamCollection;
-typedef Collection<audio_usage_t> UsageCollection;
typedef Collection<audio_source_t> InputSourceCollection;
} // namespace audio_policy
diff --git a/services/audiopolicy/engineconfigurable/src/Element.h b/services/audiopolicy/engineconfigurable/src/Element.h
index 1b55c8c..97950d8 100644
--- a/services/audiopolicy/engineconfigurable/src/Element.h
+++ b/services/audiopolicy/engineconfigurable/src/Element.h
@@ -62,7 +62,7 @@
/**
* A Policy element may implement getter/setter function for a given property.
- * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+ * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
* or a string.
*
* @tparam Property for which this policy element has setter / getter.
@@ -73,7 +73,7 @@
/**
* A Policy element may implement getter/setter function for a given property.
- * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+ * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
* or a string.
*
* @tparam Property for which this policy element has setter / getter.
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 009cf90..89a1694 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -25,11 +25,12 @@
#endif
#include "Engine.h"
-#include "Strategy.h"
#include "Stream.h"
#include "InputSource.h"
-#include "Usage.h"
+
+#include <EngineConfig.h>
#include <policy.h>
+#include <AudioIODescriptorInterface.h>
#include <ParameterManagerWrapper.h>
using std::string;
@@ -39,77 +40,48 @@
namespace audio_policy {
template <>
-StrategyCollection &Engine::getCollection<routing_strategy>()
-{
- return mStrategyCollection;
-}
-template <>
StreamCollection &Engine::getCollection<audio_stream_type_t>()
{
return mStreamCollection;
}
template <>
-UsageCollection &Engine::getCollection<audio_usage_t>()
-{
- return mUsageCollection;
-}
-template <>
InputSourceCollection &Engine::getCollection<audio_source_t>()
{
return mInputSourceCollection;
}
template <>
-const StrategyCollection &Engine::getCollection<routing_strategy>() const
-{
- return mStrategyCollection;
-}
-template <>
const StreamCollection &Engine::getCollection<audio_stream_type_t>() const
{
return mStreamCollection;
}
template <>
-const UsageCollection &Engine::getCollection<audio_usage_t>() const
-{
- return mUsageCollection;
-}
-template <>
const InputSourceCollection &Engine::getCollection<audio_source_t>() const
{
return mInputSourceCollection;
}
-Engine::Engine()
- : mManagerInterface(this),
- mPluginInterface(this),
- mPolicyParameterMgr(new ParameterManagerWrapper()),
- mApmObserver(NULL)
+Engine::Engine() : mPolicyParameterMgr(new ParameterManagerWrapper())
{
+ status_t loadResult = loadAudioPolicyEngineConfig();
+ if (loadResult < 0) {
+ ALOGE("Policy Engine configuration is invalid.");
+ }
}
Engine::~Engine()
{
- mStrategyCollection.clear();
mStreamCollection.clear();
mInputSourceCollection.clear();
- mUsageCollection.clear();
-}
-
-
-void Engine::setObserver(AudioPolicyManagerObserver *observer)
-{
- ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
- mApmObserver = observer;
}
status_t Engine::initCheck()
{
- if (mPolicyParameterMgr == NULL || mPolicyParameterMgr->start() != NO_ERROR) {
+ if (mPolicyParameterMgr == nullptr || mPolicyParameterMgr->start() != NO_ERROR) {
ALOGE("%s: could not start Policy PFW", __FUNCTION__);
return NO_INIT;
}
- return (mApmObserver != NULL)? NO_ERROR : NO_INIT;
+ return EngineBase::initCheck();
}
template <typename Key>
@@ -137,55 +109,16 @@
return element->template get<Property>();
}
-routing_strategy Engine::ManagerInterfaceImpl::getStrategyForUsage(audio_usage_t usage)
+bool Engine::setVolumeProfileForStream(const audio_stream_type_t &stream,
+ const audio_stream_type_t &profile)
{
- return mPolicyEngine->getPropertyForKey<routing_strategy, audio_usage_t>(usage);
-}
-
-audio_devices_t Engine::ManagerInterfaceImpl::getDeviceForStrategy(routing_strategy strategy) const
-{
- const SwAudioOutputCollection &outputs = mPolicyEngine->mApmObserver->getOutputs();
-
- /** This is the only case handled programmatically because the PFW is unable to know the
- * activity of streams.
- *
- * -While media is playing on a remote device, use the the sonification behavior.
- * Note that we test this usecase before testing if media is playing because
- * the isStreamActive() method only informs about the activity of a stream, not
- * if it's for local playback. Note also that we use the same delay between both tests
- *
- * -When media is not playing anymore, fall back on the sonification behavior
- */
- if (strategy == STRATEGY_SONIFICATION_RESPECTFUL &&
- !is_state_in_call(getPhoneState()) &&
- !outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
- SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY) &&
- outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
- return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(STRATEGY_MEDIA);
- }
- if (strategy == STRATEGY_ACCESSIBILITY &&
- (outputs.isStreamActive(AUDIO_STREAM_RING) || outputs.isStreamActive(AUDIO_STREAM_ALARM))) {
- // do not route accessibility prompts to a digital output currently configured with a
- // compressed format as they would likely not be mixed and dropped.
- // Device For Sonification conf file has HDMI, SPDIF and HDMI ARC unreacheable.
- return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(
- STRATEGY_SONIFICATION);
- }
- return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(strategy);
-}
-
-bool Engine::PluginInterfaceImpl::setVolumeProfileForStream(const audio_stream_type_t &stream,
- const audio_stream_type_t &profile)
-{
- if (mPolicyEngine->setPropertyForKey<audio_stream_type_t, audio_stream_type_t>(stream,
- profile)) {
- mPolicyEngine->mApmObserver->getVolumeCurves().switchVolumeCurve(profile, stream);
+ if (setPropertyForKey<audio_stream_type_t, audio_stream_type_t>(stream, profile)) {
+ switchVolumeCurve(profile, stream);
return true;
}
return false;
}
-
template <typename Property, typename Key>
bool Engine::setPropertyForKey(const Property &property, const Key &key)
{
@@ -199,7 +132,11 @@
status_t Engine::setPhoneState(audio_mode_t mode)
{
- return mPolicyParameterMgr->setPhoneState(mode);
+ status_t status = mPolicyParameterMgr->setPhoneState(mode);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return EngineBase::setPhoneState(mode);
}
audio_mode_t Engine::getPhoneState() const
@@ -210,7 +147,11 @@
status_t Engine::setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config)
{
- return mPolicyParameterMgr->setForceUse(usage, config);
+ status_t status = mPolicyParameterMgr->setForceUse(usage, config);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return EngineBase::setForceUse(usage, config);
}
audio_policy_forced_cfg_t Engine::getForceUse(audio_policy_force_use_t usage) const
@@ -225,24 +166,210 @@
if (audio_is_output_device(devDesc->type())) {
return mPolicyParameterMgr->setAvailableOutputDevices(
- mApmObserver->getAvailableOutputDevices().types());
+ getApmObserver()->getAvailableOutputDevices().types());
} else if (audio_is_input_device(devDesc->type())) {
return mPolicyParameterMgr->setAvailableInputDevices(
- mApmObserver->getAvailableInputDevices().types());
+ getApmObserver()->getAvailableInputDevices().types());
}
return BAD_TYPE;
}
+status_t Engine::loadAudioPolicyEngineConfig()
+{
+ auto result = EngineBase::loadAudioPolicyEngineConfig();
+
+ // Custom XML Parsing
+ auto loadCriteria = [this](const auto& configCriteria, const auto& configCriterionTypes) {
+ for (auto& criterion : configCriteria) {
+ engineConfig::CriterionType criterionType;
+ for (auto &configCriterionType : configCriterionTypes) {
+ if (configCriterionType.name == criterion.typeName) {
+ criterionType = configCriterionType;
+ break;
+ }
+ }
+ ALOG_ASSERT(not criterionType.name.empty(), "Invalid criterion type for %s",
+ criterion.name.c_str());
+ mPolicyParameterMgr->addCriterion(criterion.name, criterionType.isInclusive,
+ criterionType.valuePairs,
+ criterion.defaultLiteralValue);
+ }
+ };
+
+ loadCriteria(result.parsedConfig->criteria, result.parsedConfig->criterionTypes);
+ return result.nbSkippedElement == 0? NO_ERROR : BAD_VALUE;
+}
+
+DeviceVector Engine::getDevicesForProductStrategy(product_strategy_t ps) const
+{
+ const auto productStrategies = getProductStrategies();
+ if (productStrategies.find(ps) == productStrategies.end()) {
+ ALOGE("%s: Trying to get device on invalid strategy %d", __FUNCTION__, ps);
+ return {};
+ }
+ const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+ const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+ uint32_t availableOutputDevicesType = availableOutputDevices.types();
+
+ /** This is the only case handled programmatically because the PFW is unable to know the
+ * activity of streams.
+ *
+ * -While media is playing on a remote device, use the sonification behavior.
+ * Note that we test this usecase before testing if media is playing because
+ * the isStreamActive() method only informs about the activity of a stream, not
+ * if it's for local playback. Note also that we use the same delay between both tests
+ *
+ * -When media is not playing anymore, fall back on the sonification behavior
+ */
+ audio_devices_t devices = AUDIO_DEVICE_NONE;
+ if (ps == getProductStrategyForStream(AUDIO_STREAM_NOTIFICATION) &&
+ !is_state_in_call(getPhoneState()) &&
+ !outputs.isActiveRemotely(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY) &&
+ outputs.isActive(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+ product_strategy_t strategyForMedia =
+ getProductStrategyForStream(AUDIO_STREAM_MUSIC);
+ devices = productStrategies.getDeviceTypesForProductStrategy(strategyForMedia);
+ } else if (ps == getProductStrategyForStream(AUDIO_STREAM_ACCESSIBILITY) &&
+ (outputs.isActive(streamToVolumeSource(AUDIO_STREAM_RING)) ||
+ outputs.isActive(streamToVolumeSource(AUDIO_STREAM_ALARM)))) {
+ // do not route accessibility prompts to a digital output currently configured with a
+ // compressed format as they would likely not be mixed and dropped.
+ // Device For Sonification conf file has HDMI, SPDIF and HDMI ARC unreachable.
+ product_strategy_t strategyNotification = getProductStrategyForStream(AUDIO_STREAM_RING);
+ devices = productStrategies.getDeviceTypesForProductStrategy(strategyNotification);
+ } else {
+ devices = productStrategies.getDeviceTypesForProductStrategy(ps);
+ }
+ if (devices == AUDIO_DEVICE_NONE ||
+ (devices & availableOutputDevicesType) == AUDIO_DEVICE_NONE) {
+ devices = getApmObserver()->getDefaultOutputDevice()->type();
+ ALOGE_IF(devices == AUDIO_DEVICE_NONE, "%s: no valid default device defined", __FUNCTION__);
+ return DeviceVector(getApmObserver()->getDefaultOutputDevice());
+ }
+ if (/*device_distinguishes_on_address(devices)*/ devices == AUDIO_DEVICE_OUT_BUS) {
+ // We do expect only one device for these types of devices
+ // The device address criterion guarantees this one is available.
+ // If this criterion is not desired, we need to ensure this device is available.
+ const String8 address(productStrategies.getDeviceAddressForProductStrategy(ps).c_str());
+ ALOGV("%s:device 0x%x %s %d", __FUNCTION__, devices, address.c_str(), ps);
+ return DeviceVector(availableOutputDevices.getDevice(devices,
+ address,
+ AUDIO_FORMAT_DEFAULT));
+ }
+ ALOGV("%s:device 0x%x %d", __FUNCTION__, devices, ps);
+ return availableOutputDevices.getDevicesFromTypeMask(devices);
+}
+
+DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
+ const sp<DeviceDescriptor> &preferredDevice,
+ bool fromCache) const
+{
+ // First check for explicit routing device
+ if (preferredDevice != nullptr) {
+ ALOGV("%s explicit Routing on device %s", __func__, preferredDevice->toString().c_str());
+ return DeviceVector(preferredDevice);
+ }
+ product_strategy_t strategy = getProductStrategyForAttributes(attributes);
+ const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+ const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+ //
+ // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
+ // be by APM?
+ //
+ // Honor explicit routing requests only if all active clients have a preferred route in which
+ // case the last active client route is used
+ sp<DeviceDescriptor> device = findPreferredDevice(outputs, strategy, availableOutputDevices);
+ if (device != nullptr) {
+ return DeviceVector(device);
+ }
+
+ return fromCache? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
+}
+
+DeviceVector Engine::getOutputDevicesForStream(audio_stream_type_t stream, bool fromCache) const
+{
+ auto attributes = EngineBase::getAttributesForStreamType(stream);
+ return getOutputDevicesForAttributes(attributes, nullptr, fromCache);
+}
+
+sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+ AudioMix **mix) const
+{
+ const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
+ const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+ const auto &inputs = getApmObserver()->getInputs();
+ std::string address;
+ //
+ // Explicit Routing ??? what is the priority of explicit routing? Shall it be considered
+ // first as it used to be by APM?
+ //
+ // Honor explicit routing requests only if all active clients have a preferred route in which
+ // case the last active client route is used
+ sp<DeviceDescriptor> device =
+ findPreferredDevice(inputs, attr.source, availableInputDevices);
+ if (device != nullptr) {
+ return device;
+ }
+
+ device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+ if (device != nullptr) {
+ return device;
+ }
+
+ audio_devices_t deviceType = getPropertyForKey<audio_devices_t, audio_source_t>(attr.source);
+
+ if (audio_is_remote_submix_device(deviceType)) {
+ address = "0";
+ std::size_t pos;
+ std::string tags { attr.tags };
+ if ((pos = tags.find("addr=")) != std::string::npos) {
+ address = tags.substr(pos + std::strlen("addr="));
+ }
+ }
+ return availableInputDevices.getDevice(deviceType, String8(address.c_str()), AUDIO_FORMAT_DEFAULT);
+}
+
+void Engine::updateDeviceSelectionCache()
+{
+ for (const auto &iter : getProductStrategies()) {
+ const auto &strategy = iter.second;
+ mDevicesForStrategies[strategy->getId()] = getDevicesForProductStrategy(strategy->getId());
+ }
+}
+
+void Engine::setDeviceAddressForProductStrategy(product_strategy_t strategy,
+ const std::string &address)
+{
+ if (getProductStrategies().find(strategy) == getProductStrategies().end()) {
+ ALOGE("%s: Trying to set address %s on invalid strategy %d", __FUNCTION__, address.c_str(),
+ strategy);
+ return;
+ }
+ getProductStrategies().at(strategy)->setDeviceAddress(address);
+}
+
+bool Engine::setDeviceTypesForProductStrategy(product_strategy_t strategy, audio_devices_t devices)
+{
+ if (getProductStrategies().find(strategy) == getProductStrategies().end()) {
+ ALOGE("%s: set device %d on invalid strategy %d", __FUNCTION__, devices, strategy);
+ return false;
+ }
+ getProductStrategies().at(strategy)->setDeviceTypes(devices);
+ return true;
+}
+
template <>
AudioPolicyManagerInterface *Engine::queryInterface()
{
- return &mManagerInterface;
+ return this;
}
template <>
AudioPolicyPluginInterface *Engine::queryInterface()
{
- return &mPluginInterface;
+ return this;
}
} // namespace audio_policy
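The hunk above removes the inner ManagerInterfaceImpl/PluginInterfaceImpl wrappers: Engine now derives from EngineBase and AudioPolicyPluginInterface, so queryInterface() can hand back the engine itself, and output-device selection goes through product strategies instead of routing_strategy. A minimal sketch of the resulting call path, using only names introduced in this diff; the surrounding engine setup and the attribute values are assumptions for illustration, not part of the change:

    // Illustrative only: resolve output devices for a media request through the
    // refactored engine. The engine maps the attributes to a product_strategy_t,
    // then returns either the cached devices or a fresh evaluation of
    // getDevicesForProductStrategy().
    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
    attr.usage = AUDIO_USAGE_MEDIA;
    DeviceVector devices = engine->getOutputDevicesForAttributes(
            attr, nullptr /* preferredDevice: no explicit routing */, true /* fromCache */);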
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index ba4f889..5553994 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -16,7 +16,7 @@
#pragma once
-
+#include "EngineBase.h"
#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyPluginInterface.h>
#include "Collection.h"
@@ -29,7 +29,7 @@
class ParameterManagerWrapper;
class VolumeProfile;
-class Engine
+class Engine : public EngineBase, AudioPolicyPluginInterface
{
public:
Engine();
@@ -38,132 +38,69 @@
template <class RequestedInterface>
RequestedInterface *queryInterface();
-private:
- /// Interface members
- class ManagerInterfaceImpl : public AudioPolicyManagerInterface
+ ///
+ /// from EngineBase
+ ///
+ android::status_t initCheck() override;
+
+ status_t setPhoneState(audio_mode_t mode) override;
+
+ audio_mode_t getPhoneState() const override;
+
+ status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) override;
+
+ audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const override;
+
+ android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
+ audio_policy_dev_state_t state) override;
+
+ DeviceVector getOutputDevicesForAttributes(const audio_attributes_t &attr,
+ const sp<DeviceDescriptor> &preferedDevice = nullptr,
+ bool fromCache = false) const override;
+
+ DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
+ bool fromCache = false) const override;
+
+ sp<DeviceDescriptor> getInputDeviceForAttributes(
+ const audio_attributes_t &attr, AudioMix **mix = nullptr) const override;
+
+ void updateDeviceSelectionCache() override;
+
+ ///
+ /// from AudioPolicyPluginInterface
+ ///
+ status_t addStream(const std::string &name, audio_stream_type_t stream) override
{
- public:
- ManagerInterfaceImpl(Engine *policyEngine)
- : mPolicyEngine(policyEngine) {}
-
- virtual android::status_t initCheck()
- {
- return mPolicyEngine->initCheck();
- }
- virtual void setObserver(AudioPolicyManagerObserver *observer)
- {
- mPolicyEngine->setObserver(observer);
- }
- virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const
- {
- return mPolicyEngine->getPropertyForKey<audio_devices_t, audio_source_t>(inputSource);
- }
- virtual audio_devices_t getDeviceForStrategy(routing_strategy stategy) const;
- virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
- {
- return mPolicyEngine->getPropertyForKey<routing_strategy, audio_stream_type_t>(stream);
- }
- virtual routing_strategy getStrategyForUsage(audio_usage_t usage);
- virtual status_t setPhoneState(audio_mode_t mode)
- {
- return mPolicyEngine->setPhoneState(mode);
- }
- virtual audio_mode_t getPhoneState() const
- {
- return mPolicyEngine->getPhoneState();
- }
- virtual status_t setForceUse(audio_policy_force_use_t usage,
- audio_policy_forced_cfg_t config)
- {
- return mPolicyEngine->setForceUse(usage, config);
- }
- virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
- {
- return mPolicyEngine->getForceUse(usage);
- }
- virtual android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
- audio_policy_dev_state_t state)
- {
- return mPolicyEngine->setDeviceConnectionState(devDesc, state);
- }
-
- private:
- Engine *mPolicyEngine;
- } mManagerInterface;
-
- class PluginInterfaceImpl : public AudioPolicyPluginInterface
+ return add<audio_stream_type_t>(name, stream);
+ }
+ status_t addInputSource(const std::string &name, audio_source_t source) override
{
- public:
- PluginInterfaceImpl(Engine *policyEngine)
- : mPolicyEngine(policyEngine) {}
+ return add<audio_source_t>(name, source);
+ }
+ bool setVolumeProfileForStream(const audio_stream_type_t &stream,
+ const audio_stream_type_t &volumeProfile) override;
- virtual status_t addStrategy(const std::string &name, routing_strategy strategy)
- {
- return mPolicyEngine->add<routing_strategy>(name, strategy);
- }
- virtual status_t addStream(const std::string &name, audio_stream_type_t stream)
- {
- return mPolicyEngine->add<audio_stream_type_t>(name, stream);
- }
- virtual status_t addUsage(const std::string &name, audio_usage_t usage)
- {
- return mPolicyEngine->add<audio_usage_t>(name, usage);
- }
- virtual status_t addInputSource(const std::string &name, audio_source_t source)
- {
- return mPolicyEngine->add<audio_source_t>(name, source);
- }
- virtual bool setDeviceForStrategy(const routing_strategy &strategy, audio_devices_t devices)
- {
- return mPolicyEngine->setPropertyForKey<audio_devices_t, routing_strategy>(devices,
- strategy);
- }
- virtual bool setStrategyForStream(const audio_stream_type_t &stream,
- routing_strategy strategy)
- {
- return mPolicyEngine->setPropertyForKey<routing_strategy, audio_stream_type_t>(strategy,
- stream);
- }
- virtual bool setVolumeProfileForStream(const audio_stream_type_t &stream,
- const audio_stream_type_t &volumeProfile);
+ bool setDeviceForInputSource(const audio_source_t &inputSource, audio_devices_t device) override
+ {
+ return setPropertyForKey<audio_devices_t, audio_source_t>(device, inputSource);
+ }
+ void setDeviceAddressForProductStrategy(product_strategy_t strategy,
+ const std::string &address) override;
- virtual bool setStrategyForUsage(const audio_usage_t &usage, routing_strategy strategy)
- {
- return mPolicyEngine->setPropertyForKey<routing_strategy, audio_usage_t>(strategy,
- usage);
- }
- virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
- audio_devices_t device)
- {
- return mPolicyEngine->setPropertyForKey<audio_devices_t, audio_source_t>(device,
- inputSource);
- }
+ bool setDeviceTypesForProductStrategy(product_strategy_t strategy,
+ audio_devices_t devices) override;
- private:
- Engine *mPolicyEngine;
- } mPluginInterface;
+ product_strategy_t getProductStrategyByName(const std::string &name) override
+ {
+ return EngineBase::getProductStrategyByName(name);
+ }
private:
/* Copy facilities are put private to disable copy. */
Engine(const Engine &object);
Engine &operator=(const Engine &object);
- void setObserver(AudioPolicyManagerObserver *observer);
-
- bool setVolumeProfileForStream(const audio_stream_type_t &stream,
- device_category deviceCategory,
- const VolumeCurvePoints &points);
-
- status_t initCheck();
- status_t setPhoneState(audio_mode_t mode);
- audio_mode_t getPhoneState() const;
- status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
- audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const;
- status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
- audio_policy_dev_state_t state);
- StrategyCollection mStrategyCollection; /**< Strategies indexed by their enum id. */
StreamCollection mStreamCollection; /**< Streams indexed by their enum id. */
- UsageCollection mUsageCollection; /**< Usages indexed by their enum id. */
InputSourceCollection mInputSourceCollection; /**< Input sources indexed by their enum id. */
template <typename Key>
@@ -184,12 +121,16 @@
template <typename Property, typename Key>
bool setPropertyForKey(const Property &property, const Key &key);
+ status_t loadAudioPolicyEngineConfig();
+
+ DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
+
/**
* Policy Parameter Manager hidden through a wrapper.
*/
ParameterManagerWrapper *mPolicyParameterMgr;
- AudioPolicyManagerObserver *mApmObserver;
+ DeviceStrategyMap mDevicesForStrategies;
};
} // namespace audio_policy
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.h b/services/audiopolicy/engineconfigurable/src/InputSource.h
index 64b390e..e1865cc 100644
--- a/services/audiopolicy/engineconfigurable/src/InputSource.h
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.h
@@ -59,7 +59,7 @@
/**
* A Policy element may implement getter/setter function for a given property.
- * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+ * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
* or a string.
*/
template <typename Property>
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.cpp b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
deleted file mode 100644
index 310b35e..0000000
--- a/services/audiopolicy/engineconfigurable/src/Strategy.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPolicyEngine/Strategy"
-
-#include "Strategy.h"
-
-using std::string;
-
-namespace android {
-namespace audio_policy {
-
-status_t Element<routing_strategy>::setIdentifier(routing_strategy identifier)
-{
- if (identifier >= NUM_STRATEGIES) {
- return BAD_VALUE;
- }
- mIdentifier = identifier;
- ALOGD("%s: Strategy %s identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
- return NO_ERROR;
-}
-
-/**
- * Set the device associated to this strategy.
- * It checks if the output device is valid.
- *
- * @param[in] devices selected for the given strategy.
- *
- * @return NO_ERROR if the device is either valid or none, error code otherwise.
- */
-template <>
-status_t Element<routing_strategy>::set<audio_devices_t>(audio_devices_t devices)
-{
- if (!audio_is_output_devices(devices) || devices == AUDIO_DEVICE_NONE) {
- ALOGE("%s: trying to set an invalid device 0x%X for strategy %s",
- __FUNCTION__, devices, getName().c_str());
- return BAD_VALUE;
- }
- ALOGD("%s: 0x%X for strategy %s", __FUNCTION__, devices, getName().c_str());
- mApplicableDevices = devices;
- return NO_ERROR;
-}
-
-template <>
-audio_devices_t Element<routing_strategy>::get<audio_devices_t>() const
-{
- ALOGV("%s: 0x%X for strategy %s", __FUNCTION__, mApplicableDevices, getName().c_str());
- return mApplicableDevices;
-}
-
-} // namespace audio_policy
-} // namespace android
-
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.h b/services/audiopolicy/engineconfigurable/src/Strategy.h
deleted file mode 100644
index f2487fd..0000000
--- a/services/audiopolicy/engineconfigurable/src/Strategy.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "Element.h"
-#include <RoutingStrategy.h>
-
-namespace android {
-namespace audio_policy {
-
-/**
- * @tparam audio_devices_t: Applicable output device(s) for this strategy.
- */
-template <>
-class Element<routing_strategy>
-{
-public:
- Element(const std::string &name)
- : mName(name),
- mApplicableDevices(AUDIO_DEVICE_NONE)
- {}
- ~Element() {}
-
- /**
- * Returns identifier of this policy element
- *
- * @returns string representing the name of this policy element
- */
- const std::string &getName() const { return mName; }
-
- /**
- * Set the unique identifier for this policy element.
- *
- * @tparam Key type of the unique identifier.
- * @param[in] identifier to be set.
- *
- * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
- */
- status_t setIdentifier(routing_strategy identifier);
-
- /**
- * @return the unique identifier of this policy element.
- */
- routing_strategy getIdentifier() const { return mIdentifier; }
-
- /**
- * A Policy element may implement getter/setter function for a given property.
- * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
- * or a string.
- */
- template <typename Property>
- Property get() const;
-
- template <typename Property>
- status_t set(Property property);
-
-private:
- /* Copy facilities are put private to disable copy. */
- Element(const Element &object);
- Element &operator=(const Element &object);
-
- std::string mName; /**< Unique literal Identifier of a policy base element*/
- routing_strategy mIdentifier; /**< Unique numerical Identifier of a policy base element*/
-
- audio_devices_t mApplicableDevices; /**< Applicable output device(s) for this strategy. */
-};
-
-typedef Element<routing_strategy> Strategy;
-
-} // namespace audio_policy
-} // namespace android
-
-
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
index 73fb94d..297eb02 100644
--- a/services/audiopolicy/engineconfigurable/src/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Stream.cpp
@@ -34,32 +34,6 @@
return NO_ERROR;
}
-/**
-* Set the strategy to follow for this stream.
-* It checks if the strategy is valid.
-*
-* @param[in] strategy to be followed.
-*
-* @return NO_ERROR if the strategy is set correctly, error code otherwise.
-*/
-template <>
-status_t Element<audio_stream_type_t>::set<routing_strategy>(routing_strategy strategy)
-{
- if (strategy >= NUM_STRATEGIES) {
- return BAD_VALUE;
- }
- mApplicableStrategy = strategy;
- ALOGD("%s: 0x%X for Stream %s", __FUNCTION__, strategy, getName().c_str());
- return NO_ERROR;
-}
-
-template <>
-routing_strategy Element<audio_stream_type_t>::get<routing_strategy>() const
-{
- ALOGV("%s: 0x%X for Stream %s", __FUNCTION__, mApplicableStrategy, getName().c_str());
- return mApplicableStrategy;
-}
-
template <>
status_t Element<audio_stream_type_t>::set<audio_stream_type_t>(audio_stream_type_t volumeProfile)
{
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.h b/services/audiopolicy/engineconfigurable/src/Stream.h
index 2bf70b3..a4fdd39 100644
--- a/services/audiopolicy/engineconfigurable/src/Stream.h
+++ b/services/audiopolicy/engineconfigurable/src/Stream.h
@@ -18,22 +18,20 @@
#include "Element.h"
#include "EngineDefinition.h"
-#include <RoutingStrategy.h>
#include <map>
namespace android {
namespace audio_policy {
/**
- * @tparam routing_strategy: Applicable strategy for this stream.
+ * @tparam product_strategy_t: Applicable strategy for this stream.
*/
template <>
class Element<audio_stream_type_t>
{
public:
Element(const std::string &name)
- : mName(name),
- mApplicableStrategy(STRATEGY_MEDIA)
+ : mName(name)
{}
~Element() {}
@@ -61,7 +59,7 @@
/**
* A Policy element may implement getter/setter function for a given property.
- * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+ * Property may be audio_stream_type_t, audio_usage_t, audio_source_t
* or a string.
*/
template <typename Property>
@@ -78,8 +76,6 @@
std::string mName; /**< Unique literal Identifier of a policy base element*/
audio_stream_type_t mIdentifier; /**< Unique numerical Identifier of a policy base element*/
- routing_strategy mApplicableStrategy; /**< Applicable strategy for this stream. */
-
audio_stream_type_t mVolumeProfile; /**< Volume Profile followed by this stream. */
};
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.cpp b/services/audiopolicy/engineconfigurable/src/Usage.cpp
deleted file mode 100644
index 8c0dfba..0000000
--- a/services/audiopolicy/engineconfigurable/src/Usage.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPolicyEngine/Usage"
-
-#include "Usage.h"
-
-namespace android {
-namespace audio_policy {
-
-status_t Element<audio_usage_t>::setIdentifier(audio_usage_t identifier)
-{
- if (identifier > AUDIO_USAGE_MAX) {
- return BAD_VALUE;
- }
- mIdentifier = identifier;
- ALOGD("%s: Usage %s has identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
- return NO_ERROR;
-}
-
-template <>
-status_t Element<audio_usage_t>::set<routing_strategy>(routing_strategy strategy)
-{
- if (strategy >= NUM_STRATEGIES) {
- return BAD_VALUE;
- }
- ALOGD("%s: %d for Usage %s", __FUNCTION__, strategy, getName().c_str());
- mApplicableStrategy = strategy;
- return NO_ERROR;
-}
-
-template <>
-routing_strategy Element<audio_usage_t>::get<routing_strategy>() const
-{
- ALOGD("%s: %d for Usage %s", __FUNCTION__, mApplicableStrategy, getName().c_str());
- return mApplicableStrategy;
-}
-
-} // namespace audio_policy
-} // namespace android
-
-
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.h b/services/audiopolicy/engineconfigurable/src/Usage.h
deleted file mode 100644
index 72a452f..0000000
--- a/services/audiopolicy/engineconfigurable/src/Usage.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "Element.h"
-#include <RoutingStrategy.h>
-
-namespace android {
-namespace audio_policy {
-
-/**
- * @tparam routing_strategy: Applicable strategy for this usage.
- */
-template <>
-class Element<audio_usage_t>
-{
-public:
- Element(const std::string &name)
- : mName(name),
- mApplicableStrategy(STRATEGY_MEDIA)
- {}
- ~Element() {}
-
- /**
- * Returns identifier of this policy element
- *
- * @returns string representing the name of this policy element
- */
- const std::string &getName() const { return mName; }
-
- /**
- * Set the unique identifier for this policy element.
- *
- * @tparam Key type of the unique identifier.
- * @param[in] identifier to be set.
- *
- * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
- */
- status_t setIdentifier(audio_usage_t identifier);
-
- /**
- * @return the unique identifier of this policy element.
- */
- audio_usage_t getIdentifier() const { return mIdentifier; }
-
- /**
- * A Policy element may implement getter/setter function for a given property.
- * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
- * or a string.
- */
- template <typename Property>
- Property get() const;
-
- template <typename Property>
- status_t set(Property property);
-
-private:
- /* Copy facilities are put private to disable copy. */
- Element(const Element &object);
- Element &operator=(const Element &object);
-
- std::string mName; /**< Unique literal Identifier of a policy base element*/
- audio_usage_t mIdentifier; /**< Unique numerical Identifier of a policy base element*/
- routing_strategy mApplicableStrategy; /**< Applicable strategy for this usage. */
-};
-
-typedef Element<audio_usage_t> Usage;
-
-} // namespace audio_policy
-} // namespace android
-
-
diff --git a/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk b/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
index 4814376..eebdfd6 100644
--- a/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
+++ b/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
@@ -8,7 +8,6 @@
$(LOCAL_BUILT_MODULE): MY_AUDIO_POLICY_CONFIGURATION_FILE := $(AUDIO_POLICY_CONFIGURATION_FILE)
$(LOCAL_BUILT_MODULE): MY_CRITERION_TOOL := $(HOST_OUT)/bin/buildPolicyCriterionTypes.py
$(LOCAL_BUILT_MODULE): $(LOCAL_REQUIRED_MODULES) $(LOCAL_ADDITIONAL_DEPENDENCIES) \
- buildPolicyCriterionTypes.py \
$(CRITERION_TYPES_FILE) \
$(ANDROID_AUDIO_BASE_HEADER_FILE)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index d19a364..c7d8d34 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -1,8 +1,5 @@
LOCAL_PATH:= $(call my-dir)
-TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
-PROVISION_CRITERION_TYPES := $(TOOLS)/provision_criterion_types_from_android_headers.mk
-
##################################################################
# WRAPPER LIBRARY
##################################################################
@@ -13,20 +10,20 @@
$(LOCAL_PATH)/include \
frameworks/av/services/audiopolicy/engineconfigurable/include \
frameworks/av/services/audiopolicy/engineconfigurable/interface \
- frameworks/av/services/audiopolicy/common/include \
external/libxml2/include \
external/icu/icu4c/source/common
LOCAL_SRC_FILES:= \
- ParameterManagerWrapper.cpp \
- ParameterManagerWrapperConfig.cpp
+ ParameterManagerWrapper.cpp
LOCAL_SHARED_LIBRARIES := \
libparameter \
libmedia_helper \
- libicuuc \
libxml2
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon
+
LOCAL_STATIC_LIBRARIES := \
libaudiopolicycomponents
@@ -40,39 +37,3 @@
include $(BUILD_STATIC_LIBRARY)
-##################################################################
-# CONFIGURATION FILE
-##################################################################
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 1)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := policy_wrapper_configuration.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := config/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := policy_criteria.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := config/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := policy_criterion_types.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_ADDITIONAL_DEPENDENCIES := \
- $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
-
-AUDIO_POLICY_CONFIGURATION_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
-ANDROID_AUDIO_BASE_HEADER_FILE := system/media/audio/include/system/audio-base.h
-CRITERION_TYPES_FILE := $(LOCAL_PATH)/config/policy_criterion_types.xml.in
-
-include $(PROVISION_CRITERION_TYPES)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), 1)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index 1934fa4..4b57444 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -18,7 +18,6 @@
//#define LOG_NDEBUG 0
#include "ParameterManagerWrapper.h"
-#include "ParameterManagerWrapperConfig.h"
#include <ParameterMgrPlatformConnector.h>
#include <SelectionCriterionTypeInterface.h>
#include <SelectionCriterionInterface.h>
@@ -38,7 +37,6 @@
using std::string;
using std::map;
using std::vector;
-using CriterionTypes = std::map<std::string, ISelectionCriterionTypeInterface *>;
/// PFW related definitions
// Logger
@@ -65,6 +63,8 @@
const char *const ParameterManagerWrapper::mPolicyPfwDefaultConfFileName =
"/etc/parameter-framework/ParameterFrameworkConfigurationPolicy.xml";
+const char *const ParameterManagerWrapper::mPolicyPfwVendorConfFileName =
+ "/vendor/etc/parameter-framework/ParameterFrameworkConfigurationPolicy.xml";
static const char *const gInputDeviceCriterionName = "AvailableInputDevices";
static const char *const gOutputDeviceCriterionName = "AvailableOutputDevices";
@@ -96,67 +96,43 @@
: mPfwConnectorLogger(new ParameterMgrPlatformConnectorLogger)
{
// Connector
- mPfwConnector = new CParameterMgrPlatformConnector(mPolicyPfwDefaultConfFileName);
+ if (access(mPolicyPfwVendorConfFileName, R_OK) == 0) {
+ mPfwConnector = new CParameterMgrPlatformConnector(mPolicyPfwVendorConfFileName);
+ } else {
+ mPfwConnector = new CParameterMgrPlatformConnector(mPolicyPfwDefaultConfFileName);
+ }
// Logger
mPfwConnector->setLogger(mPfwConnectorLogger);
-
- status_t loadResult = loadConfig();
- if (loadResult < 0) {
- ALOGE("Policy Wrapper configuration is partially invalid.");
- }
}
-status_t ParameterManagerWrapper::loadConfig()
+status_t ParameterManagerWrapper::addCriterion(const std::string &name, bool isInclusive,
+ ValuePairs pairs, const std::string &defaultValue)
{
- auto result = wrapper_config::parse();
- if (result.parsedConfig == nullptr) {
- return -ENOENT;
+ ALOG_ASSERT(not isStarted(), "Cannot add a criterion if PFW is already started");
+ auto criterionType = mPfwConnector->createSelectionCriterionType(isInclusive);
+
+ for (auto pair : pairs) {
+ std::string error;
+ ALOGV("%s: Adding pair %d,%s for criterionType %s", __FUNCTION__, pair.first,
+ pair.second.c_str(), name.c_str());
+ criterionType->addValuePair(pair.first, pair.second, error);
}
- ALOGE_IF(result.nbSkippedElement != 0, "skipped %zu elements", result.nbSkippedElement);
+ ALOG_ASSERT(mPolicyCriteria.find(name) == mPolicyCriteria.end(),
+ "%s: Criterion %s already added", __FUNCTION__, name.c_str());
- CriterionTypes criterionTypes;
- for (auto criterionType : result.parsedConfig->criterionTypes) {
- ALOG_ASSERT(criterionTypes.find(criterionType.name) == criterionTypes.end(),
- "CriterionType %s already added", criterionType.name.c_str());
- ALOGV("%s: Adding new criterionType %s", __FUNCTION__, criterionType.name.c_str());
+ auto criterion = mPfwConnector->createSelectionCriterion(name, criterionType);
+ mPolicyCriteria[name] = criterion;
- auto criterionTypePfw =
- mPfwConnector->createSelectionCriterionType(criterionType.isInclusive);
-
- for (auto pair : criterionType.valuePairs) {
- std::string error;
- ALOGV("%s: Adding pair %d,%s for criterionType %s", __FUNCTION__, pair.first,
- pair.second.c_str(), criterionType.name.c_str());
- criterionTypePfw->addValuePair(pair.first, pair.second, error);
+ if (not defaultValue.empty()) {
+ int numericalValue = 0;
+ if (not criterionType->getNumericalValue(defaultValue.c_str(), numericalValue)) {
+ ALOGE("%s; trying to apply invalid default literal value (%s)", __FUNCTION__,
+ defaultValue.c_str());
}
- criterionTypes[criterionType.name] = criterionTypePfw;
+ criterion->setCriterionState(numericalValue);
}
-
- for (auto criterion : result.parsedConfig->criteria) {
- ALOG_ASSERT(mPolicyCriteria.find(criterion.name) == mPolicyCriteria.end(),
- "%s: Criterion %s already added", __FUNCTION__, criterion.name.c_str());
-
- auto criterionType =
- getElement<ISelectionCriterionTypeInterface>(criterion.typeName, criterionTypes);
- ALOG_ASSERT(criterionType != nullptr, "No %s Criterion type found for criterion %s",
- criterion.typeName.c_str(), criterion.name.c_str());
-
- auto criterionPfw = mPfwConnector->createSelectionCriterion(criterion.name, criterionType);
- mPolicyCriteria[criterion.name] = criterionPfw;
-
- if (not criterion.defaultLiteralValue.empty()) {
- int numericalValue = 0;
- if (not criterionType->getNumericalValue(criterion.defaultLiteralValue.c_str(),
- numericalValue)) {
- ALOGE("%s; trying to apply invalid default literal value (%s)", __FUNCTION__,
- criterion.defaultLiteralValue.c_str());
- continue;
- }
- criterionPfw->setCriterionState(numericalValue);
- }
- }
- return result.nbSkippedElement == 0? NO_ERROR : BAD_VALUE;
+ return NO_ERROR;
}
ParameterManagerWrapper::~ParameterManagerWrapper()
@@ -283,7 +259,7 @@
std::string criterionName = audio_is_output_device(devDesc->type()) ?
gOutputDeviceAddressCriterionName : gInputDeviceAddressCriterionName;
- ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->mAddress.string(),
+ ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->address().string(),
state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE? "disconnected" : "connected");
ISelectionCriterionInterface *criterion =
getElement<ISelectionCriterionInterface>(criterionName, mPolicyCriteria);
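With ParameterManagerWrapperConfig removed below, the wrapper no longer parses its own XML; the engine provisions each criterion through the new addCriterion() entry point before starting the PFW. A short sketch of that contract, with made-up value pairs for illustration (the real pairs come from the parsed policy engine configuration, not from this example):

    // Illustrative only: criteria must be declared before start(), which the
    // wrapper asserts via isStarted().
    ValuePairs pairs = { {1, "Earpiece"}, {2, "Speaker"} };
    ParameterManagerWrapper *pfw = new ParameterManagerWrapper();
    pfw->addCriterion("AvailableOutputDevices", true /* isInclusive */, pairs, "Speaker");
    status_t status = pfw->start();  // criteria become PFW selection criteria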
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.cpp
deleted file mode 100644
index bc6d046..0000000
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPolicyEngine/PFWWrapperConfig"
-#define LOG_NDEBUG 0
-
-#include "ParameterManagerWrapperConfig.h"
-
-#include <media/convert.h>
-#include <utils/Log.h>
-#include <libxml/parser.h>
-#include <libxml/xinclude.h>
-#include <string>
-#include <vector>
-#include <sstream>
-#include <istream>
-
-
-namespace android {
-
-using utilities::convertTo;
-
-namespace audio_policy {
-namespace wrapper_config {
-namespace detail {
-
-std::string getXmlAttribute(const xmlNode *cur, const char *attribute)
-{
- xmlChar *xmlValue = xmlGetProp(cur, (const xmlChar *)attribute);
- if (xmlValue == NULL) {
- return "";
- }
- std::string value((const char *)xmlValue);
- xmlFree(xmlValue);
- return value;
-}
-
-template <class Trait>
-static status_t deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
- typename Trait::Collection &collection,
- size_t &nbSkippedElement)
-{
- const xmlNode *root = cur->xmlChildrenNode;
- while (root != NULL) {
- if (xmlStrcmp(root->name, (const xmlChar *)Trait::collectionTag) &&
- xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
- root = root->next;
- continue;
- }
- const xmlNode *child = root;
- if (!xmlStrcmp(child->name, (const xmlChar *)Trait::collectionTag)) {
- child = child->xmlChildrenNode;
- }
- while (child != NULL) {
- if (!xmlStrcmp(child->name, (const xmlChar *)Trait::tag)) {
- status_t status = Trait::deserialize(doc, child, collection);
- if (status == NO_ERROR) {
- nbSkippedElement += 1;
- }
- }
- child = child->next;
- }
- if (!xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
- return NO_ERROR;
- }
- root = root->next;
- }
- return NO_ERROR;
-}
-
-const char *const ValueTraits::tag = "value";
-const char *const ValueTraits::collectionTag = "values";
-
-const char ValueTraits::Attributes::literal[] = "literal";
-const char ValueTraits::Attributes::numerical[] = "numerical";
-
-status_t ValueTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child, Collection &values)
-{
- std::string literal = getXmlAttribute(child, Attributes::literal);
- if (literal.empty()) {
- ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
- return BAD_VALUE;
- }
- uint32_t numerical = 0;
- std::string numericalTag = getXmlAttribute(child, Attributes::numerical);
- if (numericalTag.empty()) {
- ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::literal);
- return BAD_VALUE;
- }
- if (!convertTo(numericalTag, numerical)) {
- ALOGE("%s: : Invalid value(%s)", __FUNCTION__, numericalTag.c_str());
- return BAD_VALUE;
- }
- values.push_back({numerical, literal});
- return NO_ERROR;
-}
-
-const char *const CriterionTypeTraits::tag = "criterion_type";
-const char *const CriterionTypeTraits::collectionTag = "criterion_types";
-
-const char CriterionTypeTraits::Attributes::name[] = "name";
-const char CriterionTypeTraits::Attributes::type[] = "type";
-
-status_t CriterionTypeTraits::deserialize(_xmlDoc *doc, const _xmlNode *child,
- Collection &criterionTypes)
-{
- std::string name = getXmlAttribute(child, Attributes::name);
- if (name.empty()) {
- ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
- return BAD_VALUE;
- }
- ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::name, name.c_str());
-
- std::string type = getXmlAttribute(child, Attributes::type);
- if (type.empty()) {
- ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::type);
- return BAD_VALUE;
- }
- ALOGV("%s: %s %s = %s", __FUNCTION__, tag, Attributes::type, type.c_str());
- bool isInclusive(type == "inclusive");
-
- ValuePairs pairs;
- size_t nbSkippedElements = 0;
- detail::deserializeCollection<detail::ValueTraits>(doc, child, pairs, nbSkippedElements);
-
- criterionTypes.push_back({name, isInclusive, pairs});
- return NO_ERROR;
-}
-
-const char *const CriterionTraits::tag = "criterion";
-const char *const CriterionTraits::collectionTag = "criteria";
-
-const char CriterionTraits::Attributes::name[] = "name";
-const char CriterionTraits::Attributes::type[] = "type";
-const char CriterionTraits::Attributes::defaultVal[] = "default";
-
-status_t CriterionTraits::deserialize(_xmlDoc */*doc*/, const _xmlNode *child, Collection &criteria)
-{
- std::string name = getXmlAttribute(child, Attributes::name);
- if (name.empty()) {
- ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
- return BAD_VALUE;
- }
- ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
-
- std::string defaultValue = getXmlAttribute(child, Attributes::defaultVal);
- if (defaultValue.empty()) {
- // Not mandatory to provide a default value for a criterion, even it is recommanded...
- ALOGV("%s: No attribute %s found", __FUNCTION__, Attributes::defaultVal);
- }
- ALOGV("%s: %s = %s", __FUNCTION__, Attributes::defaultVal, defaultValue.c_str());
-
- std::string typeName = getXmlAttribute(child, Attributes::type);
- if (typeName.empty()) {
- ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::name);
- return BAD_VALUE;
- }
- ALOGV("%s: %s = %s", __FUNCTION__, Attributes::type, typeName.c_str());
-
- criteria.push_back({name, typeName, defaultValue});
- return NO_ERROR;
-}
-} // namespace detail
-
-ParsingResult parse(const char* path) {
- xmlDocPtr doc;
- doc = xmlParseFile(path);
- if (doc == NULL) {
- ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
- return {nullptr, 0};
- }
- xmlNodePtr cur = xmlDocGetRootElement(doc);
- if (cur == NULL) {
- ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
- xmlFreeDoc(doc);
- return {nullptr, 0};
- }
- if (xmlXIncludeProcess(doc) < 0) {
- ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
- return {nullptr, 0};
- }
- size_t nbSkippedElements = 0;
- auto config = std::make_unique<Config>();
-
- detail::deserializeCollection<detail::CriterionTraits>(
- doc, cur, config->criteria, nbSkippedElements);
- detail::deserializeCollection<detail::CriterionTypeTraits>(
- doc, cur, config->criterionTypes, nbSkippedElements);
-
- return {std::move(config), nbSkippedElements};
-}
-
-} // namespace wrapper_config
-} // namespace audio_policy
-} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.h b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.h
deleted file mode 100644
index 467d0e1..0000000
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapperConfig.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <stdint.h>
-#include <string>
-#include <vector>
-#include <utils/Errors.h>
-
-struct _xmlNode;
-struct _xmlDoc;
-
-namespace android {
-namespace audio_policy {
-namespace wrapper_config {
-
-/** Default path of audio policy usages configuration file. */
-constexpr char DEFAULT_PATH[] = "/vendor/etc/policy_wrapper_configuration.xml";
-
-/** Directories where the effect libraries will be searched for. */
-constexpr const char* POLICY_USAGE_LIBRARY_PATH[] = {"/odm/etc/", "/vendor/etc/", "/system/etc/"};
-
-using ValuePair = std::pair<uint32_t, std::string>;
-using ValuePairs = std::vector<ValuePair>;
-
-struct CriterionType
-{
- std::string name;
- bool isInclusive;
- ValuePairs valuePairs;
-};
-
-using CriterionTypes = std::vector<CriterionType>;
-
-struct Criterion
-{
- std::string name;
- std::string typeName;
- std::string defaultLiteralValue;
-};
-
-using Criteria = std::vector<Criterion>;
-
-struct Config {
- float version;
- Criteria criteria;
- CriterionTypes criterionTypes;
-};
-
-namespace detail
-{
-struct ValueTraits
-{
- static const char *const tag;
- static const char *const collectionTag;
-
- struct Attributes
- {
- static const char literal[];
- static const char numerical[];
- };
-
- typedef ValuePair Element;
- typedef ValuePair *PtrElement;
- typedef ValuePairs Collection;
-
- static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
- Collection &collection);
-};
-
-struct CriterionTypeTraits
-{
- static const char *const tag;
- static const char *const collectionTag;
-
- struct Attributes
- {
- static const char name[];
- static const char type[];
- };
-
- typedef CriterionType Element;
- typedef CriterionType *PtrElement;
- typedef CriterionTypes Collection;
-
- static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
- Collection &collection);
-};
-
-struct CriterionTraits
-{
- static const char *const tag;
- static const char *const collectionTag;
-
- struct Attributes
- {
- static const char name[];
- static const char type[];
- static const char defaultVal[];
- };
-
- typedef Criterion Element;
- typedef Criterion *PtrElement;
- typedef Criteria Collection;
-
- static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
- Collection &collection);
-};
-} // namespace detail
-
-/** Result of `parse(const char*)` */
-struct ParsingResult {
- /** Parsed config, nullptr if the xml lib could not load the file */
- std::unique_ptr<Config> parsedConfig;
- size_t nbSkippedElement; //< Number of skipped invalid product strategies
-};
-
-/** Parses the provided audio policy usage configuration.
- * @return audio policy usage @see Config
- */
-ParsingResult parse(const char* path = DEFAULT_PATH);
-
-} // namespace wrapper_config
-} // namespace audio_policy
-} // android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 1a634a1..5bfad29 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -39,6 +39,9 @@
namespace android {
namespace audio_policy {
+using ValuePair = std::pair<uint32_t, std::string>;
+using ValuePairs = std::vector<ValuePair>;
+
class ParameterManagerWrapper
{
private:
@@ -118,6 +121,17 @@
status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
audio_policy_dev_state_t state);
+ /**
+ * @brief addCriterion to the policy pfw
+ * @param name of the criterion
+ * @param isInclusive true for an inclusive criterion type, false for an exclusive one
+ * @param pairs numerical/literal value pairs defining the criterion
+ * @param defaultValue default value of the criterion, provided as a literal
+ * @return NO_ERROR if the criterion was added to the policy pfw, error code otherwise
+ */
+ status_t addCriterion(const std::string &name, bool isInclusive, ValuePairs pairs,
+ const std::string &defaultValue);
+
private:
/**
* Apply the configuration of the platform on the policy parameter manager.
@@ -131,13 +145,6 @@
*/
void applyPlatformConfiguration();
- /**
- * Load the criterion configuration file.
- *
- * @return NO_ERROR is parsing successful, error code otherwise.
- */
- status_t loadConfig();
-
/**
* Retrieve an element from a map by its name.
*
@@ -199,6 +206,7 @@
struct parameterManagerElementSupported;
static const char *const mPolicyPfwDefaultConfFileName; /**< Default Policy PFW top file name.*/
+ static const char *const mPolicyPfwVendorConfFileName; /**< Vendor Policy PFW top file name.*/
};
} // namespace audio_policy
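The addCriterion() method declared above registers a criterion with the policy parameter framework from the parsed engine configuration. A minimal usage sketch follows, assuming an already constructed ParameterManagerWrapper named "wrapper"; the include path, criterion name, pairs and default value are illustrative only and not part of this change:

    // Hypothetical usage sketch of addCriterion(); names and values are illustrative.
    #include <ParameterManagerWrapper.h>   // assumed include path

    android::status_t registerTelephonyModeCriterion(
            android::audio_policy::ParameterManagerWrapper &wrapper) {
        // ValuePair = {numerical value, literal label}, per the alias added above.
        android::audio_policy::ValuePairs modeValues = {
            {0, "Normal"},           // illustrative numerical/literal pairs
            {2, "InCall"},
            {3, "InCommunication"},
        };
        // Exclusive criterion: only one literal value can be active at a time.
        return wrapper.addCriterion("TelephonyMode", false /* isInclusive */,
                                    modeValues, "Normal" /* defaultValue */);
    }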
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index 837d5bb..ebf383b 100644
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -8,10 +8,13 @@
LOCAL_SRC_FILES := \
src/Engine.cpp \
src/EngineInstance.cpp \
+ ../engine/common/src/VolumeCurve.cpp \
+ ../engine/common/src/ProductStrategy.cpp \
+ ../engine/common/src/EngineBase.cpp \
+ ../engine/common/src/VolumeGroup.cpp
audio_policy_engine_includes_common := \
- $(LOCAL_PATH)/include \
- frameworks/av/services/audiopolicy/engine/interface
+ $(LOCAL_PATH)/include
LOCAL_CFLAGS += \
-Wall \
@@ -26,8 +29,7 @@
$(TARGET_OUT_HEADERS)/hw \
$(call include-path-for, frameworks-av) \
$(call include-path-for, audio-utils) \
- $(call include-path-for, bionic) \
- frameworks/av/services/audiopolicy/common/include
+ $(call include-path-for, bionic)
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -37,12 +39,19 @@
LOCAL_HEADER_LIBRARIES := libbase_headers
LOCAL_STATIC_LIBRARIES := \
- libaudiopolicycomponents \
+ libaudiopolicycomponents
-LOCAL_SHARED_LIBRARIES += \
+LOCAL_SHARED_LIBRARIES := \
liblog \
libcutils \
libutils \
- libmedia_helper
+ libmedia_helper \
+ libaudiopolicyengineconfig \
+ libaudiopolicy
+
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon \
+ libaudiopolicyengine_common_headers \
+ libaudiopolicyengine_interface_headers
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/enginedefault/config/Android.mk b/services/audiopolicy/enginedefault/config/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/enginedefault/config/example/Android.mk b/services/audiopolicy/enginedefault/config/example/Android.mk
new file mode 100644
index 0000000..f06ee4c
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/Android.mk
@@ -0,0 +1,50 @@
+LOCAL_PATH := $(call my-dir)
+
+##################################################################
+# CONFIGURATION TOP FILE
+##################################################################
+
+ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_configuration_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_configuration.xml
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+
+LOCAL_REQUIRED_MODULES := \
+ audio_policy_engine_product_strategies_phone.xml \
+ audio_policy_engine_stream_volumes.xml \
+ audio_policy_engine_default_stream_volumes.xml
+
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_product_strategies_phone.xml
+LOCAL_MODULE_STEM := audio_policy_engine_product_strategies.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
similarity index 67%
copy from services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
copy to services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
index 5d9193b..4ca33b4 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/config/policy_wrapper_configuration.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
@@ -12,14 +12,13 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--->
-<!--
- These are the minimum required criteria to be used by Audio HAL to ensure a basic
- user experience on an Android device
--->
-<configuration name="audio_policy_wrapper_configuration" xmlns:xi="http://www.w3.org/2001/XInclude">
+ -->
- <xi:include href="policy_criterion_types.xml"/>
- <xi:include href="policy_criteria.xml"/>
+<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+ <xi:include href="audio_policy_engine_product_strategies.xml"/>
+ <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+ <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
</configuration>
+
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device category -->
+<volumes>
+ <reference name="FULL_SCALE_VOLUME_CURVE">
+ <!-- Full Scale reference Volume Curve -->
+ <point>0,0</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="SILENT_VOLUME_CURVE">
+ <point>0,-9600</point>
+ <point>100,-9600</point>
+ </reference>
+ <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+ <!-- Default System reference Volume Curve -->
+ <point>1,-2400</point>
+ <point>33,-1800</point>
+ <point>66,-1200</point>
+ <point>100,-600</point>
+ </reference>
+ <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+ <!-- Default Media reference Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+ <!-- Default is Speaker Media Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+ <!-- Default is Speaker System Volume Curve -->
+ <point>1,-4680</point>
+ <point>42,-2070</point>
+ <point>85,-540</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+ <!-- Default is Ext Media System Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+</volumes>
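Each reference above is a piecewise-linear curve of (volume index, attenuation in millibels) points over the 0..100 index range. A minimal evaluation sketch, assuming linear interpolation between adjacent points; it illustrates the data format only and is not the AudioPolicy implementation:

    // Sketch: evaluate a curve such as DEFAULT_MEDIA_VOLUME_CURVE
    // {{1,-5800},{20,-4000},{60,-1700},{100,0}} at a given index.
    #include <utility>
    #include <vector>

    using CurvePoint = std::pair<int, int>;  // {volume index 0..100, attenuation in millibels}

    float curveAttenuationMb(const std::vector<CurvePoint> &curve, int index) {
        if (curve.empty()) return 0.0f;
        if (index <= curve.front().first) return curve.front().second;
        if (index >= curve.back().first) return curve.back().second;
        for (size_t i = 1; i < curve.size(); ++i) {
            if (index <= curve[i].first) {
                const int x0 = curve[i - 1].first, y0 = curve[i - 1].second;
                const int x1 = curve[i].first, y1 = curve[i].second;
                // Linear interpolation between the two surrounding points.
                return y0 + (y1 - y0) * float(index - x0) / float(x1 - x0);
            }
        }
        return curve.back().second;
    }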
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
new file mode 100644
index 0000000..9398743
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<ProductStrategies>
+
+ <!-- "hidden strategies" like TTS, enforced audible:
+ Shall we expose them here or keep it hard coded -->
+
+ <!-- Used to identify the volume of audio streams for enforced system sounds in certain
+ countries (e.g. camera in Japan)
+ This strategy will only have higher priority than phone if force for system is set to
+ enforced. -->
+
+ <ProductStrategy name="STRATEGY_PHONE">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="voice_call">
+ <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="bluetooth_sco">
+ <Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_SONIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="alarm">
+ <Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
+ <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE" volumeGroup="enforced_audible">
+ <Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_ACCESSIBILITY">
+ <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY" volumeGroup="accessibility">
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_EVENT"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_MEDIA">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="music">
+ <Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
+ <Attributes></Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
+ <Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <ProductStrategy name="STRATEGY_DTMF">
+ <AttributesGroup streamType="AUDIO_STREAM_DTMF" volumeGroup="dtmf">
+ <Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- Used to identify the volume of audio streams exclusively transmitted through the speaker
+ (TTS) of the device -->
+ <ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
+ <Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- Routing Strategy rerouting may be removed as it follows media??? -->
+ <ProductStrategy name="STRATEGY_REROUTING">
+ <AttributesGroup streamType="AUDIO_STREAM_REROUTING" volumeGroup="rerouting">
+ <Attributes></Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+ <!-- Default product strategy has empty attributes -->
+ <ProductStrategy name="STRATEGY_PATCH">
+ <AttributesGroup streamType="AUDIO_STREAM_PATCH" volumeGroup="patch">
+ <Attributes></Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
+
+
+</ProductStrategies>
+
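The strategies above are declared in priority order and matched against a client's audio attributes (usage or flags), with the empty Attributes entry of STRATEGY_MEDIA acting as the catch-all. A simplified first-match sketch; the rule struct and matching semantics below are assumptions for illustration, not the engine code:

    #include <cstdint>
    #include <string>
    #include <vector>
    #include <system/audio.h>

    struct StrategyRule {
        std::string name;
        audio_usage_t usage;   // AUDIO_USAGE_UNKNOWN stands for an empty <Attributes/> wildcard
        uint32_t flags;        // 0 when the rule does not match on flags
    };

    std::string strategyForAttributes(const std::vector<StrategyRule> &rules,
                                      const audio_attributes_t &attr) {
        for (const auto &rule : rules) {
            const bool usageMatches =
                    (rule.usage == AUDIO_USAGE_UNKNOWN) || (rule.usage == attr.usage);
            const bool flagsMatch =
                    (rule.flags == 0) ||
                    ((static_cast<uint32_t>(attr.flags) & rule.flags) == rule.flags);
            if (usageMatches && flagsMatch) {
                return rule.name;  // first declared match wins, like the XML ordering
            }
        }
        return "STRATEGY_MEDIA";  // assumed fallback: the strategy with empty attributes
    }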
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..707a184
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Volume section defines a volume curve for a given use case and device category.
+It contains a list of points of this curve expressing the attenuation in Millibels for a given
+volume index from 0 to 100.
+<volume deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>voice_call</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2700</point>
+ <point>33,-1800</point>
+ <point>66,-900</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-5100</point>
+ <point>57,-2800</point>
+ <point>71,-2500</point>
+ <point>85,-2300</point>
+ <point>100,-2100</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>music</name>
+ <indexMin>0</indexMin>
+ <indexMax>25</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>alarm</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>notification</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>bluetooth_sco</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>enforced_audible</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-3400</point>
+ <point>71,-2400</point>
+ <point>100,-2000</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>dtmf</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-4000</point>
+ <point>71,-2400</point>
+ <point>100,-1400</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>accessibility</name>
+ <indexMin>1</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>rerouting</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>patch</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+</volumeGroups>
+
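Each volumeGroup exposes its own UI index range (indexMin..indexMax) while the curve points above are expressed over 0..100. A small sketch of the normalization that maps one onto the other, assumed behaviour for illustration only:

    // Sketch: map a group index (e.g. 0..7 for "ring", 0..25 for "music") onto the
    // 0..100 domain used by the <point> elements, before evaluating the curve.
    int volumeIndexToCurvePosition(int index, int indexMin, int indexMax) {
        if (indexMax <= indexMin) return 0;
        if (index < indexMin) index = indexMin;   // clamp into the declared range
        if (index > indexMax) index = indexMax;
        return (100 * (index - indexMin)) / (indexMax - indexMin);
    }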
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 3d68cd8..f191738 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -29,6 +29,7 @@
#include <AudioPolicyManagerObserver.h>
#include <AudioPort.h>
#include <IOProfile.h>
+#include <AudioIODescriptorInterface.h>
#include <policy.h>
#include <utils/String8.h>
#include <utils/Log.h>
@@ -38,60 +39,33 @@
namespace audio_policy
{
+struct legacy_strategy_map { const char *name; legacy_strategy id; };
+static const std::vector<legacy_strategy_map> gLegacyStrategy = {
+ { "STRATEGY_NONE", STRATEGY_NONE },
+ { "STRATEGY_MEDIA", STRATEGY_MEDIA },
+ { "STRATEGY_PHONE", STRATEGY_PHONE },
+ { "STRATEGY_SONIFICATION", STRATEGY_SONIFICATION },
+ { "STRATEGY_SONIFICATION_RESPECTFUL", STRATEGY_SONIFICATION_RESPECTFUL },
+ { "STRATEGY_DTMF", STRATEGY_DTMF },
+ { "STRATEGY_ENFORCED_AUDIBLE", STRATEGY_ENFORCED_AUDIBLE },
+ { "STRATEGY_TRANSMITTED_THROUGH_SPEAKER", STRATEGY_TRANSMITTED_THROUGH_SPEAKER },
+ { "STRATEGY_ACCESSIBILITY", STRATEGY_ACCESSIBILITY },
+ { "STRATEGY_REROUTING", STRATEGY_REROUTING },
+ { "STRATEGY_PATCH", STRATEGY_REROUTING }, // boiler to manage stream patch volume
+};
+
Engine::Engine()
- : mManagerInterface(this),
- mPhoneState(AUDIO_MODE_NORMAL),
- mApmObserver(NULL)
{
- for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) {
- mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
+ auto result = EngineBase::loadAudioPolicyEngineConfig();
+ ALOGE_IF(result.nbSkippedElement != 0,
+ "Policy Engine configuration is partially invalid, skipped %zu elements",
+ result.nbSkippedElement);
+
+ for (const auto &strategy : gLegacyStrategy) {
+ mLegacyStrategyMap[getProductStrategyByName(strategy.name)] = strategy.id;
}
}
-Engine::~Engine()
-{
-}
-
-void Engine::setObserver(AudioPolicyManagerObserver *observer)
-{
- ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
- mApmObserver = observer;
-}
-
-status_t Engine::initCheck()
-{
- return (mApmObserver != NULL) ? NO_ERROR : NO_INIT;
-}
-
-status_t Engine::setPhoneState(audio_mode_t state)
-{
- ALOGV("setPhoneState() state %d", state);
-
- if (state < 0 || state >= AUDIO_MODE_CNT) {
- ALOGW("setPhoneState() invalid state %d", state);
- return BAD_VALUE;
- }
-
- if (state == mPhoneState ) {
- ALOGW("setPhoneState() setting same state %d", state);
- return BAD_VALUE;
- }
-
- // store previous phone state for management of sonification strategy below
- int oldState = mPhoneState;
- mPhoneState = state;
-
- if (!is_state_in_call(oldState) && is_state_in_call(state)) {
- ALOGV(" Entering call in setPhoneState()");
- mApmObserver->getVolumeCurves().switchVolumeCurve(AUDIO_STREAM_VOICE_CALL,
- AUDIO_STREAM_DTMF);
- } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
- ALOGV(" Exiting call in setPhoneState()");
- mApmObserver->getVolumeCurves().restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
- }
- return NO_ERROR;
-}
-
status_t Engine::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
{
switch(usage) {
@@ -101,7 +75,6 @@
ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
return BAD_VALUE;
}
- mForceUse[usage] = config;
break;
case AUDIO_POLICY_FORCE_FOR_MEDIA:
if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP &&
@@ -112,7 +85,6 @@
ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
return BAD_VALUE;
}
- mForceUse[usage] = config;
break;
case AUDIO_POLICY_FORCE_FOR_RECORD:
if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
@@ -120,7 +92,6 @@
ALOGW("setForceUse() invalid config %d for FOR_RECORD", config);
return BAD_VALUE;
}
- mForceUse[usage] = config;
break;
case AUDIO_POLICY_FORCE_FOR_DOCK:
if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK &&
@@ -130,21 +101,18 @@
config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) {
ALOGW("setForceUse() invalid config %d for FOR_DOCK", config);
}
- mForceUse[usage] = config;
break;
case AUDIO_POLICY_FORCE_FOR_SYSTEM:
if (config != AUDIO_POLICY_FORCE_NONE &&
config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config);
}
- mForceUse[usage] = config;
break;
case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
if (config != AUDIO_POLICY_FORCE_NONE &&
config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
ALOGW("setForceUse() invalid config %d for HDMI_SYSTEM_AUDIO", config);
}
- mForceUse[usage] = config;
break;
case AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND:
if (config != AUDIO_POLICY_FORCE_NONE &&
@@ -154,109 +122,25 @@
ALOGW("setForceUse() invalid config %d for ENCODED_SURROUND", config);
return BAD_VALUE;
}
- mForceUse[usage] = config;
break;
case AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING:
if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_NONE) {
ALOGW("setForceUse() invalid config %d for FOR_VIBRATE_RINGING", config);
return BAD_VALUE;
}
- mForceUse[usage] = config;
break;
default:
ALOGW("setForceUse() invalid usage %d", usage);
break; // TODO return BAD_VALUE?
}
- return NO_ERROR;
+ return EngineBase::setForceUse(usage, config);
}
-routing_strategy Engine::getStrategyForStream(audio_stream_type_t stream)
-{
- // stream to strategy mapping
- switch (stream) {
- case AUDIO_STREAM_VOICE_CALL:
- case AUDIO_STREAM_BLUETOOTH_SCO:
- return STRATEGY_PHONE;
- case AUDIO_STREAM_RING:
- case AUDIO_STREAM_ALARM:
- return STRATEGY_SONIFICATION;
- case AUDIO_STREAM_NOTIFICATION:
- return STRATEGY_SONIFICATION_RESPECTFUL;
- case AUDIO_STREAM_DTMF:
- return STRATEGY_DTMF;
- default:
- ALOGE("unknown stream type %d", stream);
- FALLTHROUGH_INTENDED;
- case AUDIO_STREAM_SYSTEM:
- // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
- // while key clicks are played produces a poor result
- case AUDIO_STREAM_MUSIC:
- return STRATEGY_MEDIA;
- case AUDIO_STREAM_ENFORCED_AUDIBLE:
- return STRATEGY_ENFORCED_AUDIBLE;
- case AUDIO_STREAM_TTS:
- return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
- case AUDIO_STREAM_ACCESSIBILITY:
- return STRATEGY_ACCESSIBILITY;
- case AUDIO_STREAM_REROUTING:
- return STRATEGY_REROUTING;
- }
-}
-
-routing_strategy Engine::getStrategyForUsage(audio_usage_t usage)
-{
- // usage to strategy mapping
- switch (usage) {
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- return STRATEGY_ACCESSIBILITY;
-
- case AUDIO_USAGE_MEDIA:
- case AUDIO_USAGE_GAME:
- case AUDIO_USAGE_ASSISTANT:
- case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
- case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
- return STRATEGY_MEDIA;
-
- case AUDIO_USAGE_VOICE_COMMUNICATION:
- return STRATEGY_PHONE;
-
- case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
- return STRATEGY_DTMF;
-
- case AUDIO_USAGE_ALARM:
- case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
- return STRATEGY_SONIFICATION;
-
- case AUDIO_USAGE_NOTIFICATION:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
- case AUDIO_USAGE_NOTIFICATION_EVENT:
- return STRATEGY_SONIFICATION_RESPECTFUL;
-
- case AUDIO_USAGE_UNKNOWN:
- default:
- return STRATEGY_MEDIA;
- }
-}
-
-audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
-{
- DeviceVector availableOutputDevices = mApmObserver->getAvailableOutputDevices();
- DeviceVector availableInputDevices = mApmObserver->getAvailableInputDevices();
-
- const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
-
- return getDeviceForStrategyInt(strategy, availableOutputDevices,
- availableInputDevices, outputs, (uint32_t)AUDIO_DEVICE_NONE);
-}
-
-
-audio_devices_t Engine::getDeviceForStrategyInt(routing_strategy strategy,
- DeviceVector availableOutputDevices,
- DeviceVector availableInputDevices,
- const SwAudioOutputCollection &outputs,
- uint32_t outputDeviceTypesToIgnore) const
+audio_devices_t Engine::getDeviceForStrategyInt(legacy_strategy strategy,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs,
+ uint32_t outputDeviceTypesToIgnore) const
{
uint32_t device = AUDIO_DEVICE_NONE;
uint32_t availableOutputDevicesType =
@@ -269,16 +153,17 @@
break;
case STRATEGY_SONIFICATION_RESPECTFUL:
- if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
+ if (isInCall() || outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
device = getDeviceForStrategyInt(
STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
} else {
bool media_active_locally =
- outputs.isStreamActiveLocally(
- AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
- || outputs.isStreamActiveLocally(
- AUDIO_STREAM_ACCESSIBILITY, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
+ outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_MUSIC),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)
+ || outputs.isActiveLocally(
+ streamToVolumeSource(AUDIO_STREAM_ACCESSIBILITY),
+ SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
// routing is same as media without the "remote" device
device = getDeviceForStrategyInt(STRATEGY_MEDIA,
availableOutputDevices,
@@ -334,7 +219,7 @@
}
// for phone strategy, we first consider the forced use and then the available devices by
// order of priority
- switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+ switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
case AUDIO_POLICY_FORCE_BT_SCO:
if (!isInCall() || strategy != STRATEGY_DTMF) {
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
@@ -352,7 +237,7 @@
if (device) break;
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
if (!isInCall() &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
if (device) break;
@@ -386,7 +271,7 @@
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
// A2DP speaker when forcing to speaker output
if (!isInCall() &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
if (device) break;
@@ -411,7 +296,8 @@
case STRATEGY_SONIFICATION:
// If incall, just select the STRATEGY_PHONE device
- if (isInCall() || outputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL)) {
+ if (isInCall() ||
+ outputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL))) {
device = getDeviceForStrategyInt(
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
@@ -426,7 +312,7 @@
// - in countries where not enforced in which case it follows STRATEGY_MEDIA
if ((strategy == STRATEGY_SONIFICATION) ||
- (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
}
@@ -442,9 +328,9 @@
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
}
// Use ONLY Bluetooth SCO output when ringing in vibration mode
- if (!((mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+ if (!((getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
&& (strategy == STRATEGY_ENFORCED_AUDIBLE))) {
- if (mForceUse[AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING]
+ if (getForceUse(AUDIO_POLICY_FORCE_FOR_VIBRATE_RINGING)
== AUDIO_POLICY_FORCE_BT_SCO) {
if (device2 != AUDIO_DEVICE_NONE) {
device = device2;
@@ -453,7 +339,7 @@
}
}
// Use both Bluetooth SCO and phone default output when ringing in normal mode
- if (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) {
+ if (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) {
if ((strategy == STRATEGY_SONIFICATION) &&
(device & AUDIO_DEVICE_OUT_SPEAKER) &&
(availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
@@ -484,8 +370,8 @@
}
availableOutputDevices =
availableOutputDevices.getDevicesFromTypeMask(availableOutputDevicesType);
- if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
- outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
+ if (outputs.isActive(streamToVolumeSource(AUDIO_STREAM_RING)) ||
+ outputs.isActive(streamToVolumeSource(AUDIO_STREAM_ALARM))) {
return getDeviceForStrategyInt(
STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs,
outputDeviceTypesToIgnore);
@@ -506,7 +392,7 @@
if (strategy != STRATEGY_SONIFICATION) {
// no sonification on remote submix (e.g. WFD)
if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- String8("0")) != 0) {
+ String8("0"), AUDIO_FORMAT_DEFAULT) != 0) {
device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
}
}
@@ -516,11 +402,13 @@
outputDeviceTypesToIgnore);
break;
}
- if (device2 == AUDIO_DEVICE_NONE) {
+ // FIXME: Find a better solution to prevent routing to BT hearing aid(b/122931261).
+ if ((device2 == AUDIO_DEVICE_NONE) &&
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
}
if ((device2 == AUDIO_DEVICE_NONE) &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
if (device2 == AUDIO_DEVICE_NONE) {
@@ -531,7 +419,7 @@
}
}
if ((device2 == AUDIO_DEVICE_NONE) &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
}
if (device2 == AUDIO_DEVICE_NONE) {
@@ -560,7 +448,7 @@
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
}
if ((device2 == AUDIO_DEVICE_NONE) &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK) == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
}
if (device2 == AUDIO_DEVICE_NONE) {
@@ -581,7 +469,7 @@
// If hdmi system audio mode is on, remove speaker out of output list.
if ((strategy == STRATEGY_MEDIA) &&
- (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO) ==
AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
device &= ~AUDIO_DEVICE_OUT_SPEAKER;
}
@@ -603,7 +491,7 @@
if (device == AUDIO_DEVICE_NONE) {
ALOGV("getDeviceForStrategy() no device found for strategy %d", strategy);
- device = mApmObserver->getDefaultOutputDevice()->type();
+ device = getApmObserver()->getDefaultOutputDevice()->type();
ALOGE_IF(device == AUDIO_DEVICE_NONE,
"getDeviceForStrategy() no default device defined");
}
@@ -614,9 +502,9 @@
audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
{
- const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
- const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
- const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
+ const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+ const DeviceVector &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+ const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
uint32_t device = AUDIO_DEVICE_NONE;
@@ -651,7 +539,7 @@
case AUDIO_SOURCE_MIC:
if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
- } else if ((mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO) &&
+ } else if ((getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) &&
(availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
@@ -675,7 +563,7 @@
primaryOutput->getModuleHandle()) & ~AUDIO_DEVICE_BIT_IN;
}
- switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+ switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
case AUDIO_POLICY_FORCE_BT_SCO:
// if SCO device is requested but no SCO device is available, fall back to default case
if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
@@ -709,7 +597,7 @@
case AUDIO_SOURCE_VOICE_RECOGNITION:
case AUDIO_SOURCE_UNPROCESSED:
case AUDIO_SOURCE_HOTWORD:
- if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
+ if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO &&
availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
} else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
@@ -780,10 +668,107 @@
return device;
}
+void Engine::updateDeviceSelectionCache()
+{
+ for (const auto &iter : getProductStrategies()) {
+ const auto &strategy = iter.second;
+ auto devices = getDevicesForProductStrategy(strategy->getId());
+ mDevicesForStrategies[strategy->getId()] = devices;
+ strategy->setDeviceTypes(devices.types());
+ strategy->setDeviceAddress(devices.getFirstValidAddress().c_str());
+ }
+}
+
+DeviceVector Engine::getDevicesForProductStrategy(product_strategy_t strategy) const
+{
+ DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+ DeviceVector availableInputDevices = getApmObserver()->getAvailableInputDevices();
+ const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+
+ auto legacyStrategy = mLegacyStrategyMap.find(strategy) != end(mLegacyStrategyMap) ?
+ mLegacyStrategyMap.at(strategy) : STRATEGY_NONE;
+ audio_devices_t devices = getDeviceForStrategyInt(legacyStrategy,
+ availableOutputDevices,
+ availableInputDevices, outputs,
+ (uint32_t)AUDIO_DEVICE_NONE);
+ return availableOutputDevices.getDevicesFromTypeMask(devices);
+}
+
+DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
+ const sp<DeviceDescriptor> &preferredDevice,
+ bool fromCache) const
+{
+ // First check for explict routing device
+ if (preferredDevice != nullptr) {
+ ALOGV("%s explicit Routing on device %s", __func__, preferredDevice->toString().c_str());
+ return DeviceVector(preferredDevice);
+ }
+ product_strategy_t strategy = getProductStrategyForAttributes(attributes);
+ const DeviceVector &availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
+ const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
+ //
+ // @TODO: what is the priority of explicit routing? Shall it be considered first as it used to
+ // be by APM?
+ //
+ // Honor explicit routing requests only if all active clients have a preferred route in which
+ // case the last active client route is used
+ sp<DeviceDescriptor> device = findPreferredDevice(outputs, strategy, availableOutputDevices);
+ if (device != nullptr) {
+ return DeviceVector(device);
+ }
+
+ return fromCache? mDevicesForStrategies.at(strategy) : getDevicesForProductStrategy(strategy);
+}
+
+DeviceVector Engine::getOutputDevicesForStream(audio_stream_type_t stream, bool fromCache) const
+{
+ auto attributes = getAttributesForStreamType(stream);
+ return getOutputDevicesForAttributes(attributes, nullptr, fromCache);
+}
+
+sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
+ AudioMix **mix) const
+{
+ const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
+ const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
+ const auto &inputs = getApmObserver()->getInputs();
+ std::string address;
+
+ //
+ // Explicit Routing ??? what is the priority of explicit routing? Shall it be considered
+ // first as it used to be by APM?
+ //
+ // Honor explicit routing requests only if all active clients have a preferred route in which
+ // case the last active client route is used
+ sp<DeviceDescriptor> device =
+ findPreferredDevice(inputs, attr.source, availableInputDevices);
+ if (device != nullptr) {
+ return device;
+ }
+
+ device = policyMixes.getDeviceAndMixForInputSource(attr.source, availableInputDevices, mix);
+ if (device != nullptr) {
+ return device;
+ }
+ audio_devices_t deviceType = getDeviceForInputSource(attr.source);
+
+ if (audio_is_remote_submix_device(deviceType)) {
+ address = "0";
+ std::size_t pos;
+ std::string tags { attr.tags };
+ if ((pos = tags.find("addr=")) != std::string::npos) {
+ address = tags.substr(pos + std::strlen("addr="));
+ }
+ }
+ return availableInputDevices.getDevice(deviceType,
+ String8(address.c_str()),
+ AUDIO_FORMAT_DEFAULT);
+}
+
template <>
AudioPolicyManagerInterface *Engine::queryInterface()
{
- return &mManagerInterface;
+ return this;
}
} // namespace audio_policy
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 06186c1..d8a3698 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -16,7 +16,7 @@
#pragma once
-
+#include "EngineBase.h"
#include "AudioPolicyManagerInterface.h"
#include <AudioGain.h>
#include <policy.h>
@@ -29,114 +29,67 @@
namespace audio_policy
{
-class Engine
+enum legacy_strategy {
+ STRATEGY_NONE = -1,
+ STRATEGY_MEDIA,
+ STRATEGY_PHONE,
+ STRATEGY_SONIFICATION,
+ STRATEGY_SONIFICATION_RESPECTFUL,
+ STRATEGY_DTMF,
+ STRATEGY_ENFORCED_AUDIBLE,
+ STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
+ STRATEGY_ACCESSIBILITY,
+ STRATEGY_REROUTING,
+};
+
+class Engine : public EngineBase
{
public:
Engine();
- virtual ~Engine();
+ virtual ~Engine() = default;
template <class RequestedInterface>
RequestedInterface *queryInterface();
private:
- /// Interface members
- class ManagerInterfaceImpl : public AudioPolicyManagerInterface
- {
- public:
- explicit ManagerInterfaceImpl(Engine *policyEngine)
- : mPolicyEngine(policyEngine) {}
+ ///
+ /// from EngineBase, so from AudioPolicyManagerInterface
+ ///
+ status_t setForceUse(audio_policy_force_use_t usage,
+ audio_policy_forced_cfg_t config) override;
- virtual void setObserver(AudioPolicyManagerObserver *observer)
- {
- mPolicyEngine->setObserver(observer);
- }
- virtual status_t initCheck()
- {
- return mPolicyEngine->initCheck();
- }
- virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const
- {
- return mPolicyEngine->getDeviceForInputSource(inputSource);
- }
- virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const
- {
- return mPolicyEngine->getDeviceForStrategy(strategy);
- }
- virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
- {
- return mPolicyEngine->getStrategyForStream(stream);
- }
- virtual routing_strategy getStrategyForUsage(audio_usage_t usage)
- {
- return mPolicyEngine->getStrategyForUsage(usage);
- }
- virtual status_t setPhoneState(audio_mode_t mode)
- {
- return mPolicyEngine->setPhoneState(mode);
- }
- virtual audio_mode_t getPhoneState() const
- {
- return mPolicyEngine->getPhoneState();
- }
- virtual status_t setForceUse(audio_policy_force_use_t usage,
- audio_policy_forced_cfg_t config)
- {
- return mPolicyEngine->setForceUse(usage, config);
- }
- virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
- {
- return mPolicyEngine->getForceUse(usage);
- }
- virtual status_t setDeviceConnectionState(const sp<DeviceDescriptor> /*devDesc*/,
- audio_policy_dev_state_t /*state*/)
- {
- return NO_ERROR;
- }
- private:
- Engine *mPolicyEngine;
- } mManagerInterface;
+ DeviceVector getOutputDevicesForAttributes(const audio_attributes_t &attr,
+ const sp<DeviceDescriptor> &preferredDevice = nullptr,
+ bool fromCache = false) const override;
+
+ DeviceVector getOutputDevicesForStream(audio_stream_type_t stream,
+ bool fromCache = false) const override;
+
+ sp<DeviceDescriptor> getInputDeviceForAttributes(
+ const audio_attributes_t &attr, AudioMix **mix = nullptr) const override;
+
+ void updateDeviceSelectionCache() override;
private:
/* Copy facilities are put private to disable copy. */
Engine(const Engine &object);
Engine &operator=(const Engine &object);
- void setObserver(AudioPolicyManagerObserver *observer);
-
- status_t initCheck();
-
- inline bool isInCall() const
- {
- return is_state_in_call(mPhoneState);
- }
-
- status_t setPhoneState(audio_mode_t mode);
- audio_mode_t getPhoneState() const
- {
- return mPhoneState;
- }
- status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
- audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
- {
- return mForceUse[usage];
- }
status_t setDefaultDevice(audio_devices_t device);
- routing_strategy getStrategyForStream(audio_stream_type_t stream);
- routing_strategy getStrategyForUsage(audio_usage_t usage);
- audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
- audio_devices_t getDeviceForStrategyInt(routing_strategy strategy,
- DeviceVector availableOutputDevices,
- DeviceVector availableInputDevices,
- const SwAudioOutputCollection &outputs,
- uint32_t outputDeviceTypesToIgnore) const;
+ audio_devices_t getDeviceForStrategyInt(legacy_strategy strategy,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs,
+ uint32_t outputDeviceTypesToIgnore) const;
+
+ DeviceVector getDevicesForProductStrategy(product_strategy_t strategy) const;
+
audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
- audio_mode_t mPhoneState; /**< current phone state. */
- /** current forced use configuration. */
- audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];
+ DeviceStrategyMap mDevicesForStrategies;
- AudioPolicyManagerObserver *mApmObserver;
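+ /** Maps each product strategy to its legacy routing strategy counterpart, assumed to feed getDeviceForStrategyInt() above. */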
+ std::map<product_strategy_t, legacy_strategy> mLegacyStrategyMap;
};
} // namespace audio_policy
} // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 5c8a799..4540c4d 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -33,9 +33,12 @@
#define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
#define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \
"audio_policy_configuration_a2dp_offload_disabled.xml"
+#define AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME \
+ "audio_policy_configuration_bluetooth_legacy_hal.xml"
#include <inttypes.h>
#include <math.h>
+#include <set>
#include <unordered_set>
#include <vector>
@@ -44,7 +47,6 @@
#include <cutils/properties.h>
#include <utils/Log.h>
#include <media/AudioParameter.h>
-#include <media/AudioPolicyHelper.h>
#include <private/android_filesystem_config.h>
#include <soundtrigger/SoundTrigger.h>
#include <system/audio.h>
@@ -81,9 +83,11 @@
status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
- status_t status = setDeviceConnectionStateInt(device, state, device_address, device_name);
+ status_t status = setDeviceConnectionStateInt(device, state, device_address,
+ device_name, encodedFormat);
nextAudioPortGeneration();
return status;
}
@@ -101,16 +105,17 @@
status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t deviceType,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
- ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
- deviceType, state, device_address, device_name);
+ ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s format 0x%X",
+ deviceType, state, device_address, device_name, encodedFormat);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(deviceType) && !audio_is_input_device(deviceType)) return BAD_VALUE;
sp<DeviceDescriptor> device =
- mHwModules.getDeviceDescriptor(deviceType, device_address, device_name,
+ mHwModules.getDeviceDescriptor(deviceType, device_address, device_name, encodedFormat,
state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
if (device == 0) {
return INVALID_OPERATION;
@@ -133,10 +138,22 @@
ALOGW("%s() device already connected: %s", __func__, device->toString().c_str());
return INVALID_OPERATION;
}
- ALOGV("%s() connecting device %s", __func__, device->toString().c_str());
+ ALOGV("%s() connecting device %s format %x",
+ __func__, device->toString().c_str(), encodedFormat);
// register new device as available
- if (mAvailableOutputDevices.add(device) < 0) {
+ index = mAvailableOutputDevices.add(device);
+ if (index >= 0) {
+ sp<HwModule> module = mHwModules.getModuleForDevice(device, encodedFormat);
+ if (module == 0) {
+ ALOGD("setDeviceConnectionState() could not find HW module for device %s",
+ device->toString().c_str());
+ mAvailableOutputDevices.remove(device);
+ return INVALID_OPERATION;
+ }
+ ALOGV("setDeviceConnectionState() module name=%s", module->getName());
+ mAvailableOutputDevices[index]->attach(module);
+ } else {
return NO_MEMORY;
}
@@ -176,8 +193,13 @@
// remove device from available output devices
mAvailableOutputDevices.remove(device);
+ mOutputs.clearSessionRoutesForDevice(device);
+
checkOutputsForDevice(device, state, outputs);
+ // Reset active device codec
+ device->setEncodedFormat(AUDIO_FORMAT_DEFAULT);
+
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(device, state);
} break;
@@ -187,7 +209,26 @@
return BAD_VALUE;
}
- checkForDeviceAndOutputChanges([&]() {
+ // No need to evaluate playback routing when connecting a remote submix
+ // output device used by a dynamic policy of type recorder as no
+ // playback use case is affected.
+ bool doCheckForDeviceAndOutputChanges = true;
+ if (device->type() == AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+ && strncmp(device_address, "0", AUDIO_DEVICE_MAX_ADDRESS_LEN) != 0) {
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+ if (desc->mPolicyMix != nullptr
+ && desc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS
+ && strncmp(device_address,
+ desc->mPolicyMix->mDeviceAddress.string(),
+ AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+ doCheckForDeviceAndOutputChanges = false;
+ break;
+ }
+ }
+ }
+
+ auto checkCloseOutputs = [&]() {
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
for (audio_io_handle_t output : outputs) {
@@ -196,7 +237,7 @@
// been opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
- (desc->mDirectOpenCount == 0))) {
+ (desc->mDirectOpenCount == 0))) {
closeOutput(output);
}
}
@@ -204,7 +245,13 @@
return true;
}
return false;
- });
+ };
+
+ if (doCheckForDeviceAndOutputChanges) {
+ checkForDeviceAndOutputChanges(checkCloseOutputs);
+ } else {
+ checkCloseOutputs();
+ }
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/);
@@ -248,6 +295,13 @@
ALOGW("%s() device already connected: %s", __func__, device->toString().c_str());
return INVALID_OPERATION;
}
+ sp<HwModule> module = mHwModules.getModuleForDevice(device, AUDIO_FORMAT_DEFAULT);
+ if (module == NULL) {
+ ALOGW("setDeviceConnectionState(): could not find HW module for device %s",
+ device->toString().c_str());
+ return INVALID_OPERATION;
+ }
+
// Before checking inputs, broadcast connect event to allow HAL to retrieve dynamic
// parameters on newly connected devices (instead of opening the inputs...)
broadcastDeviceConnectionState(device, state);
@@ -318,11 +372,12 @@
const char *device_address)
{
sp<DeviceDescriptor> devDesc =
- mHwModules.getDeviceDescriptor(device, device_address, "", false /* allowToCreate */,
+ mHwModules.getDeviceDescriptor(device, device_address, "", AUDIO_FORMAT_DEFAULT,
+ false /* allowToCreate */,
(strlen(device_address) != 0)/*matchAddress*/);
if (devDesc == 0) {
- ALOGW("getDeviceConnectionState() undeclared device, type %08x, address: %s",
+ ALOGV("getDeviceConnectionState() undeclared device, type %08x, address: %s",
device, device_address);
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
@@ -338,50 +393,61 @@
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
- return (deviceVector->getDevice(device, String8(device_address)) != 0) ?
+ return (deviceVector->getDevice(
+ device, String8(device_address), AUDIO_FORMAT_DEFAULT) != 0) ?
AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
status_t AudioPolicyManager::handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
status_t status;
String8 reply;
AudioParameter param;
int isReconfigA2dpSupported = 0;
- ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s",
- device, device_address, device_name);
+ ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s encodedFormat: 0x%X",
+ device, device_address, device_name, encodedFormat);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
// Check if the device is currently connected
- sp<DeviceDescriptor> devDesc =
- mHwModules.getDeviceDescriptor(device, device_address, device_name);
- if (devDesc == 0 || mAvailableOutputDevices.indexOf(devDesc) < 0) {
+ DeviceVector availableDevices = getAvailableOutputDevices();
+ DeviceVector deviceList = availableDevices.getDevicesFromTypeMask(device);
+ if (deviceList.empty()) {
// Nothing to do: device is not connected
return NO_ERROR;
}
+ sp<DeviceDescriptor> devDesc = deviceList.itemAt(0);
// For offloaded A2DP, Hw modules may have the capability to
- // configure codecs. Check if any of the loaded hw modules
- // supports this.
- // If supported, send a set parameter to configure A2DP codecs
- // and return. No need to toggle device state.
+ // configure codecs.
+ // Handle two specific cases by sending a set parameter to
+ // configure A2DP codecs. No need to toggle device state.
+ // Case 1: A2DP active device switches from primary to primary
+ // module
+ // Case 2: A2DP device config changes on primary module.
if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
- reply = mpClientInterface->getParameters(
- AUDIO_IO_HANDLE_NONE,
- String8(AudioParameter::keyReconfigA2dpSupported));
- AudioParameter repliedParameters(reply);
- repliedParameters.getInt(
- String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
- if (isReconfigA2dpSupported) {
- const String8 key(AudioParameter::keyReconfigA2dp);
- param.add(key, String8("true"));
- mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
- return NO_ERROR;
+ sp<HwModule> module = mHwModules.getModuleForDeviceTypes(device, encodedFormat);
+ audio_module_handle_t primaryHandle = mPrimaryOutput->getModuleHandle();
+ if (availablePrimaryOutputDevices().contains(devDesc) &&
+ (module != 0 && module->getHandle() == primaryHandle)) {
+ reply = mpClientInterface->getParameters(
+ AUDIO_IO_HANDLE_NONE,
+ String8(AudioParameter::keyReconfigA2dpSupported));
+ AudioParameter repliedParameters(reply);
+ repliedParameters.getInt(
+ String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
+ if (isReconfigA2dpSupported) {
+ const String8 key(AudioParameter::keyReconfigA2dp);
+ param.add(key, String8("true"));
+ mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+ devDesc->setEncodedFormat(encodedFormat);
+ return NO_ERROR;
+ }
}
}
@@ -389,7 +455,8 @@
// This will force reading again the device configuration
status = setDeviceConnectionState(device,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- device_address, device_name);
+ device_address, device_name,
+ devDesc->getEncodedFormat());
if (status != NO_ERROR) {
ALOGW("handleDeviceConfigChange() error disabling connection state: %d",
status);
@@ -398,7 +465,7 @@
status = setDeviceConnectionState(device,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- device_address, device_name);
+ device_address, device_name, encodedFormat);
if (status != NO_ERROR) {
ALOGW("handleDeviceConfigChange() error enabling connection state: %d",
status);
@@ -408,9 +475,47 @@
return NO_ERROR;
}
+status_t AudioPolicyManager::getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats)
+{
+ ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()");
+ char *tok = NULL, *saveptr;
+ status_t status = NO_ERROR;
+ char encoding_formats_list[PROPERTY_VALUE_MAX];
+ audio_format_t format = AUDIO_FORMAT_DEFAULT;
+ // FIXME This list should not come from a property but from the supported encoded
+ // formats of the A2DP devices declared in the primary module
+ property_get("persist.bluetooth.a2dp_offload.cap", encoding_formats_list, "");
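+ // Expected value is a '-' separated list of codec names, e.g. "sbc-aac-aptx-aptxhd-ldac".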
+ tok = strtok_r(encoding_formats_list, "-", &saveptr);
+ for (;tok != NULL; tok = strtok_r(NULL, "-", &saveptr)) {
+ if (strcmp(tok, "sbc") == 0) {
+ ALOGV("%s: SBC offload supported\n",__func__);
+ format = AUDIO_FORMAT_SBC;
+ } else if (strcmp(tok, "aptx") == 0) {
+ ALOGV("%s: APTX offload supported\n",__func__);
+ format = AUDIO_FORMAT_APTX;
+ } else if (strcmp(tok, "aptxhd") == 0) {
+ ALOGV("%s: APTX HD offload supported\n",__func__);
+ format = AUDIO_FORMAT_APTX_HD;
+ } else if (strcmp(tok, "ldac") == 0) {
+ ALOGV("%s: LDAC offload supported\n",__func__);
+ format = AUDIO_FORMAT_LDAC;
+ } else if (strcmp(tok, "aac") == 0) {
+ ALOGV("%s: AAC offload supported\n",__func__);
+ format = AUDIO_FORMAT_AAC;
+ } else {
+ ALOGE("%s: undefined token - %s\n",__func__, tok);
+ continue;
+ }
+ formats->push_back(format);
+ }
+ return status;
+}
+
uint32_t AudioPolicyManager::updateCallRouting(const DeviceVector &rxDevices, uint32_t delayMs)
{
bool createTxPatch = false;
+ bool createRxPatch = false;
uint32_t muteWaitMs = 0;
if(!hasPrimaryOutput() || mPrimaryOutput->devices().types() == AUDIO_DEVICE_OUT_STUB) {
@@ -419,9 +524,11 @@
ALOG_ASSERT(!rxDevices.isEmpty(), "updateCallRouting() no selected output device");
audio_attributes_t attr = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION };
- auto txDevice = getDeviceAndMixForAttributes(attr);
- ALOGV("updateCallRouting device rxDevice %s txDevice %s",
- rxDevices.toString().c_str(), txDevice->toString().c_str());
+ auto txSourceDevice = mEngine->getInputDeviceForAttributes(attr);
+ ALOG_ASSERT(txSourceDevice != 0, "updateCallRouting() input selected device not available");
+
+ ALOGV("updateCallRouting device rxDevice %s txDevice %s",
+ rxDevices.itemAt(0)->toString().c_str(), txSourceDevice->toString().c_str());
// release existing RX patch if any
if (mCallRxPatch != 0) {
@@ -434,22 +541,57 @@
mCallTxPatch.clear();
}
- // If the RX device is on the primary HW module, then use legacy routing method for voice calls
- // via setOutputDevice() on primary output.
- // Otherwise, create two audio patches for TX and RX path.
- if (availablePrimaryOutputDevices().contains(rxDevices.itemAt(0))) {
- muteWaitMs = setOutputDevices(mPrimaryOutput, rxDevices, true, delayMs);
+ auto telephonyRxModule =
+ mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT);
+ auto telephonyTxModule =
+ mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_OUT_TELEPHONY_TX, AUDIO_FORMAT_DEFAULT);
+ // retrieve Rx Source and Tx Sink device descriptors
+ sp<DeviceDescriptor> rxSourceDevice =
+ mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX,
+ String8(),
+ AUDIO_FORMAT_DEFAULT);
+ sp<DeviceDescriptor> txSinkDevice =
+ mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX,
+ String8(),
+ AUDIO_FORMAT_DEFAULT);
+
+ // RX and TX Telephony devices are declared by the Primary Audio HAL
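+ // (The HAL major version check below presumably gates on device-to-device patch support.)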
+ if (isPrimaryModule(telephonyRxModule) && isPrimaryModule(telephonyTxModule) &&
+ (telephonyRxModule->getHalVersionMajor() >= 3)) {
+ if (rxSourceDevice == 0 || txSinkDevice == 0) {
+ // RX / TX Telephony device(s) is(are) not currently available
+ ALOGE("updateCallRouting() no telephony Tx and/or RX device");
+ return muteWaitMs;
+ }
+ // do not create a patch (aka SW bridging) if the primary HW module has declared support for a
+ // route from telephony RX to the sink device and from the source device to telephony TX
+ const auto &primaryModule = telephonyRxModule;
+ createRxPatch = !primaryModule->supportsPatch(rxSourceDevice, rxDevices.itemAt(0));
+ createTxPatch = !primaryModule->supportsPatch(txSourceDevice, txSinkDevice);
+ } else {
+ // If the RX device is on the primary HW module, then use legacy routing method for
+ // voice calls via setOutputDevice() on primary output.
+ // Otherwise, create two audio patches for TX and RX path.
+ createRxPatch = !(availablePrimaryOutputDevices().contains(rxDevices.itemAt(0))) &&
+ (rxSourceDevice != 0);
// If the TX device is also on the primary HW module, setOutputDevice() will take care
// of it due to legacy implementation. If not, create a patch.
- if (!availablePrimaryModuleInputDevices().contains(txDevice)) {
- createTxPatch = true;
- }
+ createTxPatch = !(availablePrimaryModuleInputDevices().contains(txSourceDevice)) &&
+ (txSinkDevice != 0);
+ }
+ // Use legacy routing method for voice calls via setOutputDevice() on primary output.
+ // Otherwise, create two audio patches for TX and RX path.
+ if (!createRxPatch) {
+ muteWaitMs = setOutputDevices(mPrimaryOutput, rxDevices, true, delayMs);
} else { // create RX path audio patch
mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevices.itemAt(0), delayMs);
- createTxPatch = true;
+
+ // If the TX device is on the primary HW module but the RX device is
+ // on another HW module, the SinkMetaData of the telephony input should handle it,
+ // assuming the device uses audio HAL V5.0 and above
}
if (createTxPatch) { // create TX path audio patch
- mCallTxPatch = createTelephonyPatch(false /*isRx*/, txDevice, delayMs);
+ mCallTxPatch = createTelephonyPatch(false /*isRx*/, txSourceDevice, delayMs);
}
return muteWaitMs;
@@ -464,15 +606,18 @@
}
if (isRx) {
patchBuilder.addSink(device).
- addSource(mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX));
+ addSource(mAvailableInputDevices.getDevice(
+ AUDIO_DEVICE_IN_TELEPHONY_RX, String8(), AUDIO_FORMAT_DEFAULT));
} else {
patchBuilder.addSource(device).
- addSink(mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX));
+ addSink(mAvailableOutputDevices.getDevice(
+ AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT));
}
// @TODO: still ignoring the address, i.e. not dealing with platforms that have multiple telephony devices
const sp<DeviceDescriptor> outputDevice = isRx ?
- device : mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX);
+ device : mAvailableOutputDevices.getDevice(
+ AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT);
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevices(DeviceVector(outputDevice), mOutputs);
audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
@@ -566,26 +711,27 @@
int delayMs = 0;
if (isStateInCall(state)) {
nsecs_t sysTime = systemTime();
+ auto musicStrategy = streamToStrategy(AUDIO_STREAM_MUSIC);
+ auto sonificationStrategy = streamToStrategy(AUDIO_STREAM_ALARM);
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
// mute media and sonification strategies and delay device switch by the largest
// latency of any output where either strategy is active.
// This avoids sending the ring tone or music tail into the earpiece or headset.
- if ((isStrategyActive(desc, STRATEGY_MEDIA,
- SONIFICATION_HEADSET_MUSIC_DELAY,
- sysTime) ||
- isStrategyActive(desc, STRATEGY_SONIFICATION,
- SONIFICATION_HEADSET_MUSIC_DELAY,
- sysTime)) &&
+ if ((desc->isStrategyActive(musicStrategy, SONIFICATION_HEADSET_MUSIC_DELAY, sysTime) ||
+ desc->isStrategyActive(sonificationStrategy, SONIFICATION_HEADSET_MUSIC_DELAY,
+ sysTime)) &&
(delayMs < (int)desc->latency()*2)) {
delayMs = desc->latency()*2;
}
- setStrategyMute(STRATEGY_MEDIA, true, desc);
- setStrategyMute(STRATEGY_MEDIA, false, desc, MUTE_TIME_MS,
- getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/));
- setStrategyMute(STRATEGY_SONIFICATION, true, desc);
- setStrategyMute(STRATEGY_SONIFICATION, false, desc, MUTE_TIME_MS,
- getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/));
+ setStrategyMute(musicStrategy, true, desc);
+ setStrategyMute(musicStrategy, false, desc, MUTE_TIME_MS,
+ mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
+ nullptr, true /*fromCache*/).types());
+ setStrategyMute(sonificationStrategy, true, desc);
+ setStrategyMute(sonificationStrategy, false, desc, MUTE_TIME_MS,
+ mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_ALARM),
+ nullptr, true /*fromCache*/).types());
}
}
@@ -632,12 +778,8 @@
}
// Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
- if (state == AUDIO_MODE_RINGTONE &&
- isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
- mLimitRingtoneVolume = true;
- } else {
- mLimitRingtoneVolume = false;
- }
+ mLimitRingtoneVolume = (state == AUDIO_MODE_RINGTONE &&
+ isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY));
}
audio_mode_t AudioPolicyManager::getPhoneState() {
@@ -678,6 +820,9 @@
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
DeviceVector newDevices = getNewOutputDevices(outputDesc, true /*fromCache*/);
if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
+ // As done in setDeviceConnectionState, we could also fix the default device issue by
+ // preventing the forced re-routing in case of a default device that distinguishes on address.
+ // However, let's give the full device choice decision back to the engine.
waitMs = setOutputDevices(outputDesc, newDevices, !newDevices.isEmpty(), delayMs);
}
if (forceVolumeReeval && !newDevices.isEmpty()) {
@@ -736,6 +881,10 @@
if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) {
continue;
}
+ // reject profiles if connected device does not support codec
+ if (!curProfile->deviceSupportsEncodedFormats(devices.types())) {
+ continue;
+ }
if (!directOnly) return curProfile;
// when searching for direct outputs, if several profiles are compatible, give priority
// to one with offload capability
@@ -753,8 +902,7 @@
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
{
- routing_strategy strategy = getStrategy(stream);
- DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+ DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/);
// Note that related method getOutputForAttr() uses getOutputForDevice() not selectOutput().
// We use selectOutput() here since we don't have the desired AudioTrack sample rate,
@@ -788,71 +936,75 @@
ALOGE("%s: invalid stream type", __func__);
return BAD_VALUE;
}
- stream_type_to_audio_attributes(srcStream, dstAttr);
+ *dstAttr = mEngine->getAttributesForStreamType(srcStream);
}
return NO_ERROR;
}
-status_t AudioPolicyManager::getOutputForAttrInt(audio_attributes_t *resultAttr,
- audio_io_handle_t *output,
- audio_session_t session,
- const audio_attributes_t *attr,
- audio_stream_type_t *stream,
- uid_t uid,
- const audio_config_t *config,
- audio_output_flags_t *flags,
- audio_port_handle_t *selectedDeviceId)
+status_t AudioPolicyManager::getOutputForAttrInt(
+ audio_attributes_t *resultAttr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ const audio_attributes_t *attr,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t *flags,
+ audio_port_handle_t *selectedDeviceId,
+ bool *isRequestedDeviceForExclusiveUse,
+ std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs)
{
- DeviceVector devices;
- routing_strategy strategy;
- audio_devices_t deviceType = AUDIO_DEVICE_NONE;
+ DeviceVector outputDevices;
const audio_port_handle_t requestedPortId = *selectedDeviceId;
DeviceVector msdDevices = getMsdAudioOutDevices();
+ const sp<DeviceDescriptor> requestedDevice =
+ mAvailableOutputDevices.getDeviceFromId(requestedPortId);
status_t status = getAudioAttributes(resultAttr, attr, *stream);
if (status != NO_ERROR) {
return status;
}
+ *stream = mEngine->getStreamTypeForAttributes(*resultAttr);
- ALOGV("%s usage=%d, content=%d, tag=%s flags=%08x"
- " session %d selectedDeviceId %d",
- __func__,
- resultAttr->usage, resultAttr->content_type, resultAttr->tags, resultAttr->flags,
- session, requestedPortId);
+ ALOGV("%s() attributes=%s stream=%s session %d selectedDeviceId %d", __func__,
+ toString(*resultAttr).c_str(), toString(*stream).c_str(), session, requestedPortId);
- *stream = streamTypefromAttributesInt(resultAttr);
-
- strategy = getStrategyForAttr(resultAttr);
-
- // First check for explicit routing (eg. setPreferredDevice)
- sp<DeviceDescriptor> requestedDevice = mAvailableOutputDevices.getDeviceFromId(requestedPortId);
- if (requestedDevice != nullptr) {
- deviceType = requestedDevice->type();
- } else {
- // If no explict route, is there a matching dynamic policy that applies?
- sp<SwAudioOutputDescriptor> desc;
- if (mPolicyMixes.getOutputForAttr(*resultAttr, uid, desc) == NO_ERROR) {
- ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
- if (!audio_has_proportional_frames(config->format)) {
- return BAD_VALUE;
- }
- *stream = streamTypefromAttributesInt(resultAttr);
- *output = desc->mIoHandle;
- AudioMix *mix = desc->mPolicyMix;
- sp<DeviceDescriptor> deviceDesc =
- mAvailableOutputDevices.getDevice(mix->mDeviceType, mix->mDeviceAddress);
- *selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
- ALOGV("%s returns output %d", __func__, *output);
- return NO_ERROR;
- }
-
- // Virtual sources must always be dynamicaly or explicitly routed
- if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
- ALOGW("%s no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE", __func__);
- return BAD_VALUE;
- }
- deviceType = getDeviceForStrategy(strategy, false /*fromCache*/);
+ // The primary output is the explicit routing (e.g. setPreferredDevice) if specified,
+ // otherwise fall back to the dynamic policies; if none match, query the engine.
+ // Secondary outputs are always found by dynamic policies as the engine does not support them.
+ sp<SwAudioOutputDescriptor> policyDesc;
+ if (mPolicyMixes.getOutputForAttr(*resultAttr, uid, policyDesc, secondaryDescs) != NO_ERROR) {
+ policyDesc = nullptr; // reset getOutputForAttr in case of failure
+ secondaryDescs->clear();
}
+ // Explicit routing has higher priority than any dynamic policy primary output
+ bool usePrimaryOutputFromPolicyMixes = requestedDevice == nullptr && policyDesc != nullptr;
+
+ // FIXME: in case of RENDER policy, the output capabilities should be checked
+ if ((usePrimaryOutputFromPolicyMixes || !secondaryDescs->empty())
+ && !audio_is_linear_pcm(config->format)) {
+ ALOGD("%s: rejecting request as dynamic audio policy only support pcm", __func__);
+ return BAD_VALUE;
+ }
+ if (usePrimaryOutputFromPolicyMixes) {
+ *output = policyDesc->mIoHandle;
+ AudioMix *mix = policyDesc->mPolicyMix;
+ sp<DeviceDescriptor> deviceDesc =
+ mAvailableOutputDevices.getDevice(mix->mDeviceType,
+ mix->mDeviceAddress,
+ AUDIO_FORMAT_DEFAULT);
+ *selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE;
+ ALOGV("getOutputForAttr() returns output %d", *output);
+ return NO_ERROR;
+ }
+ // Virtual sources must always be dynamically or explicitly routed
+ if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+ ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+ return BAD_VALUE;
+ }
+ // Explicit routing, previously managed by getDeviceForStrategy in APM, is now handled by the engine
+ // in order to leave the choice of routing order to future vendor engines.
+ outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false);
if ((resultAttr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
@@ -863,44 +1015,40 @@
// FIXME: provide a more generic approach which is not device specific and move this back
// to getOutputForDevice.
// TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side.
- if (deviceType == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
- (*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
+ if (outputDevices.types() == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
+ (*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
audio_is_linear_pcm(config->format) &&
isInCall()) {
if (requestedPortId != AUDIO_PORT_HANDLE_NONE) {
*flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
- } else {
- // Get the devce type directly from the engine to bypass preferred route logic
- deviceType = mEngine->getDeviceForStrategy(strategy);
+ *isRequestedDeviceForExclusiveUse = true;
}
}
- ALOGV("%s device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
- "flags %#x",
- __func__,
- deviceType, config->sample_rate, config->format, config->channel_mask, *flags);
+ ALOGV("%s() device %s, sampling rate %d, format %#x, channel mask %#x, flags %#x stream %s",
+ __func__, outputDevices.toString().c_str(), config->sample_rate, config->format,
+ config->channel_mask, *flags, toString(*stream).c_str());
*output = AUDIO_IO_HANDLE_NONE;
if (!msdDevices.isEmpty()) {
*output = getOutputForDevices(msdDevices, session, *stream, config, flags);
- sp<DeviceDescriptor> deviceDesc = mAvailableOutputDevices.getDevice(deviceType);
- if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(deviceDesc) == NO_ERROR) {
- ALOGV("%s() Using MSD devices %s instead of device %s",
- __func__, msdDevices.toString().c_str(), deviceDesc->toString().c_str());
- deviceType = msdDevices.types();
+ sp<DeviceDescriptor> device = outputDevices.isEmpty() ? nullptr : outputDevices.itemAt(0);
+ if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) {
+ ALOGV("%s() Using MSD devices %s instead of devices %s",
+ __func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
+ outputDevices = msdDevices;
} else {
*output = AUDIO_IO_HANDLE_NONE;
}
}
- devices = mAvailableOutputDevices.getDevicesFromTypeMask(deviceType);
if (*output == AUDIO_IO_HANDLE_NONE) {
- *output = getOutputForDevices(devices, session, *stream, config, flags);
+ *output = getOutputForDevices(outputDevices, session, *stream, config, flags);
}
if (*output == AUDIO_IO_HANDLE_NONE) {
return INVALID_OPERATION;
}
- *selectedDeviceId = getFirstDeviceId(devices);
+ *selectedDeviceId = getFirstDeviceId(outputDevices);
ALOGV("%s returns output %d selectedDeviceId %d", __func__, *output, *selectedDeviceId);
@@ -915,7 +1063,8 @@
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId)
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs)
{
// The supplied portId must be AUDIO_PORT_HANDLE_NONE
if (*portId != AUDIO_PORT_HANDLE_NONE) {
@@ -923,11 +1072,27 @@
}
const audio_port_handle_t requestedPortId = *selectedDeviceId;
audio_attributes_t resultAttr;
+ bool isRequestedDeviceForExclusiveUse = false;
+ std::vector<sp<SwAudioOutputDescriptor>> secondaryOutputDescs;
+ const sp<DeviceDescriptor> requestedDevice =
+ mAvailableOutputDevices.getDeviceFromId(requestedPortId);
+
+ // Prevent storing an invalid requested device id in clients
+ const audio_port_handle_t sanitizedRequestedPortId =
+ requestedDevice != nullptr ? requestedPortId : AUDIO_PORT_HANDLE_NONE;
+ *selectedDeviceId = sanitizedRequestedPortId;
+
status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid,
- config, flags, selectedDeviceId);
+ config, flags, selectedDeviceId, &isRequestedDeviceForExclusiveUse,
+ &secondaryOutputDescs);
if (status != NO_ERROR) {
return status;
}
+ std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryOutputDescs;
+ for (auto& secondaryDesc : secondaryOutputDescs) {
+ secondaryOutputs->push_back(secondaryDesc->mIoHandle);
+ weakSecondaryOutputDescs.push_back(secondaryDesc);
+ }
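+ // Note: only weak references are kept here, presumably so the client does not keep secondary outputs alive.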
audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
.format = config->format,
@@ -936,14 +1101,16 @@
sp<TrackClientDescriptor> clientDesc =
new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
- requestedPortId, *stream,
- getStrategyForAttr(&resultAttr),
- *flags);
+ sanitizedRequestedPortId, *stream,
+ mEngine->getProductStrategyForAttributes(resultAttr),
+ streamToVolumeSource(*stream),
+ *flags, isRequestedDeviceForExclusiveUse,
+ std::move(weakSecondaryOutputDescs));
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
outputDesc->addClient(clientDesc);
- ALOGV("%s returns output %d selectedDeviceId %d for port ID %d",
- __func__, *output, requestedPortId, *portId);
+ ALOGV("%s() returns output %d requestedPortId %d selectedDeviceId %d for port ID %d", __func__,
+ *output, requestedPortId, *selectedDeviceId, *portId);
return NO_ERROR;
}
@@ -1158,7 +1325,7 @@
ALOGE("%s() unable to get MSD module", __func__);
return NO_INIT;
}
- sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice);
+ sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice, AUDIO_FORMAT_DEFAULT);
if (deviceModule == nullptr) {
ALOGE("%s() unable to get module for %s", __func__, outputDevice->toString().c_str());
return NO_INIT;
@@ -1248,7 +1415,8 @@
// Use media strategy for unspecified output device. This should only
// occur on checkForDeviceAndOutputChanges(). Device connection events may
// therefore invalidate explicit routing requests.
- DeviceVector devices = getDevicesForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(
+ attributes_initializer(AUDIO_USAGE_MEDIA), nullptr, false /*fromCache*/);
LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no output device to set MSD patch");
device = devices.itemAt(0);
}
@@ -1427,9 +1595,13 @@
*delayMs = 0;
audio_stream_type_t stream = client->stream();
+ auto clientVolSrc = client->volumeSource();
+ auto clientStrategy = client->strategy();
+ auto clientAttr = client->attributes();
if (stream == AUDIO_STREAM_TTS) {
ALOGV("\t found BEACON stream");
- if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+ if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(
+ streamToVolumeSource(AUDIO_STREAM_TTS) /*sourceToIgnore*/)) {
return INVALID_OPERATION;
} else {
beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
@@ -1451,12 +1623,15 @@
policyMix = outputDesc->mPolicyMix;
audio_devices_t newDeviceType;
address = policyMix->mDeviceAddress.string();
- if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
- newDeviceType = policyMix->mDeviceType;
- } else {
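+ // LOOP_BACK mixes (including LOOP_BACK | RENDER) are backed by the remote submix device;
+ // pure RENDER mixes keep their declared device type.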
+ if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
newDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+ } else {
+ newDeviceType = policyMix->mDeviceType;
}
- devices.add(mAvailableOutputDevices.getDevice(newDeviceType, String8(address)));
+ sp device = mAvailableOutputDevices.getDevice(newDeviceType, String8(address),
+ AUDIO_FORMAT_DEFAULT);
+ ALOG_ASSERT(device, "%s: no device found t=%u, a=%s", __func__, newDeviceType, address);
+ devices.add(device);
}
// requiresMuteCheck is false when we can bypass mute strategy.
@@ -1473,24 +1648,23 @@
if (client->hasPreferredDevice(true)) {
devices = getNewOutputDevices(outputDesc, false /*fromCache*/);
if (devices != outputDesc->devices()) {
- checkStrategyRoute(getStrategy(stream), outputDesc->mIoHandle);
+ checkStrategyRoute(clientStrategy, outputDesc->mIoHandle);
}
}
- if (stream == AUDIO_STREAM_MUSIC) {
+ if (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_MEDIA))) {
selectOutputForMusicEffects();
}
- if (outputDesc->streamActiveCount(stream) == 1 || !devices.isEmpty()) {
+ if (outputDesc->getActivityCount(clientVolSrc) == 1 || !devices.isEmpty()) {
// starting an output being rerouted?
if (devices.isEmpty()) {
devices = getNewOutputDevices(outputDesc, false /*fromCache*/);
}
-
- routing_strategy strategy = getStrategy(stream);
- bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
- (strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
- (beaconMuteLatency > 0);
+ bool shouldWait =
+ (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_ALARM)) ||
+ followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_NOTIFICATION)) ||
+ (beaconMuteLatency > 0));
uint32_t waitMs = beaconMuteLatency;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
@@ -1536,7 +1710,7 @@
// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream,
- mVolumeCurves->getVolumeIndex(stream, outputDesc->devices().types()),
+ getVolumeCurves(stream).getVolumeIndex(outputDesc->devices().types()),
outputDesc,
outputDesc->devices().types());
@@ -1545,7 +1719,7 @@
handleNotificationRoutingForStream(stream);
// force reevaluating accessibility routing when ringtone or alarm starts
- if (strategy == STRATEGY_SONIFICATION) {
+ if (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_ALARM))) {
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
@@ -1564,7 +1738,7 @@
if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
- setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
+ setStrategyMute(streamToStrategy(AUDIO_STREAM_ALARM), true, outputDesc);
}
// Automatically enable the remote submix input when output is started on a re routing mix
@@ -1574,7 +1748,8 @@
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address,
- "remote-submix");
+ "remote-submix",
+ AUDIO_FORMAT_DEFAULT);
}
return NO_ERROR;
@@ -1607,11 +1782,12 @@
{
// always handle stream stop, check which stream type is stopping
audio_stream_type_t stream = client->stream();
+ auto clientVolSrc = client->volumeSource();
handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
- if (outputDesc->streamActiveCount(stream) > 0) {
- if (outputDesc->streamActiveCount(stream) == 1) {
+ if (outputDesc->getActivityCount(clientVolSrc) > 0) {
+ if (outputDesc->getActivityCount(clientVolSrc) == 1) {
// Automatically disable the remote submix input when output is stopped on a
// re routing mix of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(outputDesc->devices().types()) &&
@@ -1620,12 +1796,12 @@
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
outputDesc->mPolicyMix->mDeviceAddress,
- "remote-submix");
+ "remote-submix", AUDIO_FORMAT_DEFAULT);
}
}
bool forceDeviceUpdate = false;
if (client->hasPreferredDevice(true)) {
- checkStrategyRoute(getStrategy(stream), AUDIO_IO_HANDLE_NONE);
+ checkStrategyRoute(client->strategy(), AUDIO_IO_HANDLE_NONE);
forceDeviceUpdate = true;
}
@@ -1633,8 +1809,8 @@
outputDesc->setClientActive(client, false);
// store time at which the stream was stopped - see isStreamActive()
- if (outputDesc->streamActiveCount(stream) == 0 || forceDeviceUpdate) {
- outputDesc->mStopTime[stream] = systemTime();
+ if (outputDesc->getActivityCount(clientVolSrc) == 0 || forceDeviceUpdate) {
+ outputDesc->setStopTime(client, systemTime());
DeviceVector newDevices = getNewOutputDevices(outputDesc, false /*fromCache*/);
// delay the device switch by twice the latency because stopOutput() is executed when
// the track stop() command is received and at that time the audio track buffer can
@@ -1669,10 +1845,10 @@
if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
- setStrategyMute(STRATEGY_SONIFICATION, false, outputDesc);
+ setStrategyMute(streamToStrategy(AUDIO_STREAM_RING), false, outputDesc);
}
- if (stream == AUDIO_STREAM_MUSIC) {
+ if (followsSameRouting(client->attributes(), attributes_initializer(AUDIO_USAGE_MEDIA))) {
selectOutputForMusicEffects();
}
return NO_ERROR;
@@ -1727,9 +1903,9 @@
input_type_t *inputType,
audio_port_handle_t *portId)
{
- ALOGV("getInputForAttr() source %d, sampling rate %d, format %#x, channel mask %#x,"
- "session %d, flags %#x",
- attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
+ ALOGV("%s() source %d, sampling rate %d, format %#x, channel mask %#x, session %d, "
+ "flags %#x attributes=%s", __func__, attr->source, config->sample_rate,
+ config->format, config->channel_mask, session, flags, toString(*attr).c_str());
status_t status = NO_ERROR;
audio_source_t halInputSource;
@@ -1812,12 +1988,15 @@
}
*inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
- String8(attr->tags + strlen("addr=")));
+ String8(attr->tags + strlen("addr=")),
+ AUDIO_FORMAT_DEFAULT);
} else {
if (explicitRoutingDevice != nullptr) {
device = explicitRoutingDevice;
} else {
- device = getDeviceAndMixForAttributes(attributes, &policyMix);
+ // Prevent storing an invalid requested device id in clients
+ requestedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ device = mEngine->getInputDeviceForAttributes(attributes, &policyMix);
}
if (device == nullptr) {
ALOGW("getInputForAttr() could not find device for source %d", attributes.source);
@@ -1831,7 +2010,6 @@
// know about it and is therefore considered "legacy"
*inputType = API_INPUT_LEGACY;
} else if (audio_is_remote_submix_device(device->type())) {
- device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX, String8("0"));
*inputType = API_INPUT_MIX_CAPTURE;
} else if (device->type() == AUDIO_DEVICE_IN_TELEPHONY_RX) {
*inputType = API_INPUT_TELEPHONY_RX;
@@ -1841,9 +2019,7 @@
}
- *input = getInputForDevice(device, session, attributes.source,
- config, flags,
- policyMix);
+ *input = getInputForDevice(device, session, attributes, config, flags, policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
status = INVALID_OPERATION;
goto error;
@@ -1851,8 +2027,8 @@
exit:
- *selectedDeviceId = mAvailableInputDevices.contains(device) ?
- device->getId() : AUDIO_PORT_HANDLE_NONE;
+ *selectedDeviceId = mAvailableInputDevices.contains(device) ?
+ device->getId() : AUDIO_PORT_HANDLE_NONE;
isSoundTrigger = attributes.source == AUDIO_SOURCE_HOTWORD &&
mSoundTriggerSessions.indexOfKey(session) > 0;
@@ -1876,16 +2052,16 @@
audio_io_handle_t AudioPolicyManager::getInputForDevice(const sp<DeviceDescriptor> &device,
audio_session_t session,
- audio_source_t inputSource,
+ const audio_attributes_t &attributes,
const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix)
{
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
- audio_source_t halInputSource = inputSource;
+ audio_source_t halInputSource = attributes.source;
bool isSoundTrigger = false;
- if (inputSource == AUDIO_SOURCE_HOTWORD) {
+ if (attributes.source == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
input = mSoundTriggerSessions.valueFor(session);
@@ -1895,7 +2071,7 @@
} else {
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
- } else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
+ } else if (attributes.source == AUDIO_SOURCE_VOICE_COMMUNICATION &&
audio_is_linear_pcm(config->format)) {
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
}
@@ -2065,7 +2241,7 @@
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- address, "remote-submix");
+ address, "remote-submix", AUDIO_FORMAT_DEFAULT);
}
}
}
@@ -2116,7 +2292,7 @@
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address, "remote-submix");
+ address, "remote-submix", AUDIO_FORMAT_DEFAULT);
}
}
resetInputDevice(input);
@@ -2197,19 +2373,21 @@
}
}
-void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax)
+void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
{
ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
- mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
+ if (indexMin < 0 || indexMax < 0) {
+ ALOGE("%s for stream %d: invalid min %d or max %d", __func__, stream , indexMin, indexMax);
+ return;
+ }
+ getVolumeCurves(stream).initVolume(indexMin, indexMax);
// initialize other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
- mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
+ getVolumeCurves((audio_stream_type_t)curStream).initVolume(indexMin, indexMax);
}
}
@@ -2217,12 +2395,13 @@
int index,
audio_devices_t device)
{
-
- // VOICE_CALL stream has minVolumeIndex > 0 but can be muted directly by an
- // app that has MODIFY_PHONE_STATE permission.
- if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
- !(stream == AUDIO_STREAM_VOICE_CALL && index == 0)) ||
- (index > mVolumeCurves->getVolumeIndexMax(stream))) {
+ auto &curves = getVolumeCurves(stream);
+ // VOICE_CALL and BLUETOOTH_SCO stream have minVolumeIndex > 0 but
+ // can be muted directly by an app that has MODIFY_PHONE_STATE permission.
+ if (((index < curves.getVolumeIndexMin()) &&
+ !((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
+ index == 0)) ||
+ (index > curves.getVolumeIndexMax())) {
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
@@ -2230,7 +2409,7 @@
}
// Force max volume if stream cannot be muted
- if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
+ if (!curves.canBeMuted()) index = curves.getVolumeIndexMax();
ALOGV("setStreamVolumeIndex() stream %d, device %08x, index %d",
stream, device, index);
@@ -2240,15 +2419,16 @@
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
- mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
+ auto &curCurves = getVolumeCurves(static_cast<audio_stream_type_t>(curStream));
+ curCurves.addCurrentVolumeIndex(device, index);
}
// update volume on all outputs and streams matching the following:
// - The requested stream (or a stream matching for volume control) is active on the output
- // - The device (or devices) selected by the strategy corresponding to this stream includes
+ // - The device (or devices) selected by the engine for this stream includes
// the requested device
// - For non default requested device, currently selected device on the output is either the
- // requested device or one of the devices selected by the strategy
+ // requested device or one of the devices selected by the engine for this stream
// - For default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only if
// no specific device volume value exists for currently selected device.
status_t status = NO_ERROR;
@@ -2259,12 +2439,12 @@
if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
continue;
}
- if (!(desc->isStreamActive((audio_stream_type_t)curStream) || isInCall())) {
+ if (!(desc->isActive(streamToVolumeSource((audio_stream_type_t)curStream)) || isInCall())) {
continue;
}
- routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
- audio_devices_t curStreamDevice = Volume::getDeviceForVolume(getDeviceForStrategy(
- curStrategy, false /*fromCache*/));
+ audio_devices_t curStreamDevice = Volume::getDeviceForVolume(
+ mEngine->getOutputDevicesForStream((audio_stream_type_t)curStream,
+ false /*fromCache*/).types());
if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) &&
((curStreamDevice & device) == 0)) {
continue;
@@ -2274,8 +2454,7 @@
curStreamDevice |= device;
applyVolume = (Volume::getDeviceForVolume(curDevice) & curStreamDevice) != 0;
} else {
- applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
- stream, curStreamDevice);
+ applyVolume = !curves.hasVolumeIndexForDevice(curStreamDevice);
}
// rescale index before applying to curStream as ranges may be different for
// stream and curStream
@@ -2284,9 +2463,10 @@
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
- status_t volStatus =
- checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
- (stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
+ status_t volStatus = checkAndSetVolume(
+ (audio_stream_type_t)curStream, idx, desc, curDevice,
+ (stream == AUDIO_STREAM_SYSTEM) ?
+ TOUCH_SOUND_FIXED_DELAY_MS : 0);
if (volStatus != NO_ERROR) {
status = volStatus;
}
@@ -2306,14 +2486,14 @@
if (!audio_is_output_device(device)) {
return BAD_VALUE;
}
- // if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device corresponding to
- // the strategy the stream belongs to.
+ // if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device selected for this
+ // stream by the engine.
if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
- device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
+ device = mEngine->getOutputDevicesForStream(stream, true /*fromCache*/).types();
}
device = Volume::getDeviceForVolume(device);
- *index = mVolumeCurves->getVolumeIndex(stream, device);
+ *index = getVolumeCurves(stream).getVolumeIndex(device);
ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
return NO_ERROR;
}
@@ -2329,8 +2509,8 @@
// 3: The primary output
// 4: the first output in the list
- routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
- DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(
+ attributes_initializer(AUDIO_USAGE_MEDIA), nullptr, false /*fromCache*/);
SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
if (outputs.size() == 0) {
@@ -2347,7 +2527,7 @@
for (audio_io_handle_t output : outputs) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
- if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
+ if (activeOnly && !desc->isActive(streamToVolumeSource(AUDIO_STREAM_MUSIC))) {
continue;
}
ALOGV("selectOutputForMusicEffects activeOnly %d output %d flags 0x%08x",
@@ -2402,7 +2582,9 @@
return INVALID_OPERATION;
}
}
- return mEffects.registerEffect(desc, io, strategy, session, id);
+ return mEffects.registerEffect(desc, io, session, id,
+ (strategy == streamToStrategy(AUDIO_STREAM_MUSIC) ||
+ strategy == PRODUCT_STRATEGY_NONE));
}
status_t AudioPolicyManager::unregisterEffect(int id)
@@ -2439,14 +2621,14 @@
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
- active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
+ active = mOutputs.isActive(streamToVolumeSource((audio_stream_type_t)curStream), inPastMs);
}
return active;
}
bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
{
- return mOutputs.isStreamActiveRemotely(stream, inPastMs);
+ return mOutputs.isActiveRemotely(streamToVolumeSource((audio_stream_type_t)stream), inPastMs);
}
bool AudioPolicyManager::isSourceActive(audio_source_t source) const
@@ -2492,18 +2674,24 @@
// examine each mix's route type
for (size_t i = 0; i < mixes.size(); i++) {
AudioMix mix = mixes[i];
- // we only support MIX_ROUTE_FLAG_LOOP_BACK or MIX_ROUTE_FLAG_RENDER, not the combination
- if ((mix.mRouteFlags & MIX_ROUTE_FLAG_ALL) == MIX_ROUTE_FLAG_ALL) {
+ // Only capture of playback is allowed in LOOP_BACK & RENDER mode
+ if (is_mix_loopback_render(mix.mRouteFlags) && mix.mMixType != MIX_TYPE_PLAYERS) {
+ ALOGE("Unsupported Policy Mix %zu of %zu: "
+ "Only capture of playback is allowed in LOOP_BACK & RENDER mode",
+ i, mixes.size());
res = INVALID_OPERATION;
break;
}
+ // LOOP_BACK and LOOP_BACK | RENDER have the same remote submix backend and are handled
+ // in the same way.
if ((mix.mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
- ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
+ ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK %d", i, mixes.size(),
+ mix.mRouteFlags);
if (rSubmixModule == 0) {
rSubmixModule = mHwModules.getModuleFromName(
AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
if (rSubmixModule == 0) {
- ALOGE(" Unable to find audio module for submix, aborting mix %zu registration",
+ ALOGE("Unable to find audio module for submix, aborting mix %zu registration",
i);
res = INVALID_OPERATION;
break;
@@ -2518,7 +2706,7 @@
}
if (mPolicyMixes.registerMix(address, mix, 0 /*output desc*/) != NO_ERROR) {
- ALOGE(" Error registering mix %zu for address %s", i, address.string());
+ ALOGE("Error registering mix %zu for address %s", i, address.string());
res = INVALID_OPERATION;
break;
}
@@ -2536,28 +2724,34 @@
if (mix.mMixType == MIX_TYPE_PLAYERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- address.string(), "remote-submix");
+ address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
} else {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- address.string(), "remote-submix");
+ address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
}
} else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
String8 address = mix.mDeviceAddress;
- audio_devices_t device = mix.mDeviceType;
+ audio_devices_t type = mix.mDeviceType;
ALOGV(" registerPolicyMixes() mix %zu of %zu is RENDER, dev=0x%X addr=%s",
- i, mixes.size(), device, address.string());
+ i, mixes.size(), type, address.string());
+
+ sp<DeviceDescriptor> device = mHwModules.getDeviceDescriptor(
+ mix.mDeviceType, mix.mDeviceAddress,
+ String8(), AUDIO_FORMAT_DEFAULT);
+ if (device == nullptr) {
+ res = INVALID_OPERATION;
+ break;
+ }
bool foundOutput = false;
for (size_t j = 0 ; j < mOutputs.size() ; j++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(j);
- sp<AudioPatch> patch = mAudioPatches.valueFor(desc->getPatchHandle());
- if ((patch != 0) && (patch->mPatch.num_sinks != 0)
- && (patch->mPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE)
- && (patch->mPatch.sinks[0].ext.device.type == device)
- && (strncmp(patch->mPatch.sinks[0].ext.device.address, address.string(),
- AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
+
+ if (desc->supportedDevices().contains(device)) {
if (mPolicyMixes.registerMix(address, mix, desc) != NO_ERROR) {
+ ALOGE("Could not register mix RENDER, dev=0x%X addr=%s", type,
+ address.string());
res = INVALID_OPERATION;
} else {
foundOutput = true;
@@ -2568,12 +2762,12 @@
if (res != NO_ERROR) {
ALOGE(" Error registering mix %zu for device 0x%X addr %s",
- i, device, address.string());
+ i, type, address.string());
res = INVALID_OPERATION;
break;
} else if (!foundOutput) {
ALOGE(" Output not found for mix %zu for device 0x%X addr %s",
- i, device, address.string());
+ i, type, address.string());
res = INVALID_OPERATION;
break;
}
@@ -2614,18 +2808,18 @@
AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address.string(), "remote-submix");
+ address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
}
if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address.string(), "remote-submix");
+ address.string(), "remote-submix", AUDIO_FORMAT_DEFAULT);
}
rSubmixModule->removeOutputProfile(address);
rSubmixModule->removeInputProfile(address);
- } if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+ } else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
if (mPolicyMixes.unregisterMix(mix.mDeviceAddress) != NO_ERROR) {
res = INVALID_OPERATION;
continue;
@@ -2664,7 +2858,8 @@
// reevaluate outputs for all given devices
for (size_t i = 0; i < devices.size(); i++) {
sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
- devices[i].mType, devices[i].mAddress, String8());
+ devices[i].mType, devices[i].mAddress, String8(),
+ AUDIO_FORMAT_DEFAULT);
SortedVector<audio_io_handle_t> outputs;
if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
outputs) != NO_ERROR) {
@@ -2685,7 +2880,8 @@
// reevaluate outputs for all found devices
for (size_t i = 0; i < devices.size(); i++) {
sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
- devices[i].mType, devices[i].mAddress, String8());
+ devices[i].mType, devices[i].mAddress, String8(),
+ AUDIO_FORMAT_DEFAULT);
SortedVector<audio_io_handle_t> outputs;
if (checkOutputsForDevice(devDesc, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
outputs) != NO_ERROR) {
@@ -2730,11 +2926,13 @@
mHwModulesAll.dump(dst);
mOutputs.dump(dst);
mInputs.dump(dst);
- mVolumeCurves->dump(dst);
mEffects.dump(dst);
mAudioPatches.dump(dst);
mPolicyMixes.dump(dst);
mAudioSources.dump(dst);
+
+ dst->appendFormat("\nPolicy Engine dump:\n");
+ mEngine->dump(dst);
}
status_t AudioPolicyManager::dump(int fd)
@@ -2821,7 +3019,7 @@
bool AudioPolicyManager::isDirectOutputSupported(const audio_config_base_t& config,
const audio_attributes_t& attributes) {
audio_output_flags_t output_flags = AUDIO_OUTPUT_FLAG_NONE;
- audio_attributes_flags_to_audio_output_flags(attributes.flags, output_flags);
+ audio_flags_to_audio_output_flags(attributes.flags, &output_flags);
sp<IOProfile> profile = getProfileForOutput(DeviceVector() /*ignore device */,
config.sample_rate,
config.format,
@@ -3326,27 +3524,27 @@
}
}
-void AudioPolicyManager::checkStrategyRoute(routing_strategy strategy,
- audio_io_handle_t ouptutToSkip)
+void AudioPolicyManager::checkStrategyRoute(product_strategy_t ps, audio_io_handle_t ouptutToSkip)
{
- DeviceVector devices = getDevicesForStrategy(strategy, false /*fromCache*/);
+ // Take the first attributes of the product strategy, as they are used to retrieve the routed
+ // device. All attributes within a strategy follow the same "routing strategy".
+ auto attributes = mEngine->getAllAttributesForProductStrategy(ps).front();
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(attributes, nullptr, false);
SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs);
for (size_t j = 0; j < mOutputs.size(); j++) {
if (mOutputs.keyAt(j) == ouptutToSkip) {
continue;
}
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(j);
- if (!isStrategyActive(outputDesc, (routing_strategy)strategy)) {
+ if (!outputDesc->isStrategyActive(ps)) {
continue;
}
// If the default device for this strategy is on another output mix,
// invalidate all tracks in this strategy to force re connection.
// Otherwise select new device on the output mix.
if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- if (getStrategy((audio_stream_type_t)stream) == strategy) {
- mpClientInterface->invalidateStream((audio_stream_type_t)stream);
- }
+ for (auto stream : mEngine->getStreamTypesForProductStrategy(ps)) {
+ mpClientInterface->invalidateStream(stream);
}
} else {
setOutputDevices(
@@ -3358,13 +3556,18 @@
void AudioPolicyManager::clearSessionRoutes(uid_t uid)
{
// remove output routes associated with this uid
- SortedVector<routing_strategy> affectedStrategies;
+ std::vector<product_strategy_t> affectedStrategies;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
for (const auto& client : outputDesc->getClientIterable()) {
if (client->hasPreferredDevice() && client->uid() == uid) {
client->setPreferredDeviceId(AUDIO_PORT_HANDLE_NONE);
- affectedStrategies.add(getStrategy(client->stream()));
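+ // Record each affected product strategy only once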
+ auto clientStrategy = client->strategy();
+ if (std::find(begin(affectedStrategies), end(affectedStrategies), clientStrategy) !=
+ end(affectedStrategies)) {
+ continue;
+ }
+ affectedStrategies.push_back(client->strategy());
}
}
}
@@ -3414,7 +3617,7 @@
*session = (audio_session_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
*ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_INPUT);
audio_attributes_t attr = { .source = AUDIO_SOURCE_HOTWORD };
- *device = getDeviceAndMixForAttributes(attr)->type();
+ *device = mEngine->getInputDeviceForAttributes(attr)->type();
return mSoundTriggerSessions.acquireSession(*session, *ioHandle);
}
@@ -3442,7 +3645,8 @@
sp<DeviceDescriptor> srcDevice =
mAvailableInputDevices.getDevice(source->ext.device.type,
- String8(source->ext.device.address));
+ String8(source->ext.device.address),
+ AUDIO_FORMAT_DEFAULT);
if (srcDevice == 0) {
ALOGW("%s source->ext.device.type %08x not found", __FUNCTION__, source->ext.device.type);
return BAD_VALUE;
@@ -3453,10 +3657,11 @@
struct audio_patch dummyPatch = {};
sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
- sp<SourceClientDescriptor> sourceDesc =
- new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDevice,
- streamTypefromAttributesInt(attributes),
- getStrategyForAttr(attributes));
+ sp<SourceClientDescriptor> sourceDesc = new SourceClientDescriptor(
+ *portId, uid, *attributes, patchDesc, srcDevice,
+ mEngine->getStreamTypeForAttributes(*attributes),
+ mEngine->getProductStrategyForAttributes(*attributes),
+ streamToVolumeSource(mEngine->getStreamTypeForAttributes(*attributes)));
status_t status = connectAudioSource(sourceDesc);
if (status == NO_ERROR) {
@@ -3473,12 +3678,12 @@
disconnectAudioSource(sourceDesc);
audio_attributes_t attributes = sourceDesc->attributes();
- routing_strategy strategy = getStrategyForAttr(&attributes);
audio_stream_type_t stream = sourceDesc->stream();
sp<DeviceDescriptor> srcDevice = sourceDesc->srcDevice();
- DeviceVector sinkDevices = getDevicesForStrategy(strategy, true);
- ALOG_ASSERT(!sinkDevices.isEmpty(), "connectAudioSource(): no device found for strategy");
+ DeviceVector sinkDevices =
+ mEngine->getOutputDevicesForAttributes(attributes, nullptr, true);
+ ALOG_ASSERT(!sinkDevices.isEmpty(), "connectAudioSource(): no device found for attributes");
sp<DeviceDescriptor> sinkDevice = sinkDevices.itemAt(0);
ALOG_ASSERT(mAvailableOutputDevices.contains(sinkDevice), "%s: Device %s not available",
__FUNCTION__, sinkDevice->toString().c_str());
@@ -3502,8 +3707,12 @@
config.format = sourceDesc->config().format;
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ bool isRequestedDeviceForExclusiveUse = false;
+ std::vector<sp<SwAudioOutputDescriptor>> secondaryOutputs;
getOutputForAttrInt(&resultAttr, &output, AUDIO_SESSION_NONE,
- &attributes, &stream, sourceDesc->uid(), &config, &flags, &selectedDeviceId);
+ &attributes, &stream, sourceDesc->uid(), &config, &flags,
+ &selectedDeviceId, &isRequestedDeviceForExclusiveUse,
+ &secondaryOutputs);
if (output == AUDIO_IO_HANDLE_NONE) {
ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevices.types());
return INVALID_OPERATION;
@@ -3726,14 +3935,16 @@
status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address.c_str(),
- name.c_str());
+ name.c_str(),
+ AUDIO_FORMAT_DEFAULT);
if (status != NO_ERROR) {
continue;
}
status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address.c_str(),
- name.c_str());
+ name.c_str(),
+ AUDIO_FORMAT_DEFAULT);
profileUpdated |= (status == NO_ERROR);
}
// FIXME: Why doing this for input HDMI devices if we don't augment their reported formats?
@@ -3746,14 +3957,16 @@
status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address.c_str(),
- name.c_str());
+ name.c_str(),
+ AUDIO_FORMAT_DEFAULT);
if (status != NO_ERROR) {
continue;
}
status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address.c_str(),
- name.c_str());
+ name.c_str(),
+ AUDIO_FORMAT_DEFAULT);
profileUpdated |= (status == NO_ERROR);
}
@@ -3828,16 +4041,15 @@
return NO_ERROR;
}
-sp<SourceClientDescriptor> AudioPolicyManager::getSourceForStrategyOnOutput(
- audio_io_handle_t output, routing_strategy strategy)
+sp<SourceClientDescriptor> AudioPolicyManager::getSourceForAttributesOnOutput(
+ audio_io_handle_t output, const audio_attributes_t &attr)
{
sp<SourceClientDescriptor> source;
for (size_t i = 0; i < mAudioSources.size(); i++) {
sp<SourceClientDescriptor> sourceDesc = mAudioSources.valueAt(i);
- audio_attributes_t attributes = sourceDesc->attributes();
- routing_strategy sourceStrategy = getStrategyForAttr(&attributes);
sp<SwAudioOutputDescriptor> outputDesc = sourceDesc->swOutput().promote();
- if (sourceStrategy == strategy && outputDesc != 0 && outputDesc->mIoHandle == output) {
+ if (followsSameRouting(attr, sourceDesc->attributes()) &&
+ outputDesc != 0 && outputDesc->mIoHandle == output) {
source = sourceDesc;
break;
}
@@ -3864,10 +4076,18 @@
std::vector<const char*> fileNames;
status_t ret;
- if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
- property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
- // A2DP offload supported but disabled: try to use special XML file
- fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
+ if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false)) {
+ if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false) &&
+ property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // Both BluetoothAudio@2.0 and BluetoothA2dp@1.0 (offload) are disabled, so use
+ // the legacy hardware module for A2DP and hearing aid.
+ fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
+ } else if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // A2DP offload supported but disabled: try to use special XML file
+ fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
+ }
+ } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false)) {
+ fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
}
fileNames.push_back(AUDIO_POLICY_XML_CONFIG_FILE_NAME);
@@ -3892,9 +4112,7 @@
mpClientInterface(clientInterface),
mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
mA2dpSuspended(false),
- mVolumeCurves(new VolumeCurvesCollection()),
- mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
- mDefaultOutputDevice, static_cast<VolumeCurvesCollection*>(mVolumeCurves.get())),
+ mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices, mDefaultOutputDevice),
mAudioPortGeneration(1),
mBeaconMuteRefCount(0),
mBeaconPlayingRefCount(0),
@@ -3928,8 +4146,6 @@
}
status_t AudioPolicyManager::initialize() {
- mVolumeCurves->initializeVolumeCurves(getConfig().isSpeakerDrcEnabled());
-
// Once policy config has been parsed, retrieve an instance of the engine and initialize it.
audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
if (!engineInstance) {
@@ -4178,7 +4394,8 @@
// first list already open outputs that can be routed to this device
for (size_t i = 0; i < mOutputs.size(); i++) {
desc = mOutputs.valueAt(i);
- if (!desc->isDuplicated() && desc->supportsDevice(device)) {
+ if (!desc->isDuplicated() && desc->supportsDevice(device)
+ && desc->deviceSupportsEncodedFormats(deviceType)) {
ALOGV("checkOutputsForDevice(): adding opened output %d on device %s",
mOutputs.keyAt(i), device->toString().c_str());
outputs.add(mOutputs.keyAt(i));
@@ -4341,7 +4558,8 @@
desc = mOutputs.valueAt(i);
if (!desc->isDuplicated()) {
// exact match on device
- if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)) {
+ if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)
+ && desc->deviceSupportsEncodedFormats(deviceType)) {
outputs.add(mOutputs.keyAt(i));
} else if (!mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
@@ -4514,37 +4732,31 @@
{
ALOGV("closeOutput(%d)", output);
- sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- if (outputDesc == NULL) {
+ sp<SwAudioOutputDescriptor> closingOutput = mOutputs.valueFor(output);
+ if (closingOutput == NULL) {
ALOGW("closeOutput() unknown output %d", output);
return;
}
- mPolicyMixes.closeOutput(outputDesc);
+ mPolicyMixes.closeOutput(closingOutput);
// look for duplicated outputs connected to the output being removed.
for (size_t i = 0; i < mOutputs.size(); i++) {
- sp<SwAudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i);
- if (dupOutputDesc->isDuplicated() &&
- (dupOutputDesc->mOutput1 == outputDesc ||
- dupOutputDesc->mOutput2 == outputDesc)) {
- sp<SwAudioOutputDescriptor> outputDesc2;
- if (dupOutputDesc->mOutput1 == outputDesc) {
- outputDesc2 = dupOutputDesc->mOutput2;
- } else {
- outputDesc2 = dupOutputDesc->mOutput1;
- }
+ sp<SwAudioOutputDescriptor> dupOutput = mOutputs.valueAt(i);
+ if (dupOutput->isDuplicated() &&
+ (dupOutput->mOutput1 == closingOutput || dupOutput->mOutput2 == closingOutput)) {
+ sp<SwAudioOutputDescriptor> remainingOutput =
+ dupOutput->mOutput1 == closingOutput ? dupOutput->mOutput2 : dupOutput->mOutput1;
// As all active tracks on duplicated output will be deleted,
// and as they were also referenced on the other output, the reference
// count for their stream type must be adjusted accordingly on
// the other output.
- const bool wasActive = outputDesc2->isActive();
- for (const auto &clientPair : dupOutputDesc->getActiveClients()) {
- outputDesc2->changeStreamActiveCount(clientPair.first, -clientPair.second);
- }
+ const bool wasActive = remainingOutput->isActive();
+ // Note: this is a no-op on the closing output, where all clients have already been set inactive
+ dupOutput->setAllClientsInactive();
// stop() will be a no op if the output is still active but is needed in case all
// active streams refcounts where cleared above
if (wasActive) {
- outputDesc2->stop();
+ remainingOutput->stop();
}
audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i);
ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
@@ -4556,7 +4768,7 @@
nextAudioPortGeneration();
- ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
+ ssize_t index = mAudioPatches.indexOfKey(closingOutput->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
(void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -4564,7 +4776,7 @@
mpClientInterface->onAudioPatchListUpdate();
}
- outputDesc->close();
+ closingOutput->close();
removeOutput(output);
mPreviousOutputs = mOutputs;
@@ -4628,7 +4840,8 @@
ALOGVV("output %zu isDuplicated=%d device=%s",
i, openOutputs.valueAt(i)->isDuplicated(),
openOutputs.valueAt(i)->supportedDevices().toString().c_str());
- if (openOutputs.valueAt(i)->supportsAllDevices(devices)) {
+ if (openOutputs.valueAt(i)->supportsAllDevices(devices)
+ && openOutputs.valueAt(i)->deviceSupportsEncodedFormats(devices.types())) {
ALOGVV("%s() found output %d", __func__, openOutputs.keyAt(i));
outputs.add(openOutputs.keyAt(i));
}
@@ -4642,6 +4855,7 @@
// output is suspended before any tracks are moved to it
checkA2dpSuspend();
checkOutputForAllStrategies();
+ checkSecondaryOutputs();
if (onOutputsChecked != nullptr && onOutputsChecked()) checkA2dpSuspend();
updateDevicesAndOutputs();
if (mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD) != 0) {
@@ -4649,16 +4863,25 @@
}
}
-void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
+bool AudioPolicyManager::followsSameRouting(const audio_attributes_t &lAttr,
+ const audio_attributes_t &rAttr) const
{
- DeviceVector oldDevices = getDevicesForStrategy(strategy, true /*fromCache*/);
- DeviceVector newDevices = getDevicesForStrategy(strategy, false /*fromCache*/);
+ return mEngine->getProductStrategyForAttributes(lAttr) ==
+ mEngine->getProductStrategyForAttributes(rAttr);
+}
+
+void AudioPolicyManager::checkOutputForAttributes(const audio_attributes_t &attr)
+{
+ auto psId = mEngine->getProductStrategyForAttributes(attr);
+
+ DeviceVector oldDevices = mEngine->getOutputDevicesForAttributes(attr, 0, true /*fromCache*/);
+ DeviceVector newDevices = mEngine->getOutputDevicesForAttributes(attr, 0, false /*fromCache*/);
SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevices(oldDevices, mPreviousOutputs);
SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevices(newDevices, mOutputs);
// also take into account external policy-related changes: add all outputs which are
// associated with policies in the "before" and "after" output vectors
- ALOGVV("checkOutputForStrategy(): policy related outputs");
+ ALOGVV("%s(): policy related outputs", __func__);
for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) {
const sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i);
if (desc != 0 && desc->mPolicyMix != NULL) {
@@ -4674,7 +4897,7 @@
}
}
- if (!dstOutputs.isEmpty() && srcOutputs != dstOutputs) {
+ if (srcOutputs != dstOutputs) {
// get maximum latency of all source outputs to determine the minimum mute time guaranteeing
// audio from invalidated tracks will be rendered when unmuting
uint32_t maxLatency = 0;
@@ -4684,50 +4907,64 @@
maxLatency = desc->latency();
}
}
- ALOGV("%s: strategy %d, moving from output %s to output %s", __func__, strategy,
- (srcOutputs.isEmpty()? "none" : std::to_string(srcOutputs[0]).c_str()),
- (dstOutputs.isEmpty()? "none" : std::to_string(dstOutputs[0]).c_str()));
+ ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
+ "%s: strategy %d, moving from output %s to output %s", __func__, psId,
+ std::to_string(srcOutputs[0]).c_str(),
+ std::to_string(dstOutputs[0]).c_str());
// mute strategy while moving tracks from one output to another
for (audio_io_handle_t srcOut : srcOutputs) {
sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
- if (desc != 0 && isStrategyActive(desc, strategy)) {
- setStrategyMute(strategy, true, desc);
- setStrategyMute(strategy, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
+ if (desc != 0 && desc->isStrategyActive(psId)) {
+ setStrategyMute(psId, true, desc);
+ setStrategyMute(psId, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
newDevices.types());
}
- sp<SourceClientDescriptor> source =
- getSourceForStrategyOnOutput(srcOut, strategy);
+ sp<SourceClientDescriptor> source = getSourceForAttributesOnOutput(srcOut, attr);
if (source != 0){
connectAudioSource(source);
}
}
- // Move effects associated to this strategy from previous output to new output
- if (strategy == STRATEGY_MEDIA) {
+ // Move effects associated to this stream from previous output to new output
+ if (followsSameRouting(attr, attributes_initializer(AUDIO_USAGE_MEDIA))) {
selectOutputForMusicEffects();
}
- // Move tracks associated to this strategy from previous output to new output
- for (int i = 0; i < AUDIO_STREAM_FOR_POLICY_CNT; i++) {
- if (getStrategy((audio_stream_type_t)i) == strategy) {
- mpClientInterface->invalidateStream((audio_stream_type_t)i);
- }
+ // Move tracks associated to this stream (and linked) from previous output to new output
+ for (auto stream : mEngine->getStreamTypesForProductStrategy(psId)) {
+ mpClientInterface->invalidateStream(stream);
}
}
}
void AudioPolicyManager::checkOutputForAllStrategies()
{
- if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
- checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
- checkOutputForStrategy(STRATEGY_PHONE);
- if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
- checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
- checkOutputForStrategy(STRATEGY_SONIFICATION);
- checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
- checkOutputForStrategy(STRATEGY_ACCESSIBILITY);
- checkOutputForStrategy(STRATEGY_MEDIA);
- checkOutputForStrategy(STRATEGY_DTMF);
- checkOutputForStrategy(STRATEGY_REROUTING);
+ for (const auto &strategy : mEngine->getOrderedProductStrategies()) {
+ auto attributes = mEngine->getAllAttributesForProductStrategy(strategy).front();
+ checkOutputForAttributes(attributes);
+ }
+}
+
+void AudioPolicyManager::checkSecondaryOutputs() {
+ std::set<audio_stream_type_t> streamsToInvalidate;
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ const sp<SwAudioOutputDescriptor>& outputDescriptor = mOutputs[i];
+ for (const sp<TrackClientDescriptor>& client : outputDescriptor->getClientIterable()) {
+ // FIXME code duplicated from getOutputForAttrInt
+ sp<SwAudioOutputDescriptor> desc;
+ std::vector<sp<SwAudioOutputDescriptor>> secondaryDescs;
+ mPolicyMixes.getOutputForAttr(client->attributes(), client->uid(), desc,
+ &secondaryDescs);
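+ // Invalidate the client's stream if the secondary outputs now selected by the policy
+ // mixes no longer match the ones currently attached to the client.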
+ if (!std::equal(client->getSecondaryOutputs().begin(),
+ client->getSecondaryOutputs().end(),
+ secondaryDescs.begin(), secondaryDescs.end())) {
+ streamsToInvalidate.insert(client->stream());
+ }
+ }
+ }
+ for (audio_stream_type_t stream : streamsToInvalidate) {
+ ALOGD("%s Invalidate stream %d due to secondary output change", __func__, stream);
+ mpClientInterface->invalidateStream(stream);
+ }
}
void AudioPolicyManager::checkA2dpSuspend()
@@ -4780,38 +5017,6 @@
}
}
-template <class IoDescriptor, class Filter>
-sp<DeviceDescriptor> AudioPolicyManager::findPreferredDevice(
- IoDescriptor& desc, Filter filter, bool& active, const DeviceVector& devices)
-{
- auto activeClients = desc->clientsList(true /*activeOnly*/);
- auto activeClientsWithRoute =
- desc->clientsList(true /*activeOnly*/, filter, true /*preferredDevice*/);
- active = activeClients.size() > 0;
- if (active && activeClients.size() == activeClientsWithRoute.size()) {
- return devices.getDeviceFromId(activeClientsWithRoute[0]->preferredDeviceId());
- }
- return nullptr;
-}
-
-template <class IoCollection, class Filter>
-sp<DeviceDescriptor> AudioPolicyManager::findPreferredDevice(
- IoCollection& ioCollection, Filter filter, const DeviceVector& devices)
-{
- sp<DeviceDescriptor> device;
- for (size_t i = 0; i < ioCollection.size(); i++) {
- auto desc = ioCollection.valueAt(i);
- bool active;
- sp<DeviceDescriptor> curDevice = findPreferredDevice(desc, filter, active, devices);
- if (active && curDevice == nullptr) {
- return nullptr;
- } else if (curDevice != nullptr) {
- device = curDevice;
- }
- }
- return device;
-}
-
DeviceVector AudioPolicyManager::getNewOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
bool fromCache)
{
@@ -4831,59 +5036,34 @@
// input: a specific app can not force routing for other apps by setting a preferred device.
bool active; // unused
sp<DeviceDescriptor> device =
- findPreferredDevice(outputDesc, STRATEGY_NONE, active, mAvailableOutputDevices);
+ findPreferredDevice(outputDesc, PRODUCT_STRATEGY_NONE, active, mAvailableOutputDevices);
if (device != nullptr) {
return DeviceVector(device);
}
- // check the following by order of priority to request a routing change if necessary:
- // 1: the strategy enforced audible is active and enforced on the output:
- // use device for strategy enforced audible
- // 2: we are in call or the strategy phone is active on the output:
- // use device for strategy phone
- // 3: the strategy sonification is active on the output:
- // use device for strategy sonification
- // 4: the strategy for enforced audible is active but not enforced on the output:
- // use the device for strategy enforced audible
- // 5: the strategy accessibility is active on the output:
- // use device for strategy accessibility
- // 6: the strategy "respectful" sonification is active on the output:
- // use device for strategy "respectful" sonification
- // 7: the strategy media is active on the output:
- // use device for strategy media
- // 8: the strategy DTMF is active on the output:
- // use device for strategy DTMF
- // 9: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
- // use device for strategy t-t-s
-
- // FIXME: extend use of isStrategyActiveOnSameModule() to all strategies
- // with a refined rule considering mutually exclusive devices (using same backend)
- // as opposed to all streams on the same audio HAL module.
- if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE) &&
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
- devices = getDevicesForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
- } else if (isInCall() ||
- isStrategyActiveOnSameModule(outputDesc, STRATEGY_PHONE)) {
- devices = getDevicesForStrategy(STRATEGY_PHONE, fromCache);
- } else if (isStrategyActiveOnSameModule(outputDesc, STRATEGY_SONIFICATION)) {
- devices = getDevicesForStrategy(STRATEGY_SONIFICATION, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE)) {
- devices = getDevicesForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
- devices = getDevicesForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION_RESPECTFUL)) {
- devices = getDevicesForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_MEDIA)) {
- devices = getDevicesForStrategy(STRATEGY_MEDIA, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_DTMF)) {
- devices = getDevicesForStrategy(STRATEGY_DTMF, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
- devices = getDevicesForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_REROUTING)) {
- devices = getDevicesForStrategy(STRATEGY_REROUTING, fromCache);
+ // Legacy Engine cannot take care of bus devices and mix, so we need to handle the conflict
+ // of setForceUse / Default Bus device here
+ device = mPolicyMixes.getDeviceAndMixForOutput(outputDesc, mAvailableOutputDevices);
+ if (device != nullptr) {
+ return DeviceVector(device);
}
- ALOGV("getNewOutputDevice() selected devices %s", devices.toString().c_str());
+ for (const auto &productStrategy : mEngine->getOrderedProductStrategies()) {
+ StreamTypeVector streams = mEngine->getStreamTypesForProductStrategy(productStrategy);
+ auto attr = mEngine->getAllAttributesForProductStrategy(productStrategy).front();
+
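+ // Pick the devices of the highest priority strategy active on this output; voice (while
+ // in call) and alarm strategies also count when active on another output of the same HW module.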
+ if ((hasVoiceStream(streams) &&
+ (isInCall() || mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc))) ||
+ (hasStream(streams, AUDIO_STREAM_ALARM) &&
+ mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc)) ||
+ outputDesc->isStrategyActive(productStrategy)) {
+ // Retrieval of devices for voice DL is done on the primary output profile; we cannot
+ // check the route (it would force modifying the configuration file for this profile).
+ devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, fromCache);
+ break;
+ }
+ }
+ ALOGV("%s selected devices %s", __func__, devices.toString().c_str());
return devices;
}
@@ -4911,13 +5091,13 @@
}
// If we are not in call and no client is active on this input, this methods returns
- // AUDIO_DEVICE_NONE, causing the patch on the input stream to be released.
+ // a null sp<>, causing the patch on the input stream to be released.
audio_attributes_t attributes = inputDesc->getHighestPriorityAttributes();
if (attributes.source == AUDIO_SOURCE_DEFAULT && isInCall()) {
attributes.source = AUDIO_SOURCE_VOICE_COMMUNICATION;
}
if (attributes.source != AUDIO_SOURCE_DEFAULT) {
- device = getDeviceAndMixForAttributes(attributes);
+ device = mEngine->getInputDeviceForAttributes(attributes);
}
return device;
@@ -4928,30 +5108,26 @@
return (stream1 == stream2);
}
-uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
- return (uint32_t)getStrategy(stream);
-}
-
audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
// By checking the range of stream before calling getStrategy, we avoid
- // getStrategy's behavior for invalid streams. getStrategy would do a ALOGE
- // and then return STRATEGY_MEDIA, but we want to return the empty set.
- if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_PUBLIC_CNT) {
+ // getOutputDevicesForStream's behavior for invalid streams.
+ // engine's getOutputDevicesForStream would fall back on its default behavior (most probably
+ // the device for the music stream), but we want to return the empty set.
+ if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
return AUDIO_DEVICE_NONE;
}
DeviceVector activeDevices;
DeviceVector devices;
- for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
- if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ for (audio_stream_type_t curStream = AUDIO_STREAM_MIN; curStream < AUDIO_STREAM_PUBLIC_CNT;
+ curStream = (audio_stream_type_t) (curStream + 1)) {
+ if (!streamsMatchForvolume(stream, curStream)) {
continue;
}
- routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
- DeviceVector curDevices =
- getDevicesForStrategy((routing_strategy)curStrategy, false /*fromCache*/);
+ DeviceVector curDevices = mEngine->getOutputDevicesForStream(curStream, false/*fromCache*/);
devices.merge(curDevices);
for (audio_io_handle_t output : getOutputsForDevices(curDevices, mOutputs)) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
+ if (outputDesc->isActive(streamToVolumeSource((audio_stream_type_t)curStream))) {
activeDevices.merge(outputDesc->devices());
}
}
@@ -4972,28 +5148,10 @@
return devices.types();
}
-routing_strategy AudioPolicyManager::getStrategy(audio_stream_type_t stream) const
-{
- ALOG_ASSERT(stream != AUDIO_STREAM_PATCH,"getStrategy() called for AUDIO_STREAM_PATCH");
- return mEngine->getStrategyForStream(stream);
-}
-
-routing_strategy AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
- // flags to strategy mapping
- if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
- return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
- }
- if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
- return STRATEGY_ENFORCED_AUDIBLE;
- }
- // usage to strategy mapping
- return mEngine->getStrategyForUsage(attr->usage);
-}
-
void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
switch(stream) {
case AUDIO_STREAM_MUSIC:
- checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
+ checkOutputForAttributes(attributes_initializer(AUDIO_USAGE_NOTIFICATION));
updateDevicesAndOutputs();
break;
default:
@@ -5060,33 +5218,14 @@
return 0;
}
-DeviceVector AudioPolicyManager::getDevicesForStrategy(routing_strategy strategy, bool fromCache)
-{
- // Honor explicit routing requests only if all active clients have a preferred route in which
- // case the last active client route is used
- sp<DeviceDescriptor> device = findPreferredDevice(mOutputs, strategy, mAvailableOutputDevices);
- if (device != nullptr) {
- return DeviceVector(device);
- }
-
- if (fromCache) {
- ALOGVV("%s from cache strategy %d, device %s", __func__, strategy,
- mDevicesForStrategy[strategy].toString().c_str());
- return mDevicesForStrategy[strategy];
- }
- return mAvailableOutputDevices.getDevicesFromTypeMask(mEngine->getDeviceForStrategy(strategy));
-}
-
void AudioPolicyManager::updateDevicesAndOutputs()
{
- for (int i = 0; i < NUM_STRATEGIES; i++) {
- mDevicesForStrategy[i] = getDevicesForStrategy((routing_strategy)i, false /*fromCache*/);
- }
+ mEngine->updateDeviceSelectionCache();
mPreviousOutputs = mOutputs;
}
uint32_t AudioPolicyManager::checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t prevDeviceType,
+ const DeviceVector &prevDevices,
uint32_t delayMs)
{
// mute/unmute strategies using an incompatible device combination
@@ -5097,22 +5236,24 @@
}
uint32_t muteWaitMs = 0;
- audio_devices_t deviceType = outputDesc->devices().types();
- bool shouldMute = outputDesc->isActive() && (popcount(deviceType) >= 2);
+ DeviceVector devices = outputDesc->devices();
+ bool shouldMute = outputDesc->isActive() && (devices.size() >= 2);
- for (size_t i = 0; i < NUM_STRATEGIES; i++) {
- audio_devices_t curDeviceType =
- getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
- curDeviceType = curDeviceType & outputDesc->supportedDevices().types();
- bool mute = shouldMute && (curDeviceType & deviceType) && (curDeviceType != deviceType);
+ auto productStrategies = mEngine->getOrderedProductStrategies();
+ for (const auto &productStrategy : productStrategies) {
+ auto attributes = mEngine->getAllAttributesForProductStrategy(productStrategy).front();
+ DeviceVector curDevices =
+ mEngine->getOutputDevicesForAttributes(attributes, nullptr, false/*fromCache*/);
+ curDevices = curDevices.filter(outputDesc->supportedDevices());
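+ // Mute the strategy if this active output is routed to several devices and the strategy's
+ // devices overlap with, but differ from, that device set.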
+ bool mute = shouldMute && curDevices.containsAtLeastOne(devices) && curDevices != devices;
bool doMute = false;
- if (mute && !outputDesc->mStrategyMutedByDevice[i]) {
+ if (mute && !outputDesc->isStrategyMutedByDevice(productStrategy)) {
doMute = true;
- outputDesc->mStrategyMutedByDevice[i] = true;
- } else if (!mute && outputDesc->mStrategyMutedByDevice[i]){
+ outputDesc->setStrategyMutedByDevice(productStrategy, true);
+ } else if (!mute && outputDesc->isStrategyMutedByDevice(productStrategy)) {
doMute = true;
- outputDesc->mStrategyMutedByDevice[i] = false;
+ outputDesc->setStrategyMutedByDevice(productStrategy, false);
}
if (doMute) {
for (size_t j = 0; j < mOutputs.size(); j++) {
@@ -5121,10 +5262,10 @@
if (!desc->supportedDevices().containsAtLeastOne(outputDesc->supportedDevices())) {
continue;
}
- ALOGVV("checkDeviceMuteStrategies() %s strategy %zu (curDevice %04x)",
- mute ? "muting" : "unmuting", i, curDeviceType);
- setStrategyMute((routing_strategy)i, mute, desc, mute ? 0 : delayMs);
- if (isStrategyActive(desc, (routing_strategy)i)) {
+ ALOGVV("%s() %s (curDevice %s)", __func__,
+ mute ? "muting" : "unmuting", curDevices.toString().c_str());
+ setStrategyMute(productStrategy, mute, desc, mute ? 0 : delayMs);
+ if (desc->isStrategyActive(productStrategy)) {
if (mute) {
// FIXME: should not need to double latency if volume could be applied
// immediately by the audioflinger mixer. We must account for the delay
@@ -5142,7 +5283,7 @@
// temporary mute output if device selection changes to avoid volume bursts due to
// different per device volumes
- if (outputDesc->isActive() && (deviceType != prevDeviceType)) {
+ if (outputDesc->isActive() && (devices != prevDevices)) {
uint32_t tempMuteWaitMs = outputDesc->latency() * 2;
// temporary mute duration is conservatively set to 4 times the reported latency
uint32_t tempMuteDurationMs = outputDesc->latency() * 4;
@@ -5150,13 +5291,13 @@
muteWaitMs = tempMuteWaitMs;
}
- for (size_t i = 0; i < NUM_STRATEGIES; i++) {
- if (isStrategyActive(outputDesc, (routing_strategy)i)) {
+ for (const auto &productStrategy : productStrategies) {
+ if (outputDesc->isStrategyActive(productStrategy)) {
// make sure that we do not start the temporary mute period too early in case of
// delayed device change
- setStrategyMute((routing_strategy)i, true, outputDesc, delayMs);
- setStrategyMute((routing_strategy)i, false, outputDesc,
- delayMs + tempMuteDurationMs, deviceType);
+ setStrategyMute(productStrategy, true, outputDesc, delayMs);
+ setStrategyMute(productStrategy, false, outputDesc, delayMs + tempMuteDurationMs,
+ devices.types());
}
}
}
@@ -5190,16 +5331,17 @@
// filter devices according to output selected
DeviceVector filteredDevices = outputDesc->filterSupportedDevices(devices);
+ DeviceVector prevDevices = outputDesc->devices();
// no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
- // output profile
- if (!devices.isEmpty() && filteredDevices.isEmpty()) {
+ // output profile, or if the new device is not supported AND the previous device(s) are still
+ // available (otherwise the device reset must be done on the output)
+ if (!devices.isEmpty() && filteredDevices.isEmpty() &&
+ !mAvailableOutputDevices.filter(prevDevices).empty()) {
ALOGV("%s: unsupported device %s for output", __func__, devices.toString().c_str());
return 0;
}
- DeviceVector prevDevices = outputDesc->devices();
-
ALOGV("setOutputDevices() prevDevice %s", prevDevices.toString().c_str());
if (!filteredDevices.isEmpty()) {
@@ -5208,7 +5350,7 @@
// if the outputs are not materially active, there is no need to mute.
if (requiresMuteCheck) {
- muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevices.types(), delayMs);
+ muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevices, delayMs);
} else {
ALOGV("%s: suppressing checkDeviceMuteStrategies", __func__);
muteWaitMs = 0;
@@ -5220,7 +5362,7 @@
// AND force is not specified
// AND the output is connected by a valid audio patch.
// Doing this check here allows the caller to call setOutputDevices() without conditions
- if ((!filteredDevices.isEmpty() || filteredDevices == prevDevices) &&
+ if ((filteredDevices.isEmpty() || filteredDevices == prevDevices) &&
!force && outputDesc->getPatchHandle() != 0) {
ALOGV("%s setting same device %s or null device, force=%d, patch handle=%d", __func__,
filteredDevices.toString().c_str(), force, outputDesc->getPatchHandle());
@@ -5380,40 +5522,12 @@
return NULL;
}
-sp<DeviceDescriptor> AudioPolicyManager::getDeviceAndMixForAttributes(
- const audio_attributes_t &attributes, AudioMix **policyMix)
-{
- // Honor explicit routing requests only if all active clients have a preferred route in which
- // case the last active client route is used
- sp<DeviceDescriptor> device =
- findPreferredDevice(mInputs, attributes.source, mAvailableInputDevices);
- if (device != nullptr) {
- return device;
- }
-
- sp<DeviceDescriptor> selectedDeviceFromMix =
- mPolicyMixes.getDeviceAndMixForInputSource(attributes.source, mAvailableInputDevices,
- policyMix);
- return (selectedDeviceFromMix != nullptr) ?
- selectedDeviceFromMix : getDeviceForAttributes(attributes);
-}
-
-sp<DeviceDescriptor> AudioPolicyManager::getDeviceForAttributes(const audio_attributes_t &attributes)
-{
- audio_devices_t device = mEngine->getDeviceForInputSource(attributes.source);
- if (attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX &&
- strncmp(attributes.tags, "addr=", strlen("addr=")) == 0) {
- return mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
- String8(attributes.tags + strlen("addr=")));
- }
- return mAvailableInputDevices.getDevice(device);
-}
-
float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
int index,
audio_devices_t device)
{
- float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
+ auto &curves = getVolumeCurves(stream);
+ float volumeDB = curves.volIndexToDb(Volume::getDeviceCategory(device), index);
// handle the case of accessibility active while a ringtone is playing: if the ringtone is much
// louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
@@ -5428,7 +5542,7 @@
// in-call: always cap volume by voice volume + some low headroom
if ((stream != AUDIO_STREAM_VOICE_CALL) &&
- (isInCall() || mOutputs.isStreamActiveLocally(AUDIO_STREAM_VOICE_CALL))) {
+ (isInCall() || mOutputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL)))) {
switch (stream) {
case AUDIO_STREAM_SYSTEM:
case AUDIO_STREAM_RING:
@@ -5438,8 +5552,7 @@
case AUDIO_STREAM_ENFORCED_AUDIBLE:
case AUDIO_STREAM_DTMF:
case AUDIO_STREAM_ACCESSIBILITY: {
- int voiceVolumeIndex =
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
+ int voiceVolumeIndex = getVolumeCurves(AUDIO_STREAM_VOICE_CALL).getVolumeIndex(device);
const float maxVoiceVolDb =
computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
+ IN_CALL_EARPIECE_HEADROOM_DB;
@@ -5461,30 +5574,31 @@
// speaker is part of the select devices
// - if music is playing, always limit the volume to current music volume,
// with a minimum threshold at -36dB so that notification is always perceived.
- const routing_strategy stream_strategy = getStrategy(stream);
if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
AUDIO_DEVICE_OUT_WIRED_HEADSET |
AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
AUDIO_DEVICE_OUT_USB_HEADSET |
AUDIO_DEVICE_OUT_HEARING_AID)) &&
- ((stream_strategy == STRATEGY_SONIFICATION)
- || (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
+ ((stream == AUDIO_STREAM_ALARM || stream == AUDIO_STREAM_RING)
+ || (stream == AUDIO_STREAM_NOTIFICATION)
|| (stream == AUDIO_STREAM_SYSTEM)
- || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
- (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
- mVolumeCurves->canBeMuted(stream)) {
+ || ((stream == AUDIO_STREAM_ENFORCED_AUDIBLE) &&
+ (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) ==
+ AUDIO_POLICY_FORCE_NONE))) &&
+ getVolumeCurves(stream).canBeMuted()) {
// when the phone is ringing we must consider that music could have been paused just before
// by the music application and behave as if music was active if the last music track was
// just stopped
if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
mLimitRingtoneVolume) {
volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
- audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/);
+ audio_devices_t musicDevice =
+ mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
+ nullptr, true /*fromCache*/).types();
float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC,
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_MUSIC,
- musicDevice),
- musicDevice);
+ getVolumeCurves(AUDIO_STREAM_MUSIC).getVolumeIndex(musicDevice),
+ musicDevice);
float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
if (volumeDB > minVolDB) {
@@ -5504,7 +5618,7 @@
}
}
} else if ((Volume::getDeviceForVolume(device) != AUDIO_DEVICE_OUT_SPEAKER) ||
- stream_strategy != STRATEGY_SONIFICATION) {
+ (stream != AUDIO_STREAM_ALARM && stream != AUDIO_STREAM_RING)) {
volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
}
}
@@ -5519,25 +5633,35 @@
if (srcStream == dstStream) {
return srcIndex;
}
- float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
- float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
- float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
- float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+ auto &srcCurves = getVolumeCurves(srcStream);
+ auto &dstCurves = getVolumeCurves(dstStream);
+ float minSrc = (float)srcCurves.getVolumeIndexMin();
+ float maxSrc = (float)srcCurves.getVolumeIndexMax();
+ float minDst = (float)dstCurves.getVolumeIndexMin();
+ float maxDst = (float)dstCurves.getVolumeIndexMax();
+ // preserve mute request or correct range
+ if (srcIndex < minSrc) {
+ if (srcIndex == 0) {
+ return 0;
+ }
+ srcIndex = minSrc;
+ } else if (srcIndex > maxSrc) {
+ srcIndex = maxSrc;
+ }
return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
}
status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
- int index,
- const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t device,
- int delayMs,
- bool force)
+ int index,
+ const sp<AudioOutputDescriptor>& outputDesc,
+ audio_devices_t device,
+ int delayMs,
+ bool force)
{
// do not change actual stream volume if the stream is muted
- if (outputDesc->mMuteCount[stream] != 0) {
- ALOGVV("checkAndSetVolume() stream %d muted count %d",
- stream, outputDesc->mMuteCount[stream]);
+ if (outputDesc->isMuted(streamToVolumeSource(stream))) {
+ ALOGVV("%s() stream %d muted count %d", __func__, stream, outputDesc->getMuteCount(stream));
return NO_ERROR;
}
audio_policy_forced_cfg_t forceUseForComm =
@@ -5569,7 +5693,7 @@
float voiceVolume;
// Force voice volume to max for bluetooth SCO as volume is managed by the headset
if (stream == AUDIO_STREAM_VOICE_CALL) {
- voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
+ voiceVolume = (float)index/(float)getVolumeCurves(stream).getVolumeIndexMax();
} else {
voiceVolume = 1.0;
}
@@ -5592,7 +5716,7 @@
for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
checkAndSetVolume((audio_stream_type_t)stream,
- mVolumeCurves->getVolumeIndex((audio_stream_type_t)stream, device),
+ getVolumeCurves((audio_stream_type_t)stream).getVolumeIndex(device),
outputDesc,
device,
delayMs,
@@ -5600,18 +5724,16 @@
}
}
-void AudioPolicyManager::setStrategyMute(routing_strategy strategy,
- bool on,
- const sp<AudioOutputDescriptor>& outputDesc,
- int delayMs,
- audio_devices_t device)
+void AudioPolicyManager::setStrategyMute(product_strategy_t strategy,
+ bool on,
+ const sp<AudioOutputDescriptor>& outputDesc,
+ int delayMs,
+ audio_devices_t device)
{
- ALOGVV("setStrategyMute() strategy %d, mute %d, output ID %d",
- strategy, on, outputDesc->getId());
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- if (getStrategy((audio_stream_type_t)stream) == strategy) {
- setStreamMute((audio_stream_type_t)stream, on, outputDesc, delayMs, device);
- }
+ for (auto stream: mEngine->getStreamTypesForProductStrategy(strategy)) {
+ ALOGVV("%s() stream %d, mute %d, output ID %d", __FUNCTION__, stream, on,
+ outputDesc->getId());
+ setStreamMute(stream, on, outputDesc, delayMs, device);
}
}
@@ -5626,26 +5748,26 @@
}
ALOGVV("setStreamMute() stream %d, mute %d, mMuteCount %d device %04x",
- stream, on, outputDesc->mMuteCount[stream], device);
-
+ stream, on, outputDesc->getMuteCount(stream), device);
+ auto &curves = getVolumeCurves(stream);
if (on) {
- if (outputDesc->mMuteCount[stream] == 0) {
- if (mVolumeCurves->canBeMuted(stream) &&
+ if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
+ if (curves.canBeMuted() &&
((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
(mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) {
checkAndSetVolume(stream, 0, outputDesc, device, delayMs);
}
}
// increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
- outputDesc->mMuteCount[stream]++;
+ outputDesc->incMuteCount(streamToVolumeSource(stream));
} else {
- if (outputDesc->mMuteCount[stream] == 0) {
+ if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
ALOGV("setStreamMute() unmuting non muted stream!");
return;
}
- if (--outputDesc->mMuteCount[stream] == 0) {
+ if (outputDesc->decMuteCount(streamToVolumeSource(stream)) == 0) {
checkAndSetVolume(stream,
- mVolumeCurves->getVolumeIndex(stream, device),
+ curves.getVolumeIndex(device),
outputDesc,
device,
delayMs);
@@ -5653,25 +5775,9 @@
}
}
-audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
-{
- // flags to stream type mapping
- if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
- return AUDIO_STREAM_ENFORCED_AUDIBLE;
- }
- if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
- return AUDIO_STREAM_BLUETOOTH_SCO;
- }
- if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
- return AUDIO_STREAM_TTS;
- }
-
- return audio_usage_to_stream_type(attr->usage);
-}
-
bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa)
{
- // has flags that map to a strategy?
+ // has flags that map to a stream type?
if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
return true;
}
@@ -5702,37 +5808,6 @@
return true;
}
-bool AudioPolicyManager::isStrategyActive(const sp<AudioOutputDescriptor>& outputDesc,
- routing_strategy strategy, uint32_t inPastMs,
- nsecs_t sysTime) const
-{
- if ((sysTime == 0) && (inPastMs != 0)) {
- sysTime = systemTime();
- }
- for (int i = 0; i < (int)AUDIO_STREAM_FOR_POLICY_CNT; i++) {
- if (((getStrategy((audio_stream_type_t)i) == strategy) ||
- (STRATEGY_NONE == strategy)) &&
- outputDesc->isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
- return true;
- }
- }
- return false;
-}
-
-bool AudioPolicyManager::isStrategyActiveOnSameModule(const sp<SwAudioOutputDescriptor>& outputDesc,
- routing_strategy strategy, uint32_t inPastMs,
- nsecs_t sysTime) const
-{
- for (size_t i = 0; i < mOutputs.size(); i++) {
- sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
- if (outputDesc->sharesHwModuleWith(desc)
- && isStrategyActive(desc, strategy, inPastMs, sysTime)) {
- return true;
- }
- }
- return false;
-}
-
audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
{
return mEngine->getForceUse(usage);
@@ -5781,6 +5856,8 @@
}
}
+ mInputs.clearSessionRoutesForDevice(deviceDesc);
+
mHwModules.cleanUpForDevice(deviceDesc);
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index e99de16..9fe8d1d 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -49,7 +49,7 @@
#include <AudioPolicyMix.h>
#include <EffectDescriptor.h>
#include <SoundTriggerSession.h>
-#include <VolumeCurve.h>
+#include "TypeConverter.h"
namespace android {
@@ -97,12 +97,14 @@
virtual status_t setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name);
+ const char *device_name,
+ audio_format_t encodedFormat);
virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
const char *device_address);
virtual status_t handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name);
+ const char *device_name,
+ audio_format_t encodedFormat);
virtual void setPhoneState(audio_mode_t state);
virtual void setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config);
@@ -111,15 +113,16 @@
virtual void setSystemProperty(const char* property, const char* value);
virtual status_t initCheck();
virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
- virtual status_t getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session,
- audio_stream_type_t *stream,
- uid_t uid,
- const audio_config_t *config,
- audio_output_flags_t *flags,
- audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId);
+ status_t getOutputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t *flags,
+ audio_port_handle_t *selectedDeviceId,
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs) override;
virtual status_t startOutput(audio_port_handle_t portId);
virtual status_t stopOutput(audio_port_handle_t portId);
virtual void releaseOutput(audio_port_handle_t portId);
@@ -140,9 +143,17 @@
virtual status_t stopInput(audio_port_handle_t portId);
virtual void releaseInput(audio_port_handle_t portId);
virtual void closeAllInputs();
- virtual void initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax);
+ /**
+ * @brief initStreamVolume: even if the engine volume files provide min and max, keep this
+ * API for compatibility reasons.
+ * AudioServer will get the min and max and may overwrite them if:
+ * - a property is used (highest priority)
+ * - they are not defined (-1 by convention), i.e. when still using APM volume table XML files
+ * @param stream to be considered
+ * @param indexMin to set
+ * @param indexMax to set
+ */
+ virtual void initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
int index,
audio_devices_t device);
@@ -151,9 +162,15 @@
audio_devices_t device);
// return the strategy corresponding to a given stream type
- virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
- // return the strategy corresponding to the given audio attributes
- virtual routing_strategy getStrategyForAttr(const audio_attributes_t *attr);
+ virtual uint32_t getStrategyForStream(audio_stream_type_t stream)
+ {
+ return streamToStrategy(stream);
+ }
+ product_strategy_t streamToStrategy(audio_stream_type_t stream) const
+ {
+ auto attributes = mEngine->getAttributesForStreamType(stream);
+ return mEngine->getProductStrategyForAttributes(attributes);
+ }
// return the enabled output devices for the given stream type
virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
@@ -239,13 +256,37 @@
bool reported);
virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
- // return the strategy corresponding to a given stream type
- routing_strategy getStrategy(audio_stream_type_t stream) const;
+ virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats);
virtual void setAppState(uid_t uid, app_state_t state);
virtual bool isHapticPlaybackSupported();
+ virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies)
+ {
+ return mEngine->listAudioProductStrategies(strategies);
+ }
+
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy)
+ {
+ productStrategy = mEngine->getProductStrategyForAttributes(aa.getAttributes());
+ return productStrategy != PRODUCT_STRATEGY_NONE ? NO_ERROR : BAD_VALUE;
+ }
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+ {
+ return mEngine->listAudioVolumeGroups(groups);
+ }
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+ {
+ volumeGroup = mEngine->getVolumeGroupForAttributes(aa.getAttributes());
+ return volumeGroup != VOLUME_GROUP_NONE ? NO_ERROR : BAD_VALUE;
+ }
+
protected:
// A constructor that allows more fine-grained control over initialization process,
// used in automatic tests.
@@ -291,42 +332,28 @@
{
return mAvailableInputDevices;
}
- virtual IVolumeCurvesCollection &getVolumeCurves() { return *mVolumeCurves; }
virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
{
return mDefaultOutputDevice;
}
+ IVolumeCurves &getVolumeCurves(const audio_attributes_t &attr)
+ {
+ auto *curves = mEngine->getVolumeCurvesForAttributes(attr);
+ ALOG_ASSERT(curves != nullptr, "No curves for attributes %s", toString(attr).c_str());
+ return *curves;
+ }
+ IVolumeCurves &getVolumeCurves(audio_stream_type_t stream)
+ {
+ auto *curves = mEngine->getVolumeCurvesForStreamType(stream);
+ ALOG_ASSERT(curves != nullptr, "No curves for stream %s", toString(stream).c_str());
+ return *curves;
+ }
+
void addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc);
void removeOutput(audio_io_handle_t output);
void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
- // return appropriate device for streams handled by the specified strategy according to current
- // phone state, connected devices...
- // if fromCache is true, the device is returned from mDeviceForStrategy[],
- // otherwise it is determine by current state
- // (device connected,phone state, force use, a2dp output...)
- // This allows to:
- // 1 speed up process when the state is stable (when starting or stopping an output)
- // 2 access to either current device selection (fromCache == true) or
- // "future" device selection (fromCache == false) when called from a context
- // where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
- // before updateDevicesAndOutputs() is called.
- virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
- bool fromCache)
- {
- return getDevicesForStrategy(strategy, fromCache).types();
- }
-
- DeviceVector getDevicesForStrategy(routing_strategy strategy, bool fromCache);
-
- bool isStrategyActive(const sp<AudioOutputDescriptor>& outputDesc, routing_strategy strategy,
- uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
-
- bool isStrategyActiveOnSameModule(const sp<SwAudioOutputDescriptor>& outputDesc,
- routing_strategy strategy, uint32_t inPastMs = 0,
- nsecs_t sysTime = 0) const;
-
// change the route of the specified output. Returns the number of ms we have slept to
// allow new routing to take effect in certain cases.
uint32_t setOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
@@ -345,9 +372,6 @@
status_t resetInputDevice(audio_io_handle_t input,
audio_patch_handle_t *patchHandle = NULL);
- // select input device corresponding to requested audio source
- sp<DeviceDescriptor> getDeviceForAttributes(const audio_attributes_t &attributes);
-
// compute the actual volume for a given stream according to the requested index and a particular
// device
virtual float computeVolume(audio_stream_type_t stream,
@@ -368,8 +392,16 @@
void applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc,
audio_devices_t device, int delayMs = 0, bool force = false);
- // Mute or unmute all streams handled by the specified strategy on the specified output
- void setStrategyMute(routing_strategy strategy,
+ /**
+ * @brief setStrategyMute Mute or unmute all active clients on the considered output
+ * following the given strategy.
+ * @param strategy to be considered
+ * @param on true for mute, false for unmute
+ * @param outputDesc to be considered
+ * @param delayMs
+ * @param device
+ */
+ void setStrategyMute(product_strategy_t strategy,
bool on,
const sp<AudioOutputDescriptor>& outputDesc,
int delayMs = 0,
@@ -415,26 +447,32 @@
// A2DP suspend status is rechecked.
void checkForDeviceAndOutputChanges(std::function<bool()> onOutputsChecked = nullptr);
- // checks and if necessary changes outputs used for all strategies.
- // must be called every time a condition that affects the output choice for a given strategy
- // changes: connected device, phone state, force use...
- // Must be called before updateDevicesAndOutputs()
- void checkOutputForStrategy(routing_strategy strategy);
+ /**
+ * @brief checkOutputForAttributes checks and if necessary changes outputs used for the
+ * given audio attributes.
+ * Must be called every time a condition that affects the output choice for the given
+ * attributes changes: connected device, phone state, force use...
+ * Must be called before updateDevicesAndOutputs()
+ * @param attr to be considered
+ */
+ void checkOutputForAttributes(const audio_attributes_t &attr);
- // Same as checkOutputForStrategy() but for a all strategies in order of priority
+ bool followsSameRouting(const audio_attributes_t &lAttr,
+ const audio_attributes_t &rAttr) const;
+
+ /**
+ * @brief checkOutputForAllStrategies Same as @see checkOutputForAttributes()
+ * but for all product strategies in order of priority
+ */
void checkOutputForAllStrategies();
+ // Same as checkOutputForAttributes but for secondary outputs. Makes sure that if a secondary
+ // output condition changes, the track is properly rerouted
+ void checkSecondaryOutputs();
+
// manages A2DP output suspend/restore according to phone state and BT SCO usage
void checkA2dpSuspend();
- template <class IoDescriptor, class Filter>
- sp<DeviceDescriptor> findPreferredDevice(IoDescriptor& desc, Filter filter,
- bool& active, const DeviceVector& devices);
-
- template <class IoCollection, class Filter>
- sp<DeviceDescriptor> findPreferredDevice(IoCollection& ioCollection, Filter filter,
- const DeviceVector& devices);
-
// selects the most appropriate device on output for current state
// must be called every time a condition that affects the device choice for a given output is
// changed: connected device, phone state, force use, output start, output stop..
@@ -442,11 +480,14 @@
DeviceVector getNewOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
bool fromCache);
- // updates cache of device used by all strategies (mDeviceForStrategy[])
- // must be called every time a condition that affects the device choice for a given strategy is
- // changed: connected device, phone state, force use...
- // cached values are used by getDeviceForStrategy() if parameter fromCache is true.
- // Must be called after checkOutputForAllStrategies()
+ /**
+ * @brief updateDevicesAndOutputs: updates the engine's cache of devices
+ * must be called every time a condition that affects the device choice is changed:
+ * connected device, phone state, force use...
+ * cached values are used by getOutputDevicesForStream()/getDevicesForAttributes() if
+ * parameter fromCache is true.
+ * Must be called after checkOutputForAllStrategies()
+ */
void updateDevicesAndOutputs();
// selects the most appropriate device on input for current state
@@ -465,13 +506,19 @@
SortedVector<audio_io_handle_t> getOutputsForDevices(
const DeviceVector &devices, const SwAudioOutputCollection& openOutputs);
- // mute/unmute strategies using an incompatible device combination
- // if muting, wait for the audio in pcm buffer to be drained before proceeding
- // if unmuting, unmute only after the specified delay
- // Returns the number of ms waited
- virtual uint32_t checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t prevDeviceType,
- uint32_t delayMs);
+ /**
+ * @brief checkDeviceMuteStrategies mute/unmute strategies
+ * using an incompatible device combination.
+ * if muting, wait for the audio in pcm buffer to be drained before proceeding
+ * if unmuting, unmute only after the specified delay
+ * @param outputDesc
+ * @param prevDevices
+ * @param delayMs
+ * @return the number of ms waited
+ */
+ virtual uint32_t checkDeviceMuteStrategies(const sp<AudioOutputDescriptor>& outputDesc,
+ const DeviceVector &prevDevices,
+ uint32_t delayMs);
audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
@@ -512,6 +559,13 @@
return mAudioPatches.removeAudioPatch(handle);
}
+ bool isPrimaryModule(const sp<HwModule> &module) const
+ {
+ if (module == 0 || !hasPrimaryOutput()) {
+ return false;
+ }
+ return module->getHandle() == mPrimaryOutput->getModuleHandle();
+ }
DeviceVector availablePrimaryOutputDevices() const
{
if (!hasPrimaryOutput()) {
@@ -558,15 +612,22 @@
void clearAudioPatches(uid_t uid);
void clearSessionRoutes(uid_t uid);
- void checkStrategyRoute(routing_strategy strategy, audio_io_handle_t ouptutToSkip);
+
+ /**
+ * @brief checkStrategyRoute: when an output is being rerouted, reconsider each output
+ * that may host a strategy playing on the considered output.
+ * @param ps product strategy that initiated the rerouting
+ * @param ouptutToSkip output that initiated the rerouting
+ */
+ void checkStrategyRoute(product_strategy_t ps, audio_io_handle_t ouptutToSkip);
status_t hasPrimaryOutput() const { return mPrimaryOutput != 0; }
status_t connectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
status_t disconnectAudioSource(const sp<SourceClientDescriptor>& sourceDesc);
- sp<SourceClientDescriptor> getSourceForStrategyOnOutput(audio_io_handle_t output,
- routing_strategy strategy);
+ sp<SourceClientDescriptor> getSourceForAttributesOnOutput(audio_io_handle_t output,
+ const audio_attributes_t &attr);
void cleanUpForDevice(const sp<DeviceDescriptor>& deviceDesc);
@@ -594,24 +655,15 @@
bool mLimitRingtoneVolume; // limit ringtone volume to music volume if headset connected
- /**
- * @brief mDevicesForStrategy vector of devices that are assigned for a given strategy.
- * Note: in case of removal of device (@see setDeviceConnectionState), the device descriptor
- * will be removed from the @see mAvailableOutputDevices or @see mAvailableInputDevices
- * but the devices for strategies will be reevaluated within the
- * @see setDeviceConnectionState function.
- */
- DeviceVector mDevicesForStrategy[NUM_STRATEGIES];
-
float mLastVoiceVolume; // last voice volume value sent to audio HAL
bool mA2dpSuspended; // true if A2DP output is suspended
- std::unique_ptr<IVolumeCurvesCollection> mVolumeCurves; // Volume Curves per use case and device category
EffectDescriptorCollection mEffects; // list of registered audio effects
sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
HwModuleCollection mHwModules; // contains only modules that have been loaded successfully
HwModuleCollection mHwModulesAll; // normally not needed, used during construction and for
// dumps
+
AudioPolicyConfig mConfig;
std::atomic<uint32_t> mAudioPortGeneration;
@@ -697,7 +749,9 @@
uid_t uid,
const audio_config_t *config,
audio_output_flags_t *flags,
- audio_port_handle_t *selectedDeviceId);
+ audio_port_handle_t *selectedDeviceId,
+ bool *isRequestedDeviceForExclusiveUse,
+ std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs);
// internal method to return the output handle for the given device and format
audio_io_handle_t getOutputForDevices(
const DeviceVector &devices,
@@ -705,16 +759,26 @@
audio_stream_type_t stream,
const audio_config_t *config,
audio_output_flags_t *flags);
- // internal method to return the input handle for the given device and format
+
+ /**
+ * @brief getInputForDevice selects an input handle for a given input device and
+ * requester context
+ * @param device to be used by requester, selected by policy mix rules or engine
+ * @param session requester session id
+ * @param uid requester uid
+ * @param attributes requester audio attributes (e.g. input source and tags matter)
+ * @param config requester audio configuration (e.g. sample rate, format, channel mask).
+ * @param flags requester input flags
+ * @param policyMix may be null, policy rules to be followed by the requester
+ * @return input io handle aka unique input identifier selected for this device.
+ */
audio_io_handle_t getInputForDevice(const sp<DeviceDescriptor> &device,
audio_session_t session,
- audio_source_t inputSource,
+ const audio_attributes_t &attributes,
const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix);
- // internal function to derive a stream type value from audio attributes
- audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
// event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
// returns 0 if no mute/unmute event happened, the largest latency of the device where
// the mute/unmute happened
@@ -722,16 +786,12 @@
uint32_t setBeaconMute(bool mute);
bool isValidAttributes(const audio_attributes_t *paa);
- // select input device corresponding to requested audio source and return associated policy
- // mix if any. Calls getDeviceForInputSource().
- sp<DeviceDescriptor> getDeviceAndMixForAttributes(const audio_attributes_t &attributes,
- AudioMix **policyMix = NULL);
-
// Called by setDeviceConnectionState().
status_t setDeviceConnectionStateInt(audio_devices_t deviceType,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name);
+ const char *device_name,
+ audio_format_t encodedFormat);
void updateMono(audio_io_handle_t output) {
AudioParameter param;
param.addInt(String8(AudioParameter::keyMonoOutput), (int)mMasterMono);
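The header changes above move AudioPolicyManager from stream/strategy-keyed entry points (routing_strategy, audio_source_t) to attribute-keyed ones (product_strategy_t, audio_attributes_t). A minimal, illustrative sketch of what a caller of the new getInputForDevice() signature now carries, assuming the standard AUDIO_ATTRIBUTES_INITIALIZER helper from system/audio.h:

    // Illustrative only: the input source now travels inside the attributes.
    audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
    attr.source = AUDIO_SOURCE_MIC;
    // audio_io_handle_t input = getInputForDevice(device, session, attr, &config, flags, policyMix);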
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 919a90d..4947714 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -23,7 +23,6 @@
#include <memory>
#include <cutils/misc.h>
#include <media/AudioEffect.h>
-#include <media/AudioPolicyHelper.h>
#include <media/EffectsConfig.h>
#include <mediautils/ServiceUtilities.h>
#include <system/audio.h>
@@ -398,8 +397,7 @@
ALOGE("addStreamDefaultEffect(): Null uuid or type uuid pointer");
return BAD_VALUE;
}
-
- audio_stream_type_t stream = audio_usage_to_stream_type(usage);
+ audio_stream_type_t stream = AudioSystem::attributesToStreamType(attributes_initializer(usage));
if (stream < AUDIO_STREAM_MIN || stream >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("addStreamDefaultEffect(): Unsupported stream type %d", stream);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 2c904d9..c19016f 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -19,9 +19,9 @@
#include "AudioPolicyService.h"
#include "TypeConverter.h"
-#include <media/AudioPolicyHelper.h>
#include <media/MediaAnalyticsItem.h>
#include <mediautils/ServiceUtilities.h>
+#include <media/AudioPolicy.h>
#include <utils/Log.h>
namespace android {
@@ -32,7 +32,8 @@
status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
@@ -49,7 +50,7 @@
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
return mAudioPolicyManager->setDeviceConnectionState(device, state,
- device_address, device_name);
+ device_address, device_name, encodedFormat);
}
audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
@@ -66,7 +67,8 @@
status_t AudioPolicyService::handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name)
+ const char *device_name,
+ audio_format_t encodedFormat)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
@@ -79,7 +81,7 @@
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
return mAudioPolicyManager->handleDeviceConfigChange(device, device_address,
- device_name);
+ device_name, encodedFormat);
}
status_t AudioPolicyService::setPhoneState(audio_mode_t state)
@@ -174,7 +176,8 @@
const audio_config_t *config,
audio_output_flags_t flags,
audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId)
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
@@ -192,7 +195,8 @@
AutoCallerClear acc;
status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
config,
- &flags, selectedDeviceId, portId);
+ &flags, selectedDeviceId, portId,
+ secondaryOutputs);
// FIXME: Introduce a way to check for the telephony device before opening the output
if ((result == NO_ERROR) &&
@@ -204,9 +208,10 @@
flags = originalFlags;
*selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
*portId = AUDIO_PORT_HANDLE_NONE;
- result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
- config,
- &flags, selectedDeviceId, portId);
+ secondaryOutputs->clear();
+ result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, config,
+ &flags, selectedDeviceId, portId,
+ secondaryOutputs);
}
if (result == NO_ERROR) {
@@ -445,28 +450,14 @@
return NO_ERROR;
}
-// this is replicated from frameworks/av/media/libaudioclient/AudioRecord.cpp
-// XXX -- figure out how to put it into a common, shared location
-
-static std::string audioSourceString(audio_source_t value) {
- std::string source;
- if (SourceTypeConverter::toString(value, source)) {
- return source;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
std::string AudioPolicyService::getDeviceTypeStrForPortId(audio_port_handle_t portId) {
- std::string typeStr;
struct audio_port port = {};
port.id = portId;
status_t status = mAudioPolicyManager->getAudioPort(&port);
if (status == NO_ERROR && port.type == AUDIO_PORT_TYPE_DEVICE) {
- deviceToString(port.ext.device.type, typeStr);
+ return toString(port.ext.device.type);
}
- return typeStr;
+ return {};
}
status_t AudioPolicyService::startInput(audio_port_handle_t portId)
@@ -523,13 +514,13 @@
static constexpr char kAudioPolicyActiveDevice[] =
"android.media.audiopolicy.active.device";
- MediaAnalyticsItem *item = new MediaAnalyticsItem(kAudioPolicy);
+ MediaAnalyticsItem *item = MediaAnalyticsItem::create(kAudioPolicy);
if (item != NULL) {
item->setInt32(kAudioPolicyStatus, status);
item->setCString(kAudioPolicyRqstSrc,
- audioSourceString(client->attributes.source).c_str());
+ toString(client->attributes.source).c_str());
item->setInt32(kAudioPolicyRqstSession, client->session);
if (client->opPackageName.size() != 0) {
item->setCString(kAudioPolicyRqstPkg,
@@ -549,7 +540,7 @@
if (other->active) {
// keeps the last of the clients marked active
item->setCString(kAudioPolicyActiveSrc,
- audioSourceString(other->attributes.source).c_str());
+ toString(other->attributes.source).c_str());
item->setInt32(kAudioPolicyActiveSession, other->session);
if (other->opPackageName.size() != 0) {
item->setCString(kAudioPolicyActivePkg,
@@ -701,11 +692,12 @@
uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
{
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
- return 0;
+ return PRODUCT_STRATEGY_NONE;
}
if (mAudioPolicyManager == NULL) {
- return 0;
+ return PRODUCT_STRATEGY_NONE;
}
+ // DO NOT LOCK: this may be called from AudioFlinger with its lock held, so locking here could deadlock
AutoCallerClear acc;
return mAudioPolicyManager->getStrategyForStream(stream);
}
@@ -1027,9 +1019,14 @@
status_t AudioPolicyService::registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration)
{
Mutex::Autolock _l(mLock);
- if(!modifyAudioRoutingAllowed()) {
+
+ // loopback|render mixes only need a MediaProjection (checked in the caller, AudioService.java)
+ bool needModifyAudioRouting = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
+ return !is_mix_loopback_render(mix.mRouteFlags); });
+ if (needModifyAudioRouting && !modifyAudioRoutingAllowed()) {
return PERMISSION_DENIED;
}
+
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
@@ -1138,6 +1135,17 @@
surroundFormatsEnabled, reported);
}
+status_t AudioPolicyService::getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->getHwOffloadEncodingFormatsSupportedForA2DP(formats);
+}
+
status_t AudioPolicyService::setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled)
{
if (mAudioPolicyManager == NULL) {
@@ -1173,4 +1181,41 @@
return mAudioPolicyManager->isHapticPlaybackSupported();
}
+status_t AudioPolicyService::listAudioProductStrategies(AudioProductStrategyVector &strategies)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->listAudioProductStrategies(strategies);
+}
+
+status_t AudioPolicyService::getProductStrategyFromAudioAttributes(
+ const AudioAttributes &aa, product_strategy_t &productStrategy)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->getProductStrategyFromAudioAttributes(aa, productStrategy);
+}
+
+status_t AudioPolicyService::listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->listAudioVolumeGroups(groups);
+}
+
+status_t AudioPolicyService::getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->getVolumeGroupFromAudioAttributes(aa, volumeGroup);
+}
} // namespace android
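registerPolicyMixes() now requires MODIFY_AUDIO_ROUTING only when at least one mix is not a pure loopback|render mix (those are covered by the MediaProjection check in AudioService.java). The new predicate isolated as a sketch, under the assumption that is_mix_loopback_render() simply tests the mix route flags:

    #include <algorithm>

    static bool needsModifyAudioRouting(const Vector<AudioMix>& mixes) {
        // Permission is needed as soon as any mix is not loopback|render.
        return std::any_of(mixes.begin(), mixes.end(), [](const AudioMix& mix) {
            return !is_mix_loopback_render(mix.mRouteFlags);
        });
    }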
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 416817f..76ac191 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -383,6 +383,8 @@
// OR The client is an accessibility service
// AND is on TOP OR latest started
// AND the source is VOICE_RECOGNITION or HOTWORD
+// OR the source is one of: AUDIO_SOURCE_VOICE_DOWNLINK, AUDIO_SOURCE_VOICE_UPLINK,
+// AUDIO_SOURCE_VOICE_CALL
// OR Any other client
// AND The assistant is not on TOP
// AND is on TOP OR latest started
@@ -463,6 +465,10 @@
(source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
forceIdle = false;
}
+ } else if (source == AUDIO_SOURCE_VOICE_DOWNLINK ||
+ source == AUDIO_SOURCE_VOICE_CALL ||
+ (source == AUDIO_SOURCE_VOICE_UPLINK)) {
+ forceIdle = false;
} else {
if (!isAssistantOnTop && (isOnTop || isLatest) &&
(!isSensitiveActive || isLatestSensitive)) {
@@ -880,9 +886,8 @@
if (it == mA11yUids.end()) {
continue;
}
- if (uid.second.second == ActivityManager::PROCESS_STATE_TOP ||
- uid.second.second == ActivityManager::PROCESS_STATE_FOREGROUND_SERVICE ||
- uid.second.second == ActivityManager::PROCESS_STATE_BOUND_FOREGROUND_SERVICE) {
+ if (uid.second.second >= ActivityManager::PROCESS_STATE_TOP
+ && uid.second.second <= ActivityManager::PROCESS_STATE_BOUND_FOREGROUND_SERVICE) {
return true;
}
}
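The accessibility check above switches from listing individual process states to a range test; it relies on the ActivityManager::PROCESS_STATE_* constants being ordered from most to least foreground, so every state between TOP and BOUND_FOREGROUND_SERVICE (inclusive) now qualifies. A sketch of the predicate on its own (the helper name is hypothetical):

    static bool isForegroundish(int32_t procState) {
        // Assumes the numeric ordering of process states relied on by the check above.
        return procState >= ActivityManager::PROCESS_STATE_TOP
            && procState <= ActivityManager::PROCESS_STATE_BOUND_FOREGROUND_SERVICE;
    }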
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 959e757..e19b4e5 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -61,27 +61,30 @@
virtual status_t setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
- const char *device_name);
+ const char *device_name,
+ audio_format_t encodedFormat);
virtual audio_policy_dev_state_t getDeviceConnectionState(
audio_devices_t device,
const char *device_address);
virtual status_t handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
- const char *device_name);
+ const char *device_name,
+ audio_format_t encodedFormat);
virtual status_t setPhoneState(audio_mode_t state);
virtual status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
virtual audio_io_handle_t getOutput(audio_stream_type_t stream);
- virtual status_t getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session,
- audio_stream_type_t *stream,
- pid_t pid,
- uid_t uid,
- const audio_config_t *config,
- audio_output_flags_t flags,
- audio_port_handle_t *selectedDeviceId,
- audio_port_handle_t *portId);
+ status_t getOutputForAttr(const audio_attributes_t *attr,
+ audio_io_handle_t *output,
+ audio_session_t session,
+ audio_stream_type_t *stream,
+ pid_t pid,
+ uid_t uid,
+ const audio_config_t *config,
+ audio_output_flags_t flags,
+ audio_port_handle_t *selectedDeviceId,
+ audio_port_handle_t *portId,
+ std::vector<audio_io_handle_t> *secondaryOutputs) override;
virtual status_t startOutput(audio_port_handle_t portId);
virtual status_t stopOutput(audio_port_handle_t portId);
virtual void releaseOutput(audio_port_handle_t portId);
@@ -218,6 +221,8 @@
audio_format_t *surroundFormats,
bool *surroundFormatsEnabled,
bool reported);
+ virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
+ std::vector<audio_format_t> *formats);
virtual status_t setSurroundFormatEnabled(audio_format_t audioFormat, bool enabled);
virtual status_t setAssistantUid(uid_t uid);
@@ -225,6 +230,15 @@
virtual bool isHapticPlaybackSupported();
+ virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy);
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups);
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup);
+
status_t doStopOutput(audio_port_handle_t portId);
void doReleaseOutput(audio_port_handle_t portId);
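getOutputForAttr() gains a secondaryOutputs out-parameter so a single client track can also be fed to additional outputs (e.g. for dynamic policy mixes). A minimal caller-side sketch; the aps handle and surrounding variables are placeholders:

    std::vector<audio_io_handle_t> secondaryOutputs;
    status_t res = aps->getOutputForAttr(&attr, &output, session, &stream, pid, uid,
                                         &config, flags, &selectedDeviceId, &portId,
                                         &secondaryOutputs);
    // On success, secondaryOutputs lists any extra outputs the client must also feed.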
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
index 2ccb542..97be44c 100644
--- a/services/audiopolicy/tests/Android.mk
+++ b/services/audiopolicy/tests/Android.mk
@@ -4,8 +4,6 @@
LOCAL_C_INCLUDES := \
frameworks/av/services/audiopolicy \
- frameworks/av/services/audiopolicy/common/include \
- frameworks/av/services/audiopolicy/engine/interface \
$(call include-path-for, audio-utils) \
LOCAL_SHARED_LIBRARIES := \
@@ -18,6 +16,10 @@
LOCAL_STATIC_LIBRARIES := \
libaudiopolicycomponents \
+LOCAL_HEADER_LIBRARIES := \
+ libaudiopolicycommon \
+ libaudiopolicyengine_interface_headers
+
LOCAL_SRC_FILES := \
audiopolicymanager_tests.cpp \
@@ -29,6 +31,8 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+LOCAL_COMPATIBILITY_SUITE := device-tests
+
include $(BUILD_NATIVE_TEST)
# system/audio.h utilities test
@@ -55,4 +59,6 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+LOCAL_COMPATIBILITY_SUITE := device-tests
+
include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 24326bb..de5670c 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -117,9 +117,14 @@
explicit PatchCountCheck(AudioPolicyManagerTestClient *client)
: mClient{client},
mInitialCount{mClient->getActivePatchesCount()} {}
- void assertDelta(int delta) const {
- ASSERT_EQ(mInitialCount + delta, mClient->getActivePatchesCount()); }
- void assertNoChange() const { assertDelta(0); }
+ int deltaFromSnapshot() const {
+ size_t currentCount = mClient->getActivePatchesCount();
+ if (mInitialCount <= currentCount) {
+ return currentCount - mInitialCount;
+ } else {
+ return -(static_cast<int>(mInitialCount - currentCount));
+ }
+ }
private:
const AudioPolicyManagerTestClient *mClient;
const size_t mInitialCount;
@@ -139,7 +144,7 @@
int sampleRate,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
audio_port_handle_t *portId = nullptr);
- PatchCountCheck snapPatchCount() { return PatchCountCheck(mClient.get()); }
+ PatchCountCheck snapshotPatchCount() { return PatchCountCheck(mClient.get()); }
std::unique_ptr<AudioPolicyManagerTestClient> mClient;
std::unique_ptr<AudioPolicyTestManager> mManager;
@@ -209,7 +214,7 @@
*portId = AUDIO_PORT_HANDLE_NONE;
ASSERT_EQ(OK, mManager->getOutputForAttr(
&attr, &output, AUDIO_SESSION_NONE, &stream, 0 /*uid*/, &config, &flags,
- selectedDeviceId, portId));
+ selectedDeviceId, portId, {}));
ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
}
@@ -225,7 +230,7 @@
TEST_F(AudioPolicyManagerTest, CreateAudioPatchFailure) {
audio_patch patch{};
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(nullptr, &handle, 0));
ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, nullptr, 0));
ASSERT_EQ(BAD_VALUE, mManager->createAudioPatch(&patch, &handle, 0));
@@ -252,20 +257,20 @@
ASSERT_EQ(INVALID_OPERATION, mManager->createAudioPatch(&patch, &handle, 0));
// Verify that the handle is left unchanged.
ASSERT_EQ(AUDIO_PATCH_HANDLE_NONE, handle);
- patchCount.assertNoChange();
+ ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
TEST_F(AudioPolicyManagerTest, CreateAudioPatchFromMix) {
audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
uid_t uid = 42;
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
ASSERT_FALSE(mManager->getConfig().getAvailableInputDevices().isEmpty());
PatchBuilder patchBuilder;
patchBuilder.addSource(mManager->getConfig().getAvailableInputDevices()[0]).
addSink(mManager->getConfig().getDefaultOutputDevice());
ASSERT_EQ(NO_ERROR, mManager->createAudioPatch(patchBuilder.patch(), &handle, uid));
ASSERT_NE(AUDIO_PATCH_HANDLE_NONE, handle);
- patchCount.assertDelta(1);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
}
// TODO: Add patch creation tests that involve already existing patch
@@ -350,84 +355,82 @@
}
TEST_F(AudioPolicyManagerTestMsd, PatchCreationOnSetForceUse) {
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
mManager->setForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND,
AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS);
- patchCount.assertDelta(1);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
}
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertDelta(1);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
}
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertDelta(1);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
}
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertDelta(1);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertDelta(1);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
}
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertNoChange();
+ ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrFormatSwitching) {
// Switch between formats that are supported and not supported by MSD.
{
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId, portId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
&portId);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertDelta(1);
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
mManager->releaseOutput(portId);
- patchCount.assertDelta(1); // compared to the state at the block entry
- // TODO: make PatchCountCheck asserts more obvious. It's easy to
- // miss the fact that it is immutable.
+ ASSERT_EQ(1, patchCount.deltaFromSnapshot());
}
{
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId, portId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
&portId);
ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertDelta(-1);
+ ASSERT_EQ(-1, patchCount.deltaFromSnapshot());
mManager->releaseOutput(portId);
- patchCount.assertNoChange();
+ ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
{
- const PatchCountCheck patchCount = snapPatchCount();
+ const PatchCountCheck patchCount = snapshotPatchCount();
audio_port_handle_t selectedDeviceId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
- patchCount.assertNoChange();
+ ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
}
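The reworked test helper no longer asserts internally; it only reports a signed delta against the patch count captured at construction, so gtest failures point at the call site and negative deltas (patch removal) are expressible. Typical use, matching the tests above:

    const PatchCountCheck patchCount = snapshotPatchCount();   // snapshot current patch count
    // ... perform an operation expected to create exactly one audio patch ...
    ASSERT_EQ(1, patchCount.deltaFromSnapshot());               // 0 or negative deltas also checkable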
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 851dd69..7ec0e4c 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -39,6 +39,10 @@
"api1/client2/CaptureSequencer.cpp",
"api1/client2/ZslProcessor.cpp",
"api2/CameraDeviceClient.cpp",
+ "api2/CompositeStream.cpp",
+ "api2/DepthCompositeStream.cpp",
+ "api2/HeicEncoderInfoManager.cpp",
+ "api2/HeicCompositeStream.cpp",
"device1/CameraHardwareInterface.cpp",
"device3/Camera3Device.cpp",
"device3/Camera3Stream.cpp",
@@ -60,11 +64,14 @@
"hidl/HidlCameraService.cpp",
"utils/CameraTraces.cpp",
"utils/AutoConditionLock.cpp",
+ "utils/ExifUtils.cpp",
"utils/TagMonitor.cpp",
"utils/LatencyHistogram.cpp",
],
shared_libs: [
+ "libdl",
+ "libexif",
"libui",
"liblog",
"libutilscallstack",
@@ -82,14 +89,18 @@
"libhidlbase",
"libhidltransport",
"libjpeg",
+ "libmedia_omx",
"libmemunreachable",
"libsensorprivacy",
+ "libstagefright",
"libstagefright_foundation",
+ "libyuv",
"android.frameworks.cameraservice.common@2.0",
"android.frameworks.cameraservice.service@2.0",
"android.frameworks.cameraservice.device@2.0",
"android.hardware.camera.common@1.0",
"android.hardware.camera.provider@2.4",
+ "android.hardware.camera.provider@2.5",
"android.hardware.camera.device@1.0",
"android.hardware.camera.device@3.2",
"android.hardware.camera.device@3.3",
@@ -108,6 +119,8 @@
"system/media/private/camera/include",
"frameworks/native/include/media/openmax",
"frameworks/av/media/ndk",
+ "external/dynamic_depth/includes",
+ "external/dynamic_depth/internal",
],
export_include_dirs: ["."],
@@ -116,6 +129,45 @@
"-Wall",
"-Wextra",
"-Werror",
+ "-Wno-ignored-qualifiers",
+ ],
+
+}
+
+cc_library_shared {
+ name: "libdepthphoto",
+
+ srcs: [
+ "utils/ExifUtils.cpp",
+ "common/DepthPhotoProcessor.cpp",
+ ],
+
+ shared_libs: [
+ "libimage_io",
+ "libdynamic_depth",
+ "libxml2",
+ "liblog",
+ "libutilscallstack",
+ "libutils",
+ "libcutils",
+ "libjpeg",
+ "libmemunreachable",
+ "libexif",
+ "libcamera_client",
+ ],
+
+ include_dirs: [
+ "external/dynamic_depth/includes",
+ "external/dynamic_depth/internal",
+ ],
+
+ export_include_dirs: ["."],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ "-Wno-ignored-qualifiers",
],
}
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c3113bf..e06897f 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -49,6 +49,7 @@
#include <hardware/hardware.h>
#include "hidl/HidlCameraService.h"
#include <hidl/HidlTransportSupport.h>
+#include <hwbinder/IPCThreadState.h>
#include <memunreachable/memunreachable.h>
#include <media/AudioSystem.h>
#include <media/IMediaHTTPService.h>
@@ -226,7 +227,7 @@
Mutex::Autolock lock(mStatusListenerLock);
for (auto& i : mListenerList) {
- i->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
+ i.second->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
}
}
@@ -1287,6 +1288,18 @@
return ret;
}
+bool CameraService::shouldRejectHiddenCameraConnection(const String8 & cameraId) {
+ // If the thread serving this call is not a hwbinder thread and the caller
+ // isn't the cameraserver itself, and the camera id being requested is to be
+ // publically hidden, we should reject the connection.
+ if (!hardware::IPCThreadState::self()->isServingCall() &&
+ CameraThreadState::getCallingPid() != getpid() &&
+ mCameraProviderManager->isPublicallyHiddenSecureCamera(cameraId.c_str())) {
+ return true;
+ }
+ return false;
+}
+
Status CameraService::connectDevice(
const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
const String16& cameraId,
@@ -1299,6 +1312,7 @@
Status ret = Status::ok();
String8 id = String8(cameraId);
sp<CameraDeviceClient> client = nullptr;
+
ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
/*api1CameraId*/-1,
CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
@@ -1330,6 +1344,14 @@
(halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
static_cast<int>(effectiveApiLevel));
+ if (shouldRejectHiddenCameraConnection(cameraId)) {
+ ALOGW("Attempting to connect to system-only camera id %s, connection rejected",
+ cameraId.c_str());
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available",
+ cameraId.string());
+
+ }
sp<CLIENT> client = nullptr;
{
// Acquire mServiceLock and prevent other clients from connecting
@@ -1632,9 +1654,60 @@
return Status::ok();
}
+Status CameraService::notifyDeviceStateChange(int64_t newState) {
+ const int pid = CameraThreadState::getCallingPid();
+ const int selfPid = getpid();
+
+ // Permission checks
+ if (pid != selfPid) {
+ // Ensure we're being called by system_server, or similar process with
+ // permissions to notify the camera service about system events
+ if (!checkCallingPermission(
+ String16("android.permission.CAMERA_SEND_SYSTEM_EVENTS"))) {
+ const int uid = CameraThreadState::getCallingUid();
+ ALOGE("Permission Denial: cannot send updates to camera service about device"
+ " state changes from pid=%d, uid=%d", pid, uid);
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "No permission to send updates to camera service about device state"
+ " changes from pid=%d, uid=%d", pid, uid);
+ }
+ }
+
+ ATRACE_CALL();
+
+ using hardware::camera::provider::V2_5::DeviceState;
+ hardware::hidl_bitfield<DeviceState> newDeviceState{};
+ if (newState & ICameraService::DEVICE_STATE_BACK_COVERED) {
+ newDeviceState |= DeviceState::BACK_COVERED;
+ }
+ if (newState & ICameraService::DEVICE_STATE_FRONT_COVERED) {
+ newDeviceState |= DeviceState::FRONT_COVERED;
+ }
+ if (newState & ICameraService::DEVICE_STATE_FOLDED) {
+ newDeviceState |= DeviceState::FOLDED;
+ }
+ // Only map vendor bits directly
+ uint64_t vendorBits = static_cast<uint64_t>(newState) & 0xFFFFFFFF00000000l;
+ newDeviceState |= vendorBits;
+
+ ALOGV("%s: New device state 0x%" PRIx64, __FUNCTION__, newDeviceState);
+ Mutex::Autolock l(mServiceLock);
+ mCameraProviderManager->notifyDeviceStateChange(newDeviceState);
+
+ return Status::ok();
+}
+
Status CameraService::addListener(const sp<ICameraServiceListener>& listener,
/*out*/
std::vector<hardware::CameraStatus> *cameraStatuses) {
+ return addListenerHelper(listener, cameraStatuses);
+}
+
+Status CameraService::addListenerHelper(const sp<ICameraServiceListener>& listener,
+ /*out*/
+ std::vector<hardware::CameraStatus> *cameraStatuses,
+ bool isVendorListener) {
+
ATRACE_CALL();
ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
@@ -1649,20 +1722,26 @@
{
Mutex::Autolock lock(mStatusListenerLock);
for (auto& it : mListenerList) {
- if (IInterface::asBinder(it) == IInterface::asBinder(listener)) {
+ if (IInterface::asBinder(it.second) == IInterface::asBinder(listener)) {
ALOGW("%s: Tried to add listener %p which was already subscribed",
__FUNCTION__, listener.get());
return STATUS_ERROR(ERROR_ALREADY_EXISTS, "Listener already registered");
}
}
- mListenerList.push_back(listener);
+ mListenerList.emplace_back(isVendorListener, listener);
}
/* Collect current devices and status */
{
Mutex::Autolock lock(mCameraStatesLock);
for (auto& i : mCameraStates) {
+ if (!isVendorListener &&
+ mCameraProviderManager->isPublicallyHiddenSecureCamera(i.first.c_str())) {
+ ALOGV("Cannot add public listener for hidden system-only %s for pid %d",
+ i.first.c_str(), CameraThreadState::getCallingPid());
+ continue;
+ }
cameraStatuses->emplace_back(i.first, mapToInterface(i.second->getStatus()));
}
}
@@ -1697,7 +1776,7 @@
{
Mutex::Autolock lock(mStatusListenerLock);
for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
- if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
+ if (IInterface::asBinder(it->second) == IInterface::asBinder(listener)) {
mListenerList.erase(it);
return Status::ok();
}
@@ -3033,7 +3112,13 @@
Mutex::Autolock lock(mStatusListenerLock);
for (auto& listener : mListenerList) {
- listener->onStatusChanged(mapToInterface(status), String16(cameraId));
+ if (!listener.first &&
+ mCameraProviderManager->isPublicallyHiddenSecureCamera(cameraId.c_str())) {
+ ALOGV("Skipping camera discovery callback for system-only camera %s",
+ cameraId.c_str());
+ continue;
+ }
+ listener.second->onStatusChanged(mapToInterface(status), String16(cameraId));
}
});
}
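notifyDeviceStateChange() folds the framework's 64-bit device state into the provider HAL's V2_5 DeviceState bitfield: the well-known bits are translated individually and the upper 32 bits are passed through untouched for vendor-defined states. The mapping in isolation, with an illustrative input value:

    using hardware::camera::provider::V2_5::DeviceState;
    int64_t newState = ICameraService::DEVICE_STATE_FOLDED;        // example framework state
    hardware::hidl_bitfield<DeviceState> halState{};
    if (newState & ICameraService::DEVICE_STATE_FOLDED) {
        halState |= DeviceState::FOLDED;
    }
    halState |= static_cast<uint64_t>(newState) & 0xFFFFFFFF00000000l;  // vendor bits pass through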
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index a296198..cf0cef8 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -154,6 +154,8 @@
virtual binder::Status notifySystemEvent(int32_t eventId,
const std::vector<int32_t>& args);
+ virtual binder::Status notifyDeviceStateChange(int64_t newState);
+
// OK = supports api of that version, -EOPNOTSUPP = does not support
virtual binder::Status supportsCameraApi(
const String16& cameraId, int32_t apiVersion,
@@ -173,6 +175,10 @@
virtual status_t shellCommand(int in, int out, int err, const Vector<String16>& args);
+ binder::Status addListenerHelper(const sp<hardware::ICameraServiceListener>& listener,
+ /*out*/
+ std::vector<hardware::CameraStatus>* cameraStatuses, bool isVendor = false);
+
/////////////////////////////////////////////////////////////////////
// Client functionality
@@ -615,6 +621,10 @@
sp<BasicClient>* client,
std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial);
+ // Should an operation attempt on a cameraId be rejected if the camera id is
+ // advertised by the camera HAL as a publically hidden secure camera?
+ bool shouldRejectHiddenCameraConnection(const String8 & cameraId);
+
// Single implementation shared between the various connect calls
template<class CALLBACK, class CLIENT>
binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
@@ -781,7 +791,8 @@
sp<CameraProviderManager> mCameraProviderManager;
// Guarded by mStatusListenerMutex
- std::vector<sp<hardware::ICameraServiceListener>> mListenerList;
+ std::vector<std::pair<bool, sp<hardware::ICameraServiceListener>>> mListenerList;
+
Mutex mStatusListenerLock;
/**
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index c9c216b..162b50f 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1767,6 +1767,7 @@
case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
ALOGW("%s: Received recoverable error %d from HAL - ignoring, requestId %" PRId32,
__FUNCTION__, errorCode, resultExtras.requestId);
+ mCaptureSequencer->notifyError(errorCode, resultExtras);
return;
default:
err = CAMERA_ERROR_UNKNOWN;
@@ -1927,9 +1928,6 @@
void Camera2Client::notifyShutter(const CaptureResultExtras& resultExtras,
nsecs_t timestamp) {
- (void)resultExtras;
- (void)timestamp;
-
ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
__FUNCTION__, resultExtras.requestId, timestamp);
mCaptureSequencer->notifyShutter(resultExtras, timestamp);
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 5029d4b..88799f9 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -117,6 +117,31 @@
}
}
+void CaptureSequencer::notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras) {
+ ATRACE_CALL();
+ bool jpegBufferLost = false;
+ if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER) {
+ sp<Camera2Client> client = mClient.promote();
+ if (client == nullptr) {
+ return;
+ }
+ int captureStreamId = client->getCaptureStreamId();
+ if (captureStreamId == resultExtras.errorStreamId) {
+ jpegBufferLost = true;
+ }
+ } else if (errorCode ==
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST) {
+ if (resultExtras.requestId == mShutterCaptureId) {
+ jpegBufferLost = true;
+ }
+ }
+
+ if (jpegBufferLost) {
+ sp<MemoryBase> emptyBuffer;
+ onCaptureAvailable(/*timestamp*/0, emptyBuffer, /*captureError*/true);
+ }
+}
+
void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
ATRACE_CALL();
ALOGV("%s: New result available.", __FUNCTION__);
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index c23b12d..727dd53 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -65,6 +65,9 @@
void notifyShutter(const CaptureResultExtras& resultExtras,
nsecs_t timestamp);
+ // Notifications about capture errors
+ void notifyError(int32_t errorCode, const CaptureResultExtras& resultExtras);
+
// Notification from the frame processor
virtual void onResultAvailable(const CaptureResult &result);
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index b7020fe..ddfe5e3 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -62,26 +62,6 @@
}
}
-void JpegProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
- // Intentionally left empty
-}
-
-void JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
- ALOGV("%s", __FUNCTION__);
- if (bufferInfo.mError) {
- // Only lock in case of error, since we get one of these for each
- // onFrameAvailable as well, and scheduling may delay this call late
- // enough to run into later preview restart operations, for non-error
- // cases.
- // b/29524651
- ALOGV("%s: JPEG buffer lost", __FUNCTION__);
- Mutex::Autolock l(mInputMutex);
- mCaptureDone = true;
- mCaptureSuccess = false;
- mCaptureDoneSignal.signal();
- }
-}
-
status_t JpegProcessor::updateStream(const Parameters ¶ms) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
@@ -176,13 +156,6 @@
strerror(-res), res);
return res;
}
-
- res = device->addBufferListenerForStream(mCaptureStreamId, this);
- if (res != OK) {
- ALOGE("%s: Camera %d: Can't add buffer listeneri: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- return res;
- }
}
return OK;
}
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index 7187ad9..977f11d 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -25,6 +25,7 @@
#include <gui/CpuConsumer.h>
#include "camera/CameraMetadata.h"
+#include "device3/Camera3StreamBufferListener.h"
namespace android {
@@ -41,8 +42,7 @@
* Still image capture output image processing
*/
class JpegProcessor:
- public Thread, public CpuConsumer::FrameAvailableListener,
- public camera3::Camera3StreamBufferListener {
+ public Thread, public CpuConsumer::FrameAvailableListener {
public:
JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
~JpegProcessor();
@@ -50,15 +50,14 @@
// CpuConsumer listener implementation
void onFrameAvailable(const BufferItem& item);
- // Camera3StreamBufferListener implementation
- void onBufferAcquired(const BufferInfo& bufferInfo) override;
- void onBufferReleased(const BufferInfo& bufferInfo) override;
-
status_t updateStream(const Parameters ¶ms);
status_t deleteStream();
int getStreamId() const;
void dump(int fd, const Vector<String16>& args) const;
+
+ static size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
+
private:
static const nsecs_t kWaitDuration = 10000000; // 10 ms
wp<CameraDeviceBase> mDevice;
@@ -82,7 +81,6 @@
virtual bool threadLoop();
status_t processNewCapture(bool captureSuccess);
- size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
};
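findJpegSize() moves from a private instance method to a public static so code outside JpegProcessor (notably the new composite streams) can locate the usable JPEG payload inside an oversized capture buffer. A hedged usage sketch; the wrapper name is hypothetical and the return-value semantics are those of the existing helper:

    // Hypothetical wrapper: reports how many bytes of jpegBuffer hold actual JPEG data.
    static size_t usableJpegBytes(uint8_t* jpegBuffer, size_t bufferCapacity) {
        return JpegProcessor::findJpegSize(jpegBuffer, bufferCapacity);
    }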
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 46fbc3e..b512f2b 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -33,6 +33,9 @@
#include <camera_metadata_hidden.h>
+#include "DepthCompositeStream.h"
+#include "HeicCompositeStream.h"
+
// Convenience methods for constructing binder::Status objects for error returns
#define STATUS_ERROR(errorCode, errorString) \
@@ -143,6 +146,7 @@
binder::Status CameraDeviceClient::insertGbpLocked(const sp<IGraphicBufferProducer>& gbp,
SurfaceMap* outSurfaceMap, Vector<int32_t>* outputStreamIds, int32_t *currentStreamId) {
+ int compositeIdx;
int idx = mStreamMap.indexOfKey(IInterface::asBinder(gbp));
// Trying to submit request with surface that wasn't created
@@ -152,6 +156,11 @@
__FUNCTION__, mCameraIdStr.string());
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
"Request targets Surface that is not part of current capture session");
+ } else if ((compositeIdx = mCompositeStreamMap.indexOfKey(IInterface::asBinder(gbp)))
+ != NAME_NOT_FOUND) {
+ mCompositeStreamMap.valueAt(compositeIdx)->insertGbp(outSurfaceMap, outputStreamIds,
+ currentStreamId);
+ return binder::Status::ok();
}
const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
@@ -489,6 +498,17 @@
mCameraIdStr.string(), strerror(-err), err);
ALOGE("%s: %s", __FUNCTION__, msg.string());
res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ } else {
+ for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
+ err = mCompositeStreamMap.valueAt(i)->configureStream();
+ if (err != OK ) {
+ String8 msg = String8::format("Camera %s: Error configuring composite "
+ "streams: %s (%d)", mCameraIdStr.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ break;
+ }
+ }
}
return res;
@@ -692,8 +712,49 @@
return res;
if (!isStreamInfoValid) {
- mapStreamInfo(streamInfo, static_cast<camera3_stream_rotation_t> (it.getRotation()),
- physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ bool isDepthCompositeStream =
+ camera3::DepthCompositeStream::isDepthCompositeStream(surface);
+ bool isHeicCompositeStream =
+ camera3::HeicCompositeStream::isHeicCompositeStream(surface);
+ if (isDepthCompositeStream || isHeicCompositeStream) {
+ // We need to take into account that composite streams can have
+ // additional internal camera streams.
+ std::vector<OutputStreamInfo> compositeStreams;
+ if (isDepthCompositeStream) {
+ ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
+ mDevice->info(), &compositeStreams);
+ } else {
+ ret = camera3::HeicCompositeStream::getCompositeStreamInfo(streamInfo,
+ mDevice->info(), &compositeStreams);
+ }
+ if (ret != OK) {
+ String8 msg = String8::format(
+ "Camera %s: Failed adding composite streams: %s (%d)",
+ mCameraIdStr.string(), strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (compositeStreams.size() == 0) {
+ // No internal streams means composite stream not
+ // supported.
+ *status = false;
+ return binder::Status::ok();
+ } else if (compositeStreams.size() > 1) {
+ streamCount += compositeStreams.size() - 1;
+ streamConfiguration.streams.resize(streamCount);
+ }
+
+ for (const auto& compositeStream : compositeStreams) {
+ mapStreamInfo(compositeStream,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
+ } else {
+ mapStreamInfo(streamInfo,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
isStreamInfoValid = true;
}
}
@@ -743,6 +804,7 @@
bool isInput = false;
std::vector<sp<IBinder>> surfaces;
ssize_t dIndex = NAME_NOT_FOUND;
+ ssize_t compositeIndex = NAME_NOT_FOUND;
if (mInputStream.configured && mInputStream.id == streamId) {
isInput = true;
@@ -762,6 +824,13 @@
}
}
+ for (size_t i = 0; i < mCompositeStreamMap.size(); ++i) {
+ if (streamId == mCompositeStreamMap.valueAt(i)->getStreamId()) {
+ compositeIndex = i;
+ break;
+ }
+ }
+
if (surfaces.empty() && dIndex == NAME_NOT_FOUND) {
String8 msg = String8::format("Camera %s: Invalid stream ID (%d) specified, no such"
" stream created yet", mCameraIdStr.string(), streamId);
@@ -791,6 +860,19 @@
if (dIndex != NAME_NOT_FOUND) {
mDeferredStreams.removeItemsAt(dIndex);
}
+
+ if (compositeIndex != NAME_NOT_FOUND) {
+ status_t ret;
+ if ((ret = mCompositeStreamMap.valueAt(compositeIndex)->deleteStream())
+ != OK) {
+ String8 msg = String8::format("Camera %s: Unexpected error %s (%d) when "
+ "deleting composite stream %d", mCameraIdStr.string(), strerror(-err), err,
+ streamId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ mCompositeStreamMap.removeItemsAt(compositeIndex);
+ }
}
}
@@ -870,11 +952,32 @@
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
std::vector<int> surfaceIds;
- err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
- streamInfo.height, streamInfo.format, streamInfo.dataSpace,
- static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
- &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
- isShared);
+ bool isDepthCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
+ bool isHeicCompisiteStream = camera3::HeicCompositeStream::isHeicCompositeStream(surfaces[0]);
+ if (isDepthCompositeStream || isHeicCompisiteStream) {
+ sp<CompositeStream> compositeStream;
+ if (isDepthCompositeStream) {
+ compositeStream = new camera3::DepthCompositeStream(mDevice, getRemoteCallback());
+ } else {
+ compositeStream = new camera3::HeicCompositeStream(mDevice, getRemoteCallback());
+ }
+
+ err = compositeStream->createStream(surfaces, deferredConsumer, streamInfo.width,
+ streamInfo.height, streamInfo.format,
+ static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+ &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+ isShared);
+ if (err == OK) {
+ mCompositeStreamMap.add(IInterface::asBinder(surfaces[0]->getIGraphicBufferProducer()),
+ compositeStream);
+ }
+ } else {
+ err = mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
+ streamInfo.height, streamInfo.format, streamInfo.dataSpace,
+ static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+ &streamId, physicalCameraId, &surfaceIds, outputConfiguration.getSurfaceSetID(),
+ isShared);
+ }
if (err != OK) {
res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
@@ -1356,6 +1459,8 @@
camera_metadata_ro_entry streamConfigs =
(dataSpace == HAL_DATASPACE_DEPTH) ?
info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
+ (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) ?
+ info.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS) :
info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
int32_t bestWidth = -1;
@@ -1808,7 +1913,14 @@
// Thread safe. Don't bother locking.
sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
- if (remoteCb != 0) {
+ // Composites can have multiple internal streams. Error notifications coming from such internal
+ // streams may need to remain within camera service.
+ bool skipClientNotification = false;
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ skipClientNotification |= mCompositeStreamMap.valueAt(i)->onError(errorCode, resultExtras);
+ }
+
+ if ((remoteCb != 0) && (!skipClientNotification)) {
remoteCb->onDeviceError(errorCode, resultExtras);
}
}
@@ -1842,6 +1954,10 @@
remoteCb->onCaptureStarted(resultExtras, timestamp);
}
Camera2ClientBase::notifyShutter(resultExtras, timestamp);
+
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ mCompositeStreamMap.valueAt(i)->onShutter(resultExtras, timestamp);
+ }
}
void CameraDeviceClient::notifyPrepared(int streamId) {
@@ -1901,6 +2017,10 @@
remoteCb->onResultReceived(result.mMetadata, result.mResultExtras,
result.mPhysicalMetadatas);
}
+
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ mCompositeStreamMap.valueAt(i)->onResultAvailable(result);
+ }
}
binder::Status CameraDeviceClient::checkPidStatus(const char* checkLocation) {
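Composite streams are tracked in a KeyedVector keyed by the IBinder of the consumer surface's IGraphicBufferProducer, which lets later calls (request submission, stream deletion, shutter/result/error callbacks) distinguish composite surfaces from ordinary ones. The lookup-and-forward pattern used throughout the hunks above, shown in isolation:

    sp<IBinder> key = IInterface::asBinder(surface->getIGraphicBufferProducer());
    ssize_t idx = mCompositeStreamMap.indexOfKey(key);
    if (idx != NAME_NOT_FOUND) {
        // Forward the notification only to the composite stream owning this surface.
        mCompositeStreamMap.valueAt(idx)->onShutter(resultExtras, timestamp);
    }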
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 17a0983..1c5abb0 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -26,8 +26,10 @@
#include "CameraService.h"
#include "common/FrameProcessorBase.h"
#include "common/Camera2ClientBase.h"
+#include "CompositeStream.h"
using android::camera3::OutputStreamInfo;
+using android::camera3::CompositeStream;
namespace android {
@@ -314,6 +316,8 @@
// stream ID -> outputStreamInfo mapping
std::unordered_map<int32_t, OutputStreamInfo> mStreamInfoMap;
+ KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
+
static const int32_t MAX_SURFACES_PER_STREAM = 4;
sp<CameraProviderManager> mProviderManager;
};
diff --git a/services/camera/libcameraservice/api2/CompositeStream.cpp b/services/camera/libcameraservice/api2/CompositeStream.cpp
new file mode 100644
index 0000000..354eaf9
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CompositeStream.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-CompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "common/CameraDeviceBase.h"
+#include "CameraDeviceClient.h"
+#include "CompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+CompositeStream::CompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+ mDevice(device),
+ mRemoteCallback(cb),
+ mNumPartialResults(1),
+ mErrorState(false) {
+ sp<CameraDeviceBase> cameraDevice = device.promote();
+ if (cameraDevice.get() != nullptr) {
+ CameraMetadata staticInfo = cameraDevice->info();
+ camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+ if (entry.count > 0) {
+ mNumPartialResults = entry.data.i32[0];
+ }
+ }
+}
+
+status_t CompositeStream::createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int * id, const String8& physicalCameraId,
+ std::vector<int> * surfaceIds, int streamSetId, bool isShared) {
+ if (hasDeferredConsumer) {
+ ALOGE("%s: Deferred consumers not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (streamSetId != camera3::CAMERA3_STREAM_ID_INVALID) {
+ ALOGE("%s: Surface groups not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if (isShared) {
+ ALOGE("%s: Shared surfaces not supported in case of composite streams!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ return createInternalStreams(consumers, hasDeferredConsumer, width, height, format, rotation, id,
+ physicalCameraId, surfaceIds, streamSetId, isShared);
+}
+
+status_t CompositeStream::deleteStream() {
+ {
+ Mutex::Autolock l(mMutex);
+ mPendingCaptureResults.clear();
+ mCaptureResults.clear();
+ mFrameNumberMap.clear();
+ mErrorFrameNumbers.clear();
+ }
+
+ return deleteInternalStreams();
+}
+
+void CompositeStream::onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+ const CameraMetadata& /*settings*/) {
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState && (streamId == getStreamId())) {
+ mPendingCaptureResults.emplace(frameNumber, CameraMetadata());
+ }
+}
+
+void CompositeStream::onBufferReleased(const BufferInfo& bufferInfo) {
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState && !bufferInfo.mError) {
+ mFrameNumberMap.emplace(bufferInfo.mFrameNumber, bufferInfo.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+}
+
+void CompositeStream::eraseResult(int64_t frameNumber) {
+ Mutex::Autolock l(mMutex);
+
+ auto it = mPendingCaptureResults.find(frameNumber);
+ if (it == mPendingCaptureResults.end()) {
+ return;
+ }
+
+ it = mPendingCaptureResults.erase(it);
+}
+
+void CompositeStream::onResultAvailable(const CaptureResult& result) {
+ bool resultError = false;
+ {
+ Mutex::Autolock l(mMutex);
+
+ uint64_t frameNumber = result.mResultExtras.frameNumber;
+ bool resultReady = false;
+ auto it = mPendingCaptureResults.find(frameNumber);
+ if (it != mPendingCaptureResults.end()) {
+ it->second.append(result.mMetadata);
+ if (result.mResultExtras.partialResultCount >= mNumPartialResults) {
+ auto entry = it->second.find(ANDROID_SENSOR_TIMESTAMP);
+ if (entry.count == 1) {
+ auto ts = entry.data.i64[0];
+ mCaptureResults.emplace(ts, std::make_tuple(frameNumber, it->second));
+ resultReady = true;
+ } else {
+ ALOGE("%s: Timestamp metadata entry missing for frameNumber: %" PRIu64,
+ __FUNCTION__, frameNumber);
+ resultError = true;
+ }
+ mPendingCaptureResults.erase(it);
+ }
+ }
+
+ if (resultReady) {
+ mInputReadyCondition.signal();
+ }
+ }
+
+ if (resultError) {
+ onResultError(result.mResultExtras);
+ }
+}
+
+void CompositeStream::flagAnErrorFrameNumber(int64_t frameNumber) {
+ Mutex::Autolock l(mMutex);
+ mErrorFrameNumbers.emplace(frameNumber);
+ mInputReadyCondition.signal();
+}
+
+status_t CompositeStream::registerCompositeStreamListener(int32_t streamId) {
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (device.get() == nullptr) {
+ return NO_INIT;
+ }
+
+ auto ret = device->addBufferListenerForStream(streamId, this);
+ if (ret != OK) {
+ ALOGE("%s: Failed to register composite stream listener!", __FUNCTION__);
+ }
+
+ return ret;
+}
+
+bool CompositeStream::onError(int32_t errorCode, const CaptureResultExtras& resultExtras) {
+ auto ret = false;
+ switch (errorCode) {
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+ onResultError(resultExtras);
+ break;
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+ ret = onStreamBufferError(resultExtras);
+ break;
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+ // Invalid request; this shouldn't affect composite streams.
+ break;
+ default:
+ ALOGE("%s: Unrecoverable error: %d detected!", __FUNCTION__, errorCode);
+ Mutex::Autolock l(mMutex);
+ mErrorState = true;
+ break;
+ }
+
+ return ret;
+}
+
+void CompositeStream::notifyError(int64_t frameNumber) {
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb =
+ mRemoteCallback.promote();
+
+ if ((frameNumber >= 0) && (remoteCb.get() != nullptr)) {
+ CaptureResultExtras extras;
+ extras.errorStreamId = getStreamId();
+ extras.frameNumber = frameNumber;
+ remoteCb->onDeviceError(
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER,
+ extras);
+ }
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
new file mode 100644
index 0000000..a401a82
--- /dev/null
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_COMPOSITE_STREAM_H
+
+#include <set>
+#include <unordered_map>
+
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+#include <camera/CameraMetadata.h>
+#include <camera/camera2/OutputConfiguration.h>
+#include <gui/IProducerListener.h>
+#include "common/CameraDeviceBase.h"
+#include "device3/Camera3StreamInterface.h"
+
+namespace android {
+
+class CameraDeviceClient;
+class CameraMetadata;
+class Surface;
+
+namespace camera3 {
+
+class CompositeStream : public camera3::Camera3StreamBufferListener {
+
+public:
+ CompositeStream(wp<CameraDeviceBase> device, wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ virtual ~CompositeStream() {}
+
+ status_t createStream(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared);
+
+ status_t deleteStream();
+
+ // Create and register all internal camera streams.
+ virtual status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared) = 0;
+
+ // Release all internal streams and corresponding resources.
+ virtual status_t deleteInternalStreams() = 0;
+
+ // Stream configuration completed.
+ virtual status_t configureStream() = 0;
+
+ // Insert the internal composite stream id in the user capture request.
+ virtual status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t>* /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) = 0;
+
+ // Return composite stream id.
+ virtual int getStreamId() = 0;
+
+ // Notify when a shutter notification is triggered.
+ virtual void onShutter(const CaptureResultExtras& /*resultExtras*/, nsecs_t /*timestamp*/) {}
+
+ void onResultAvailable(const CaptureResult& result);
+ bool onError(int32_t errorCode, const CaptureResultExtras& resultExtras);
+
+ // Camera3StreamBufferListener implementation
+ void onBufferAcquired(const BufferInfo& /*bufferInfo*/) override { /*Empty for now */ }
+ void onBufferReleased(const BufferInfo& bufferInfo) override;
+ void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+ const CameraMetadata& settings) override;
+
+protected:
+ struct ProducerListener : public BnProducerListener {
+ // ProducerListener implementation
+ void onBufferReleased() override { /*No impl. for now*/ };
+ };
+
+ status_t registerCompositeStreamListener(int32_t streamId);
+ void eraseResult(int64_t frameNumber);
+ void flagAnErrorFrameNumber(int64_t frameNumber);
+ void notifyError(int64_t frameNumber);
+
+ // Subclasses should check for buffer errors from internal streams and return 'true' in
+ // case the error notification should remain within camera service.
+ virtual bool onStreamBufferError(const CaptureResultExtras& resultExtras) = 0;
+
+ // Subclasses can decide how to handle result errors depending on whether or not the
+ // internal processing needs result data.
+ virtual void onResultError(const CaptureResultExtras& resultExtras) = 0;
+
+ // Device and/or service is in unrecoverable error state.
+ // Composite streams should behave accordingly.
+ void enableErrorState();
+
+ wp<CameraDeviceBase> mDevice;
+ wp<hardware::camera2::ICameraDeviceCallbacks> mRemoteCallback;
+
+ mutable Mutex mMutex;
+ Condition mInputReadyCondition;
+ int32_t mNumPartialResults;
+ bool mErrorState;
+
+ // Frame number to capture result map of partial pending request results.
+ std::unordered_map<uint64_t, CameraMetadata> mPendingCaptureResults;
+
+ // Timestamp to capture (frame number, result) map of completed pending request results.
+ std::unordered_map<int64_t, std::tuple<int64_t, CameraMetadata>> mCaptureResults;
+
+ // Frame number to timestamp map
+ std::unordered_map<int64_t, int64_t> mFrameNumberMap;
+
+ // Keeps a set of buffer/result frame numbers for any errors detected during processing.
+ std::set<int64_t> mErrorFrameNumbers;
+
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
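For reference, the abstract interface above is meant to be subclassed per composite stream type (Depth and HEIC below). A minimal sketch of such a subclass follows; it is illustrative only and not part of this change, and the class name FakeCompositeStream, its trivial bodies, and the stream id value are invented for the example:

class FakeCompositeStream : public CompositeStream {
  public:
    using CompositeStream::CompositeStream;  // reuse the (device, callback) constructor

    // Pure virtuals every composite stream must provide.
    status_t createInternalStreams(const std::vector<sp<Surface>>& /*consumers*/,
            bool /*hasDeferredConsumer*/, uint32_t /*width*/, uint32_t /*height*/,
            int /*format*/, camera3_stream_rotation_t /*rotation*/, int *id,
            const String8& /*physicalCameraId*/, std::vector<int>* /*surfaceIds*/,
            int /*streamSetId*/, bool /*isShared*/) override {
        // A real implementation creates its internal streams here and calls
        // registerCompositeStreamListener() for each internal stream id.
        *id = mStreamId;
        return OK;
    }
    status_t deleteInternalStreams() override { return OK; }
    status_t configureStream() override { return OK; }
    status_t insertGbp(SurfaceMap* /*outSurfaceMap*/, Vector<int32_t>* /*outputStreamIds*/,
            int32_t* currentStreamId) override {
        if (currentStreamId != nullptr) {
            *currentStreamId = mStreamId;
        }
        return OK;
    }
    int getStreamId() override { return mStreamId; }

  protected:
    bool onStreamBufferError(const CaptureResultExtras& resultExtras) override {
        // Keep internal buffer errors inside camera service and flag the frame for cleanup.
        flagAnErrorFrameNumber(resultExtras.frameNumber);
        return true;
    }
    void onResultError(const CaptureResultExtras& resultExtras) override {
        eraseResult(resultExtras.frameNumber);
    }

  private:
    int mStreamId = 0;  // hypothetical composite stream id
};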
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
new file mode 100644
index 0000000..9525ad2
--- /dev/null
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -0,0 +1,822 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DepthCompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include "api1/client2/JpegProcessor.h"
+#include "common/CameraProviderManager.h"
+#include "dlfcn.h"
+#include <gui/Surface.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include "DepthCompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+DepthCompositeStream::DepthCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+ CompositeStream(device, cb),
+ mBlobStreamId(-1),
+ mBlobSurfaceId(-1),
+ mDepthStreamId(-1),
+ mDepthSurfaceId(-1),
+ mBlobWidth(0),
+ mBlobHeight(0),
+ mDepthBufferAcquired(false),
+ mBlobBufferAcquired(false),
+ mProducerListener(new ProducerListener()),
+ mMaxJpegSize(-1),
+ mIsLogicalCamera(false),
+ mDepthPhotoLibHandle(nullptr),
+ mDepthPhotoProcess(nullptr) {
+ sp<CameraDeviceBase> cameraDevice = device.promote();
+ if (cameraDevice.get() != nullptr) {
+ CameraMetadata staticInfo = cameraDevice->info();
+ auto entry = staticInfo.find(ANDROID_JPEG_MAX_SIZE);
+ if (entry.count > 0) {
+ mMaxJpegSize = entry.data.i32[0];
+ } else {
+ ALOGW("%s: Maximum jpeg size absent from camera characteristics", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_LENS_INTRINSIC_CALIBRATION);
+ if (entry.count == 5) {
+ mInstrinsicCalibration.reserve(5);
+ mInstrinsicCalibration.insert(mInstrinsicCalibration.end(), entry.data.f,
+ entry.data.f + 5);
+ } else {
+ ALOGW("%s: Intrinsic calibration absent from camera characteristics!", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_LENS_DISTORTION);
+ if (entry.count == 5) {
+ mLensDistortion.reserve(5);
+ mLensDistortion.insert(mLensDistortion.end(), entry.data.f, entry.data.f + 5);
+ } else {
+ ALOGW("%s: Lens distortion absent from camera characteristics!", __FUNCTION__);
+ }
+
+ entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ for (size_t i = 0; i < entry.count; ++i) {
+ uint8_t capability = entry.data.u8[i];
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA) {
+ mIsLogicalCamera = true;
+ break;
+ }
+ }
+
+ getSupportedDepthSizes(staticInfo, &mSupportedDepthSizes);
+
+ mDepthPhotoLibHandle = dlopen(camera3::kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+ if (mDepthPhotoLibHandle != nullptr) {
+ mDepthPhotoProcess = reinterpret_cast<camera3::process_depth_photo_frame> (
+ dlsym(mDepthPhotoLibHandle, camera3::kDepthPhotoProcessFunction));
+ if (mDepthPhotoProcess == nullptr) {
+ ALOGE("%s: Failed to link to depth photo process function: %s", __FUNCTION__,
+ dlerror());
+ }
+ } else {
+ ALOGE("%s: Failed to link to depth photo library: %s", __FUNCTION__, dlerror());
+ }
+
+ }
+}
+
+DepthCompositeStream::~DepthCompositeStream() {
+ mBlobConsumer.clear();
+ mBlobSurface.clear();
+ mBlobStreamId = -1;
+ mBlobSurfaceId = -1;
+ mDepthConsumer.clear();
+ mDepthSurface.clear();
+ mDepthConsumer = nullptr;
+ mDepthSurface = nullptr;
+ if (mDepthPhotoLibHandle != nullptr) {
+ dlclose(mDepthPhotoLibHandle);
+ mDepthPhotoLibHandle = nullptr;
+ }
+ mDepthPhotoProcess = nullptr;
+}
+
+void DepthCompositeStream::compilePendingInputLocked() {
+ CpuConsumer::LockedBuffer imgBuffer;
+
+ while (!mInputJpegBuffers.empty() && !mBlobBufferAcquired) {
+ auto it = mInputJpegBuffers.begin();
+ auto res = mBlobConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Cannot lock any more buffers.
+ break;
+ } else if (res != OK) {
+ ALOGE("%s: Error locking blob image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ mPendingInputFrames[*it].error = true;
+ mInputJpegBuffers.erase(it);
+ continue;
+ }
+
+ if (*it != imgBuffer.timestamp) {
+ ALOGW("%s: Expecting jpeg buffer with time stamp: %" PRId64 " received buffer with "
+ "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mBlobConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].jpegBuffer = imgBuffer;
+ mBlobBufferAcquired = true;
+ }
+ mInputJpegBuffers.erase(it);
+ }
+
+ while (!mInputDepthBuffers.empty() && !mDepthBufferAcquired) {
+ auto it = mInputDepthBuffers.begin();
+ auto res = mDepthConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Cannot lock any more buffers.
+ break;
+ } else if (res != OK) {
+ ALOGE("%s: Error receiving depth image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ mPendingInputFrames[*it].error = true;
+ mInputDepthBuffers.erase(it);
+ continue;
+ }
+
+ if (*it != imgBuffer.timestamp) {
+ ALOGW("%s: Expecting depth buffer with time stamp: %" PRId64 " received buffer with "
+ "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mDepthConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].depthBuffer = imgBuffer;
+ mDepthBufferAcquired = true;
+ }
+ mInputDepthBuffers.erase(it);
+ }
+
+ while (!mCaptureResults.empty()) {
+ auto it = mCaptureResults.begin();
+ // Negative timestamp indicates that something went wrong during the capture result
+ // collection process.
+ if (it->first >= 0) {
+ mPendingInputFrames[it->first].frameNumber = std::get<0>(it->second);
+ mPendingInputFrames[it->first].result = std::get<1>(it->second);
+ }
+ mCaptureResults.erase(it);
+ }
+
+ while (!mFrameNumberMap.empty()) {
+ auto it = mFrameNumberMap.begin();
+ mPendingInputFrames[it->second].frameNumber = it->first;
+ mFrameNumberMap.erase(it);
+ }
+
+ auto it = mErrorFrameNumbers.begin();
+ while (it != mErrorFrameNumbers.end()) {
+ bool frameFound = false;
+ for (auto &inputFrame : mPendingInputFrames) {
+ if (inputFrame.second.frameNumber == *it) {
+ inputFrame.second.error = true;
+ frameFound = true;
+ break;
+ }
+ }
+
+ if (frameFound) {
+ it = mErrorFrameNumbers.erase(it);
+ } else {
+ ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
+ *it);
+ it++;
+ }
+ }
+}
+
+bool DepthCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*inout*/) {
+ if (currentTs == nullptr) {
+ return false;
+ }
+
+ bool newInputAvailable = false;
+ for (const auto& it : mPendingInputFrames) {
+ if ((!it.second.error) && (it.second.depthBuffer.data != nullptr) &&
+ (it.second.jpegBuffer.data != nullptr) && (it.first < *currentTs)) {
+ *currentTs = it.first;
+ newInputAvailable = true;
+ }
+ }
+
+ return newInputAvailable;
+}
+
+int64_t DepthCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*inout*/) {
+ int64_t ret = -1;
+ if (currentTs == nullptr) {
+ return ret;
+ }
+
+ for (const auto& it : mPendingInputFrames) {
+ if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
+ *currentTs = it.first;
+ ret = it.second.frameNumber;
+ }
+ }
+
+ return ret;
+}
+
+status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
+ status_t res;
+ sp<ANativeWindow> outputANW = mOutputSurface;
+ ANativeWindowBuffer *anb;
+ int fenceFd;
+ void *dstBuffer;
+
+ auto jpegSize = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
+ inputFrame.jpegBuffer.width);
+ if (jpegSize == 0) {
+ ALOGW("%s: Failed to find input jpeg size, default to using entire buffer!", __FUNCTION__);
+ jpegSize = inputFrame.jpegBuffer.width;
+ }
+
+ size_t maxDepthJpegSize;
+ if (mMaxJpegSize > 0) {
+ maxDepthJpegSize = mMaxJpegSize;
+ } else {
+ maxDepthJpegSize = std::max<size_t> (jpegSize,
+ inputFrame.depthBuffer.width * inputFrame.depthBuffer.height * 3 / 2);
+ }
+ uint8_t jpegQuality = 100;
+ auto entry = inputFrame.result.find(ANDROID_JPEG_QUALITY);
+ if (entry.count > 0) {
+ jpegQuality = entry.data.u8[0];
+ }
+
+ // The final depth photo will consist of the main jpeg buffer, the depth map buffer (also in
+ // jpeg format) and the confidence map (jpeg as well). Assume the worst case where all three
+ // jpegs need the maximum jpeg size.
+ size_t finalJpegBufferSize = maxDepthJpegSize * 3;
+
+ if ((res = native_window_set_buffers_dimensions(mOutputSurface.get(), finalJpegBufferSize, 1))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer dimensions"
+ " %zux%u for stream %d", __FUNCTION__, finalJpegBufferSize, 1U, mBlobStreamId);
+ return res;
+ }
+
+ res = outputANW->dequeueBuffer(mOutputSurface.get(), &anb, &fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
+ res);
+ return res;
+ }
+
+ sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
+ res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return res;
+ }
+
+ if ((gb->getWidth() < finalJpegBufferSize) || (gb->getHeight() != 1)) {
+ ALOGE("%s: Blob buffer size mismatch, expected %dx%d received %zux%u", __FUNCTION__,
+ gb->getWidth(), gb->getHeight(), finalJpegBufferSize, 1U);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return BAD_VALUE;
+ }
+
+ DepthPhotoInputFrame depthPhoto;
+ depthPhoto.mMainJpegBuffer = reinterpret_cast<const char*> (inputFrame.jpegBuffer.data);
+ depthPhoto.mMainJpegWidth = mBlobWidth;
+ depthPhoto.mMainJpegHeight = mBlobHeight;
+ depthPhoto.mMainJpegSize = jpegSize;
+ depthPhoto.mDepthMapBuffer = reinterpret_cast<uint16_t*> (inputFrame.depthBuffer.data);
+ depthPhoto.mDepthMapWidth = inputFrame.depthBuffer.width;
+ depthPhoto.mDepthMapHeight = inputFrame.depthBuffer.height;
+ depthPhoto.mDepthMapStride = inputFrame.depthBuffer.stride;
+ depthPhoto.mJpegQuality = jpegQuality;
+ depthPhoto.mIsLogical = mIsLogicalCamera;
+ depthPhoto.mMaxJpegSize = maxDepthJpegSize;
+ // The camera intrinsic calibration layout is as follows:
+ // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
+ if (mInstrinsicCalibration.size() == 5) {
+ memcpy(depthPhoto.mInstrinsicCalibration, mInstrinsicCalibration.data(),
+ sizeof(depthPhoto.mInstrinsicCalibration));
+ depthPhoto.mIsInstrinsicCalibrationValid = 1;
+ } else {
+ depthPhoto.mIsInstrinsicCalibrationValid = 0;
+ }
+ // The camera lens distortion contains the following lens correction coefficients.
+ // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
+ if (mLensDistortion.size() == 5) {
+ memcpy(depthPhoto.mLensDistortion, mLensDistortion.data(),
+ sizeof(depthPhoto.mLensDistortion));
+ depthPhoto.mIsLensDistortionValid = 1;
+ } else {
+ depthPhoto.mIsLensDistortionValid = 0;
+ }
+ entry = inputFrame.result.find(ANDROID_JPEG_ORIENTATION);
+ if (entry.count > 0) {
+ // The camera jpeg orientation value must be one of [0, 90, 180, 270].
+ switch (entry.data.i32[0]) {
+ case 0:
+ case 90:
+ case 180:
+ case 270:
+ depthPhoto.mOrientation = static_cast<DepthPhotoOrientation> (entry.data.i32[0]);
+ break;
+ default:
+ ALOGE("%s: Unexpected jpeg orientation value: %d, default to 0 degrees",
+ __FUNCTION__, entry.data.i32[0]);
+ }
+ }
+
+ size_t actualJpegSize = 0;
+ res = mDepthPhotoProcess(depthPhoto, finalJpegBufferSize, dstBuffer, &actualJpegSize);
+ if (res != 0) {
+ ALOGE("%s: Depth photo processing failed: %s (%d)", __FUNCTION__, strerror(-res), res);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return res;
+ }
+
+ size_t finalJpegSize = actualJpegSize + sizeof(struct camera3_jpeg_blob);
+ if (finalJpegSize > finalJpegBufferSize) {
+ ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
+ outputANW->cancelBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+ return NO_MEMORY;
+ }
+
+ ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
+ uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
+ (gb->getWidth() - sizeof(struct camera3_jpeg_blob));
+ struct camera3_jpeg_blob *blob = reinterpret_cast<struct camera3_jpeg_blob*> (header);
+ blob->jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
+ blob->jpeg_size = actualJpegSize;
+ outputANW->queueBuffer(mOutputSurface.get(), anb, /*fence*/ -1);
+
+ return res;
+}
+
+void DepthCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
+ if (inputFrame == nullptr) {
+ return;
+ }
+
+ if (inputFrame->depthBuffer.data != nullptr) {
+ mDepthConsumer->unlockBuffer(inputFrame->depthBuffer);
+ inputFrame->depthBuffer.data = nullptr;
+ mDepthBufferAcquired = false;
+ }
+
+ if (inputFrame->jpegBuffer.data != nullptr) {
+ mBlobConsumer->unlockBuffer(inputFrame->jpegBuffer);
+ inputFrame->jpegBuffer.data = nullptr;
+ mBlobBufferAcquired = false;
+ }
+
+ if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
+ notifyError(inputFrame->frameNumber);
+ inputFrame->errorNotified = true;
+ }
+}
+
+void DepthCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+ auto it = mPendingInputFrames.begin();
+ while (it != mPendingInputFrames.end()) {
+ if (it->first <= currentTs) {
+ releaseInputFrameLocked(&it->second);
+ it = mPendingInputFrames.erase(it);
+ } else {
+ it++;
+ }
+ }
+}
+
+bool DepthCompositeStream::threadLoop() {
+ int64_t currentTs = INT64_MAX;
+ bool newInputAvailable = false;
+
+ {
+ Mutex::Autolock l(mMutex);
+
+ if (mErrorState) {
+ // In case we landed in error state, return any pending buffers and
+ // halt all further processing.
+ compilePendingInputLocked();
+ releaseInputFramesLocked(currentTs);
+ return false;
+ }
+
+ while (!newInputAvailable) {
+ compilePendingInputLocked();
+ newInputAvailable = getNextReadyInputLocked(¤tTs);
+ if (!newInputAvailable) {
+ auto failingFrameNumber = getNextFailingInputLocked(¤tTs);
+ if (failingFrameNumber >= 0) {
+ // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
+ // possible for two internal stream buffers to fail. In such a scenario the
+ // composite stream should notify the client about a stream buffer error only
+ // once and this information is kept within 'errorNotified'.
+ // Any present failed input frames will be removed on a subsequent call to
+ // 'releaseInputFramesLocked()'.
+ releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
+ currentTs = INT64_MAX;
+ }
+
+ auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
+ if (ret == TIMED_OUT) {
+ return true;
+ } else if (ret != OK) {
+ ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ return false;
+ }
+ }
+ }
+ }
+
+ auto res = processInputFrame(mPendingInputFrames[currentTs]);
+ Mutex::Autolock l(mMutex);
+ if (res != OK) {
+ ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
+ currentTs, strerror(-res), res);
+ mPendingInputFrames[currentTs].error = true;
+ }
+
+ releaseInputFramesLocked(currentTs);
+
+ return true;
+}
+
+bool DepthCompositeStream::isDepthCompositeStream(const sp<Surface> &surface) {
+ ANativeWindow *anw = surface.get();
+ status_t err;
+ int format;
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ int dataspace;
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
+ String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ if ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_DYNAMIC_DEPTH)) {
+ return true;
+ }
+
+ return false;
+}
+
+status_t DepthCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+ if (mSupportedDepthSizes.empty()) {
+ ALOGE("%s: This camera device doesn't support any depth map streams!", __FUNCTION__);
+ return INVALID_OPERATION;
+ }
+
+ size_t depthWidth, depthHeight;
+ auto ret = getMatchingDepthSize(width, height, mSupportedDepthSizes, &depthWidth, &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: Failed to find an appropriate depth stream size!", __FUNCTION__);
+ return ret;
+ }
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mBlobConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/1, /*controlledByApp*/ true);
+ mBlobConsumer->setFrameAvailableListener(this);
+ mBlobConsumer->setName(String8("Camera3-JpegCompositeStream"));
+ mBlobSurface = new Surface(producer);
+
+ ret = device->createStream(mBlobSurface, width, height, format, kJpegDataSpace, rotation,
+ id, physicalCameraId, surfaceIds);
+ if (ret == OK) {
+ mBlobStreamId = *id;
+ mBlobSurfaceId = (*surfaceIds)[0];
+ mOutputSurface = consumers[0];
+ } else {
+ return ret;
+ }
+
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mDepthConsumer = new CpuConsumer(consumer, /*maxLockedBuffers*/ 1, /*controlledByApp*/ true);
+ mDepthConsumer->setFrameAvailableListener(this);
+ mDepthConsumer->setName(String8("Camera3-DepthCompositeStream"));
+ mDepthSurface = new Surface(producer);
+ std::vector<int> depthSurfaceId;
+ ret = device->createStream(mDepthSurface, depthWidth, depthHeight, kDepthMapPixelFormat,
+ kDepthMapDataSpace, rotation, &mDepthStreamId, physicalCameraId, &depthSurfaceId);
+ if (ret == OK) {
+ mDepthSurfaceId = depthSurfaceId[0];
+ } else {
+ return ret;
+ }
+
+ ret = registerCompositeStreamListener(getStreamId());
+ if (ret != OK) {
+ ALOGE("%s: Failed to register blob stream listener!", __FUNCTION__);
+ return ret;
+ }
+
+ ret = registerCompositeStreamListener(mDepthStreamId);
+ if (ret != OK) {
+ ALOGE("%s: Failed to register depth stream listener!", __FUNCTION__);
+ return ret;
+ }
+
+ mBlobWidth = width;
+ mBlobHeight = height;
+
+ return ret;
+}
+
+status_t DepthCompositeStream::configureStream() {
+ if (isRunning()) {
+ // Processing thread is already running, nothing more to do.
+ return NO_ERROR;
+ }
+
+ if ((mDepthPhotoLibHandle == nullptr) || (mDepthPhotoProcess == nullptr)) {
+ ALOGE("%s: Depth photo library is not present!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ if (mOutputSurface.get() == nullptr) {
+ ALOGE("%s: No valid output surface set!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ if (res != OK) {
+ ALOGE("%s: Unable to connect to native window for stream %d",
+ __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
+ mBlobStreamId);
+ return res;
+ }
+
+ int maxProducerBuffers;
+ ANativeWindow *anw = mBlobSurface.get();
+ if ((res = anw->query(anw, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxProducerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ ANativeWindow *anwConsumer = mOutputSurface.get();
+ int maxConsumerBuffers;
+ if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+ &maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffer_count(
+ anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mBlobStreamId);
+ return res;
+ }
+
+ run("DepthCompositeStreamProc");
+
+ return NO_ERROR;
+}
+
+status_t DepthCompositeStream::deleteInternalStreams() {
+ // The 'CameraDeviceClient' parent will delete the blob stream
+ requestExit();
+
+ auto ret = join();
+ if (ret != OK) {
+ ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ }
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ if (mDepthStreamId >= 0) {
+ ret = device->deleteStream(mDepthStreamId);
+ mDepthStreamId = -1;
+ }
+
+ if (mOutputSurface != nullptr) {
+ mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
+ mOutputSurface.clear();
+ }
+
+ return ret;
+}
+
+void DepthCompositeStream::onFrameAvailable(const BufferItem& item) {
+ if (item.mDataSpace == kJpegDataSpace) {
+ ALOGV("%s: Jpeg buffer with ts: %" PRIu64 " ms. arrived!",
+ __func__, ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputJpegBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else if (item.mDataSpace == kDepthMapDataSpace) {
+ ALOGV("%s: Depth buffer with ts: %" PRIu64 " ms. arrived!", __func__,
+ ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputDepthBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else {
+ ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
+ }
+}
+
+status_t DepthCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t> * /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
+ if (outSurfaceMap->find(mDepthStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mDepthStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mDepthStreamId);
+ }
+ (*outSurfaceMap)[mDepthStreamId].push_back(mDepthSurfaceId);
+
+ if (outSurfaceMap->find(mBlobStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mBlobStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mBlobStreamId);
+ }
+ (*outSurfaceMap)[mBlobStreamId].push_back(mBlobSurfaceId);
+
+ if (currentStreamId != nullptr) {
+ *currentStreamId = mBlobStreamId;
+ }
+
+ return NO_ERROR;
+}
+
+void DepthCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
+ // Processing can continue even in case of result errors.
+ // At the moment depth composite stream processing relies mainly on static camera
+ // characteristics data. The actual result data can be used for the jpeg quality but
+ // in case it is absent we can default to maximum.
+ eraseResult(resultExtras.frameNumber);
+}
+
+bool DepthCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
+ bool ret = false;
+ // Buffer errors concerning internal composite streams should not be directly visible to
+ // camera clients. They must only receive a single buffer error with the public composite
+ // stream id.
+ if ((resultExtras.errorStreamId == mDepthStreamId) ||
+ (resultExtras.errorStreamId == mBlobStreamId)) {
+ flagAnErrorFrameNumber(resultExtras.frameNumber);
+ ret = true;
+ }
+
+ return ret;
+}
+
+status_t DepthCompositeStream::getMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/) {
+ if ((depthWidth == nullptr) || (depthHeight == nullptr)) {
+ return BAD_VALUE;
+ }
+
+ float arTol = CameraProviderManager::kDepthARTolerance;
+ *depthWidth = *depthHeight = 0;
+
+ float aspectRatio = static_cast<float> (width) / static_cast<float> (height);
+ for (const auto& it : supportedDepthSizes) {
+ auto currentWidth = std::get<0>(it);
+ auto currentHeight = std::get<1>(it);
+ if ((currentWidth == width) && (currentHeight == height)) {
+ *depthWidth = width;
+ *depthHeight = height;
+ break;
+ } else {
+ float currentRatio = static_cast<float> (currentWidth) /
+ static_cast<float> (currentHeight);
+ auto currentSize = currentWidth * currentHeight;
+ auto oldSize = (*depthWidth) * (*depthHeight);
+ if ((fabs(aspectRatio - currentRatio) <= arTol) && (currentSize > oldSize)) {
+ *depthWidth = currentWidth;
+ *depthHeight = currentHeight;
+ }
+ }
+ }
+
+ return ((*depthWidth > 0) && (*depthHeight > 0)) ? OK : BAD_VALUE;
+}
+
+void DepthCompositeStream::getSupportedDepthSizes(const CameraMetadata& ch,
+ std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/) {
+ if (depthSizes == nullptr) {
+ return;
+ }
+
+ auto entry = ch.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
+ if (entry.count > 0) {
+ // Depth stream dimensions have four int32_t components
+ // (pixelformat, width, height, type)
+ size_t entryCount = entry.count / 4;
+ depthSizes->reserve(entryCount);
+ for (size_t i = 0; i < entry.count; i += 4) {
+ if ((entry.data.i32[i] == kDepthMapPixelFormat) &&
+ (entry.data.i32[i+3] ==
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
+ depthSizes->push_back(std::make_tuple(entry.data.i32[i+1],
+ entry.data.i32[i+2]));
+ }
+ }
+ }
+}
+
+status_t DepthCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
+ if (compositeOutput == nullptr) {
+ return BAD_VALUE;
+ }
+
+ std::vector<std::tuple<size_t, size_t>> depthSizes;
+ getSupportedDepthSizes(ch, &depthSizes);
+ if (depthSizes.empty()) {
+ ALOGE("%s: No depth stream configurations present", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ size_t depthWidth, depthHeight;
+ auto ret = getMatchingDepthSize(streamInfo.width, streamInfo.height, depthSizes, &depthWidth,
+ &depthHeight);
+ if (ret != OK) {
+ ALOGE("%s: No matching depth stream size found", __FUNCTION__);
+ return ret;
+ }
+
+ compositeOutput->clear();
+ compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
+
+ // Jpeg/Blob stream info
+ (*compositeOutput)[0].dataSpace = kJpegDataSpace;
+ (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+ // Depth stream info
+ (*compositeOutput)[1].width = depthWidth;
+ (*compositeOutput)[1].height = depthHeight;
+ (*compositeOutput)[1].format = kDepthMapPixelFormat;
+ (*compositeOutput)[1].dataSpace = kDepthMapDataSpace;
+ (*compositeOutput)[1].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+ return NO_ERROR;
+}
+
+}; // namespace camera3
+}; // namespace android
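The depth size selection in getMatchingDepthSize() above prefers an exact match and otherwise picks the largest supported size whose aspect ratio stays within CameraProviderManager::kDepthARTolerance of the blob size. A condensed, self-contained restatement of that rule is sketched below; it is illustrative only and not part of this change, and the 0.1 tolerance and the sample sizes are assumptions:

#include <cmath>
#include <tuple>
#include <vector>

static std::tuple<size_t, size_t> pickDepthSize(size_t width, size_t height,
        const std::vector<std::tuple<size_t, size_t>>& sizes, float arTol = 0.1f) {
    size_t bestW = 0, bestH = 0;
    float aspectRatio = static_cast<float>(width) / static_cast<float>(height);
    for (const auto& [curW, curH] : sizes) {
        if ((curW == width) && (curH == height)) {
            return {width, height};  // exact match always wins
        }
        float curRatio = static_cast<float>(curW) / static_cast<float>(curH);
        if ((std::fabs(aspectRatio - curRatio) <= arTol) && (curW * curH > bestW * bestH)) {
            bestW = curW;  // largest area among sizes within the aspect ratio tolerance
            bestH = curH;
        }
    }
    return {bestW, bestH};  // {0, 0} indicates no acceptable depth size
}

// Example: pickDepthSize(1920, 1080, {{640, 480}, {320, 180}, {1280, 720}}) yields
// {1280, 720}: 640x480 (4:3) falls outside the 16:9 tolerance, and 1280x720 has a
// larger area than 320x180.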
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
new file mode 100644
index 0000000..1bf31f4
--- /dev/null
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_COMPOSITE_STREAM_H
+
+#include "common/DepthPhotoProcessor.h"
+#include <dynamic_depth/imaging_model.h>
+#include <dynamic_depth/depth_map.h>
+
+#include <gui/CpuConsumer.h>
+
+#include "CompositeStream.h"
+
+using dynamic_depth::DepthMap;
+using dynamic_depth::Item;
+using dynamic_depth::ImagingModel;
+
+namespace android {
+
+class CameraDeviceClient;
+class CameraMetadata;
+class Surface;
+
+namespace camera3 {
+
+class DepthCompositeStream : public CompositeStream, public Thread,
+ public CpuConsumer::FrameAvailableListener {
+
+public:
+ DepthCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ ~DepthCompositeStream() override;
+
+ static bool isDepthCompositeStream(const sp<Surface> &surface);
+
+ // CompositeStream overrides
+ status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+ status_t deleteInternalStreams() override;
+ status_t configureStream() override;
+ status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
+ int32_t* /*out*/currentStreamId) override;
+ int getStreamId() override { return mBlobStreamId; }
+
+ // CpuConsumer listener implementation
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // Return stream information about the internal camera streams
+ static status_t getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/);
+
+protected:
+
+ bool threadLoop() override;
+ bool onStreamBufferError(const CaptureResultExtras& resultExtras) override;
+ void onResultError(const CaptureResultExtras& resultExtras) override;
+
+private:
+ struct InputFrame {
+ CpuConsumer::LockedBuffer depthBuffer;
+ CpuConsumer::LockedBuffer jpegBuffer;
+ CameraMetadata result;
+ bool error;
+ bool errorNotified;
+ int64_t frameNumber;
+
+ InputFrame() : error(false), errorNotified(false), frameNumber(-1) { }
+ };
+
+ // Helper methods
+ static void getSupportedDepthSizes(const CameraMetadata& ch,
+ std::vector<std::tuple<size_t, size_t>>* depthSizes /*out*/);
+ static status_t getMatchingDepthSize(size_t width, size_t height,
+ const std::vector<std::tuple<size_t, size_t>>& supportedDepthSizes,
+ size_t *depthWidth /*out*/, size_t *depthHeight /*out*/);
+
+ // Dynamic depth processing
+ status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
+ const size_t maxOutSize, uint8_t jpegQuality, size_t &actualSize);
+ std::unique_ptr<DepthMap> processDepthMapFrame(const CpuConsumer::LockedBuffer &depthMapBuffer,
+ size_t maxJpegSize, uint8_t jpegQuality,
+ std::vector<std::unique_ptr<Item>>* items /*out*/);
+ std::unique_ptr<ImagingModel> getImagingModel();
+ status_t processInputFrame(const InputFrame &inputFrame);
+
+ // Buffer/Results handling
+ void compilePendingInputLocked();
+ void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
+ void releaseInputFramesLocked(int64_t currentTs);
+
+ // Find first complete and valid frame with smallest timestamp
+ bool getNextReadyInputLocked(int64_t *currentTs /*inout*/);
+
+ // Find next failing frame number with smallest timestamp and return respective frame number
+ int64_t getNextFailingInputLocked(int64_t *currentTs /*inout*/);
+
+ static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ static const auto kDepthMapPixelFormat = HAL_PIXEL_FORMAT_Y16;
+ static const auto kDepthMapDataSpace = HAL_DATASPACE_DEPTH;
+ static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
+
+ int mBlobStreamId, mBlobSurfaceId, mDepthStreamId, mDepthSurfaceId;
+ size_t mBlobWidth, mBlobHeight;
+ sp<CpuConsumer> mBlobConsumer, mDepthConsumer;
+ bool mDepthBufferAcquired, mBlobBufferAcquired;
+ sp<Surface> mDepthSurface, mBlobSurface, mOutputSurface;
+ sp<ProducerListener> mProducerListener;
+
+ ssize_t mMaxJpegSize;
+ std::vector<std::tuple<size_t, size_t>> mSupportedDepthSizes;
+ std::vector<float> mInstrinsicCalibration, mLensDistortion;
+ bool mIsLogicalCamera;
+ void* mDepthPhotoLibHandle;
+ process_depth_photo_frame mDepthPhotoProcess;
+
+ // Keep all incoming Depth buffer timestamps pending further processing.
+ std::vector<int64_t> mInputDepthBuffers;
+
+ // Keep all incoming Jpeg/Blob buffer timestamps pending further processing.
+ std::vector<int64_t> mInputJpegBuffers;
+
+ // Map of all input frames pending further processing.
+ std::unordered_map<int64_t, InputFrame> mPendingInputFrames;
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
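DepthCompositeStream advertises itself through the BLOB format plus HAL_DATASPACE_DYNAMIC_DEPTH combination checked in isDepthCompositeStream(). A rough sketch of how a caller could route an output surface onto the composite path follows; the helper name makeCompositeStreamIfNeeded is hypothetical and the actual routing lives in CameraDeviceClient:

// Hypothetical routing helper (illustrative only, not part of this change):
sp<CompositeStream> makeCompositeStreamIfNeeded(const sp<Surface>& surface,
        wp<CameraDeviceBase> device, wp<hardware::camera2::ICameraDeviceCallbacks> cb) {
    if (DepthCompositeStream::isDepthCompositeStream(surface)) {
        // BLOB format with a dynamic depth dataspace: the client needs the internal
        // blob + depth streams plus the processing thread provided by the subclass.
        return new DepthCompositeStream(device, cb);
    }
    return nullptr;  // plain output stream, no composite processing required
}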
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
new file mode 100644
index 0000000..9fd0e8b
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -0,0 +1,1709 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-HeicCompositeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <linux/memfd.h>
+#include <pthread.h>
+#include <sys/syscall.h>
+
+#include <android/hardware/camera/device/3.5/types.h>
+#include <libyuv.h>
+#include <gui/Surface.h>
+#include <utils/Log.h>
+#include <utils/Trace.h>
+
+#include <media/ICrypto.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/MediaCodecConstants.h>
+
+#include "common/CameraDeviceBase.h"
+#include "utils/ExifUtils.h"
+#include "HeicEncoderInfoManager.h"
+#include "HeicCompositeStream.h"
+
+using android::hardware::camera::device::V3_5::CameraBlob;
+using android::hardware::camera::device::V3_5::CameraBlobId;
+
+namespace android {
+namespace camera3 {
+
+HeicCompositeStream::HeicCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb) :
+ CompositeStream(device, cb),
+ mUseHeic(false),
+ mNumOutputTiles(1),
+ mOutputWidth(0),
+ mOutputHeight(0),
+ mMaxHeicBufferSize(0),
+ mGridWidth(HeicEncoderInfoManager::kGridWidth),
+ mGridHeight(HeicEncoderInfoManager::kGridHeight),
+ mGridRows(1),
+ mGridCols(1),
+ mUseGrid(false),
+ mAppSegmentStreamId(-1),
+ mAppSegmentSurfaceId(-1),
+ mAppSegmentBufferAcquired(false),
+ mMainImageStreamId(-1),
+ mMainImageSurfaceId(-1),
+ mYuvBufferAcquired(false),
+ mProducerListener(new ProducerListener()),
+ mOutputBufferCounter(0),
+ mGridTimestampUs(0) {
+}
+
+HeicCompositeStream::~HeicCompositeStream() {
+ // Call deinitCodec in case stream hasn't been deleted yet to avoid any
+ // memory/resource leak.
+ deinitCodec();
+
+ mInputAppSegmentBuffers.clear();
+ mCodecOutputBuffers.clear();
+
+ mAppSegmentStreamId = -1;
+ mAppSegmentSurfaceId = -1;
+ mAppSegmentConsumer.clear();
+ mAppSegmentSurface.clear();
+
+ mMainImageStreamId = -1;
+ mMainImageSurfaceId = -1;
+ mMainImageConsumer.clear();
+ mMainImageSurface.clear();
+}
+
+bool HeicCompositeStream::isHeicCompositeStream(const sp<Surface> &surface) {
+ ANativeWindow *anw = surface.get();
+ status_t err;
+ int format;
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Failed to query Surface format: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ int dataspace;
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE, &dataspace)) != OK) {
+ String8 msg = String8::format("Failed to query Surface dataspace: %s (%d)", strerror(-err),
+ err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return false;
+ }
+
+ return ((format == HAL_PIXEL_FORMAT_BLOB) && (dataspace == HAL_DATASPACE_HEIF));
+}
+
+status_t HeicCompositeStream::createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool /*hasDeferredConsumer*/, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int /*streamSetId*/, bool /*isShared*/) {
+
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ status_t res = initializeCodec(width, height, device);
+ if (res != OK) {
+ ALOGE("%s: Failed to initialize HEIC/HEVC codec: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return NO_INIT;
+ }
+
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mAppSegmentConsumer = new CpuConsumer(consumer, 1);
+ mAppSegmentConsumer->setFrameAvailableListener(this);
+ mAppSegmentConsumer->setName(String8("Camera3-HeicComposite-AppSegmentStream"));
+ mAppSegmentSurface = new Surface(producer);
+
+ mStaticInfo = device->info();
+
+ res = device->createStream(mAppSegmentSurface, mAppSegmentMaxSize, 1, format,
+ kAppSegmentDataSpace, rotation, &mAppSegmentStreamId, physicalCameraId, surfaceIds);
+ if (res == OK) {
+ mAppSegmentSurfaceId = (*surfaceIds)[0];
+ } else {
+ ALOGE("%s: Failed to create JPEG App segment stream: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ if (!mUseGrid) {
+ res = mCodec->createInputSurface(&producer);
+ if (res != OK) {
+ ALOGE("%s: Failed to create input surface for Heic codec: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ } else {
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ mMainImageConsumer = new CpuConsumer(consumer, 1);
+ mMainImageConsumer->setFrameAvailableListener(this);
+ mMainImageConsumer->setName(String8("Camera3-HeicComposite-HevcInputYUVStream"));
+ }
+ mMainImageSurface = new Surface(producer);
+
+ res = mCodec->start();
+ if (res != OK) {
+ ALOGE("%s: Failed to start codec: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ std::vector<int> sourceSurfaceId;
+ // Use YUV_888 format if framework tiling is needed.
+ int srcStreamFmt = mUseGrid ? HAL_PIXEL_FORMAT_YCbCr_420_888 :
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ res = device->createStream(mMainImageSurface, width, height, srcStreamFmt, kHeifDataSpace,
+ rotation, id, physicalCameraId, &sourceSurfaceId);
+ if (res == OK) {
+ mMainImageSurfaceId = sourceSurfaceId[0];
+ mMainImageStreamId = *id;
+ } else {
+ ALOGE("%s: Failed to create main image stream: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ mOutputSurface = consumers[0];
+ res = registerCompositeStreamListener(getStreamId());
+ if (res != OK) {
+ ALOGE("%s: Failed to register HAL main image stream", __FUNCTION__);
+ return res;
+ }
+
+ initCopyRowFunction(width);
+ return res;
+}
+
+status_t HeicCompositeStream::deleteInternalStreams() {
+ requestExit();
+ auto res = join();
+ if (res != OK) {
+ ALOGE("%s: Failed to join with the main processing thread: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ }
+
+ deinitCodec();
+
+ if (mAppSegmentStreamId >= 0) {
+ sp<CameraDeviceBase> device = mDevice.promote();
+ if (!device.get()) {
+ ALOGE("%s: Invalid camera device!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ res = device->deleteStream(mAppSegmentStreamId);
+ mAppSegmentStreamId = -1;
+ }
+
+ if (mOutputSurface != nullptr) {
+ mOutputSurface->disconnect(NATIVE_WINDOW_API_CAMERA);
+ mOutputSurface.clear();
+ }
+ return res;
+}
+
+void HeicCompositeStream::onBufferReleased(const BufferInfo& bufferInfo) {
+ Mutex::Autolock l(mMutex);
+
+ if (bufferInfo.mError) return;
+
+ mCodecOutputBufferTimestamps.push(bufferInfo.mTimestamp);
+}
+
+// We need to get the settings early to handle the case where the codec output
+// arrives earlier than result metadata.
+void HeicCompositeStream::onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+ const CameraMetadata& settings) {
+ ATRACE_ASYNC_BEGIN("HEIC capture", frameNumber);
+
+ Mutex::Autolock l(mMutex);
+ if (mErrorState || (streamId != getStreamId())) {
+ return;
+ }
+
+ mPendingCaptureResults.emplace(frameNumber, CameraMetadata());
+
+ camera_metadata_ro_entry entry;
+
+ int32_t orientation = 0;
+ entry = settings.find(ANDROID_JPEG_ORIENTATION);
+ if (entry.count == 1) {
+ orientation = entry.data.i32[0];
+ }
+
+ int32_t quality = kDefaultJpegQuality;
+ entry = settings.find(ANDROID_JPEG_QUALITY);
+ if (entry.count == 1) {
+ quality = entry.data.i32[0];
+ }
+
+ mSettingsByFrameNumber[frameNumber] = std::make_pair(orientation, quality);
+}
+
+void HeicCompositeStream::onFrameAvailable(const BufferItem& item) {
+ if (item.mDataSpace == static_cast<android_dataspace>(kAppSegmentDataSpace)) {
+ ALOGV("%s: JPEG APP segments buffer with ts: %" PRIu64 " ms. arrived!",
+ __func__, ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mErrorState) {
+ mInputAppSegmentBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else if (item.mDataSpace == kHeifDataSpace) {
+ ALOGV("%s: YUV_888 buffer with ts: %" PRIu64 " ms. arrived!",
+ __func__, ns2ms(item.mTimestamp));
+
+ Mutex::Autolock l(mMutex);
+ if (!mUseGrid) {
+ ALOGE("%s: YUV_888 internal stream is only supported for HEVC tiling",
+ __FUNCTION__);
+ return;
+ }
+ if (!mErrorState) {
+ mInputYuvBuffers.push_back(item.mTimestamp);
+ mInputReadyCondition.signal();
+ }
+ } else {
+ ALOGE("%s: Unexpected data space: 0x%x", __FUNCTION__, item.mDataSpace);
+ }
+}
+
+status_t HeicCompositeStream::getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/) {
+ if (compositeOutput == nullptr) {
+ return BAD_VALUE;
+ }
+
+ compositeOutput->clear();
+
+ bool useGrid, useHeic;
+ bool isSizeSupported = isSizeSupportedByHeifEncoder(
+ streamInfo.width, streamInfo.height, &useHeic, &useGrid, nullptr);
+ if (!isSizeSupported) {
+ // Size is not supported by either encoder.
+ return OK;
+ }
+
+ compositeOutput->insert(compositeOutput->end(), 2, streamInfo);
+
+ // JPEG APPS segments Blob stream info
+ (*compositeOutput)[0].width = calcAppSegmentMaxSize(ch);
+ (*compositeOutput)[0].height = 1;
+ (*compositeOutput)[0].format = HAL_PIXEL_FORMAT_BLOB;
+ (*compositeOutput)[0].dataSpace = kAppSegmentDataSpace;
+ (*compositeOutput)[0].consumerUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+
+ // YUV/IMPLEMENTATION_DEFINED stream info
+ (*compositeOutput)[1].width = streamInfo.width;
+ (*compositeOutput)[1].height = streamInfo.height;
+ (*compositeOutput)[1].format = useGrid ? HAL_PIXEL_FORMAT_YCbCr_420_888 :
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ (*compositeOutput)[1].dataSpace = kHeifDataSpace;
+ (*compositeOutput)[1].consumerUsage = useHeic ? GRALLOC_USAGE_HW_IMAGE_ENCODER :
+ useGrid ? GRALLOC_USAGE_SW_READ_OFTEN : GRALLOC_USAGE_HW_VIDEO_ENCODER;
+
+ return NO_ERROR;
+}
+
+bool HeicCompositeStream::isSizeSupportedByHeifEncoder(int32_t width, int32_t height,
+ bool* useHeic, bool* useGrid, int64_t* stall) {
+ static HeicEncoderInfoManager& heicManager = HeicEncoderInfoManager::getInstance();
+ return heicManager.isSizeSupported(width, height, useHeic, useGrid, stall);
+}
+
+bool HeicCompositeStream::isInMemoryTempFileSupported() {
+ int memfd = syscall(__NR_memfd_create, "HEIF-try-memfd", MFD_CLOEXEC);
+ if (memfd == -1) {
+ if (errno != ENOSYS) {
+ ALOGE("%s: Failed to create tmpfs file. errno %d", __FUNCTION__, errno);
+ }
+ return false;
+ }
+ close(memfd);
+ return true;
+}
+
+void HeicCompositeStream::onHeicOutputFrameAvailable(
+ const CodecOutputBufferInfo& outputBufferInfo) {
+ Mutex::Autolock l(mMutex);
+
+ ALOGV("%s: index %d, offset %d, size %d, time %" PRId64 ", flags 0x%x",
+ __FUNCTION__, outputBufferInfo.index, outputBufferInfo.offset,
+ outputBufferInfo.size, outputBufferInfo.timeUs, outputBufferInfo.flags);
+
+ if (!mErrorState) {
+ if ((outputBufferInfo.size > 0) &&
+ ((outputBufferInfo.flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) == 0)) {
+ mCodecOutputBuffers.push_back(outputBufferInfo);
+ mInputReadyCondition.signal();
+ } else {
+ mCodec->releaseOutputBuffer(outputBufferInfo.index);
+ }
+ } else {
+ mCodec->releaseOutputBuffer(outputBufferInfo.index);
+ }
+}
+
+void HeicCompositeStream::onHeicInputFrameAvailable(int32_t index) {
+ Mutex::Autolock l(mMutex);
+
+ if (!mUseGrid) {
+ ALOGE("%s: Codec YUV input mode must only be used for Hevc tiling mode", __FUNCTION__);
+ return;
+ }
+
+ mCodecInputBuffers.push_back(index);
+ mInputReadyCondition.signal();
+}
+
+void HeicCompositeStream::onHeicFormatChanged(sp<AMessage>& newFormat) {
+ if (newFormat == nullptr) {
+ ALOGE("%s: newFormat must not be null!", __FUNCTION__);
+ return;
+ }
+
+ Mutex::Autolock l(mMutex);
+
+ AString mime;
+ AString mimeHeic(MIMETYPE_IMAGE_ANDROID_HEIC);
+ newFormat->findString(KEY_MIME, &mime);
+ if (mime != mimeHeic) {
+ // For HEVC codec, below keys need to be filled out or overwritten so that the
+ // muxer can handle them as a HEIC output image.
+ newFormat->setString(KEY_MIME, mimeHeic);
+ newFormat->setInt32(KEY_WIDTH, mOutputWidth);
+ newFormat->setInt32(KEY_HEIGHT, mOutputHeight);
+ if (mUseGrid) {
+ newFormat->setInt32(KEY_TILE_WIDTH, mGridWidth);
+ newFormat->setInt32(KEY_TILE_HEIGHT, mGridHeight);
+ newFormat->setInt32(KEY_GRID_ROWS, mGridRows);
+ newFormat->setInt32(KEY_GRID_COLUMNS, mGridCols);
+ }
+ }
+ newFormat->setInt32(KEY_IS_DEFAULT, 1 /*isPrimary*/);
+
+ int32_t gridRows, gridCols;
+ if (newFormat->findInt32(KEY_GRID_ROWS, &gridRows) &&
+ newFormat->findInt32(KEY_GRID_COLUMNS, &gridCols)) {
+ mNumOutputTiles = gridRows * gridCols;
+ } else {
+ mNumOutputTiles = 1;
+ }
+
+ ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
+ mFormat = newFormat;
+}
+
+void HeicCompositeStream::onHeicCodecError() {
+ Mutex::Autolock l(mMutex);
+ mErrorState = true;
+}
+
+status_t HeicCompositeStream::configureStream() {
+ if (isRunning()) {
+ // Processing thread is already running, nothing more to do.
+ return NO_ERROR;
+ }
+
+ if (mOutputSurface.get() == nullptr) {
+ ALOGE("%s: No valid output surface set!", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ auto res = mOutputSurface->connect(NATIVE_WINDOW_API_CAMERA, mProducerListener);
+ if (res != OK) {
+ ALOGE("%s: Unable to connect to native window for stream %d",
+ __FUNCTION__, mMainImageStreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffers_format(mOutputSurface.get(), HAL_PIXEL_FORMAT_BLOB))
+ != OK) {
+ ALOGE("%s: Unable to configure stream buffer format for stream %d", __FUNCTION__,
+ mMainImageStreamId);
+ return res;
+ }
+
+ ANativeWindow *anwConsumer = mOutputSurface.get();
+ int maxConsumerBuffers;
+ if ((res = anwConsumer->query(anwConsumer, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+ &maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to query consumer undequeued"
+ " buffer count for stream %d", __FUNCTION__, mMainImageStreamId);
+ return res;
+ }
+
+ // Cannot use SourceSurface buffer count since it could be codec's 512*512 tile
+ // buffer count.
+ int maxProducerBuffers = 1;
+ if ((res = native_window_set_buffer_count(
+ anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+ ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mMainImageStreamId);
+ return res;
+ }
+
+ if ((res = native_window_set_buffers_dimensions(anwConsumer, mMaxHeicBufferSize, 1)) != OK) {
+ ALOGE("%s: Unable to set buffer dimension %zu x 1 for stream %d: %s (%d)",
+ __FUNCTION__, mMaxHeicBufferSize, mMainImageStreamId, strerror(-res), res);
+ return res;
+ }
+
+ run("HeicCompositeStreamProc");
+
+ return NO_ERROR;
+}
+
+status_t HeicCompositeStream::insertGbp(SurfaceMap* /*out*/outSurfaceMap,
+ Vector<int32_t>* /*out*/outputStreamIds, int32_t* /*out*/currentStreamId) {
+ if (outSurfaceMap->find(mAppSegmentStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mAppSegmentStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mAppSegmentStreamId);
+ }
+ (*outSurfaceMap)[mAppSegmentStreamId].push_back(mAppSegmentSurfaceId);
+
+ if (outSurfaceMap->find(mMainImageStreamId) == outSurfaceMap->end()) {
+ (*outSurfaceMap)[mMainImageStreamId] = std::vector<size_t>();
+ outputStreamIds->push_back(mMainImageStreamId);
+ }
+ (*outSurfaceMap)[mMainImageStreamId].push_back(mMainImageSurfaceId);
+
+ if (currentStreamId != nullptr) {
+ *currentStreamId = mMainImageStreamId;
+ }
+
+ return NO_ERROR;
+}
+
+void HeicCompositeStream::onShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) {
+ Mutex::Autolock l(mMutex);
+ if (mErrorState) {
+ return;
+ }
+
+ if (mSettingsByFrameNumber.find(resultExtras.frameNumber) != mSettingsByFrameNumber.end()) {
+ mFrameNumberMap.emplace(resultExtras.frameNumber, timestamp);
+ mSettingsByTimestamp[timestamp] = mSettingsByFrameNumber[resultExtras.frameNumber];
+ mSettingsByFrameNumber.erase(resultExtras.frameNumber);
+ mInputReadyCondition.signal();
+ }
+}
+
+void HeicCompositeStream::compilePendingInputLocked() {
+ while (!mSettingsByTimestamp.empty()) {
+ auto it = mSettingsByTimestamp.begin();
+ mPendingInputFrames[it->first].orientation = it->second.first;
+ mPendingInputFrames[it->first].quality = it->second.second;
+ mSettingsByTimestamp.erase(it);
+ }
+
+ while (!mInputAppSegmentBuffers.empty() && !mAppSegmentBufferAcquired) {
+ CpuConsumer::LockedBuffer imgBuffer;
+ auto it = mInputAppSegmentBuffers.begin();
+ auto res = mAppSegmentConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Cannot lock any more buffers.
+ break;
+ } else if ((res != OK) || (*it != imgBuffer.timestamp)) {
+ if (res != OK) {
+ ALOGE("%s: Error locking JPEG_APP_SEGMENTS image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ } else {
+ ALOGE("%s: Expecting JPEG_APP_SEGMENTS buffer with time stamp: %" PRId64
+ " received buffer with time stamp: %" PRId64, __FUNCTION__,
+ *it, imgBuffer.timestamp);
+ }
+ mPendingInputFrames[*it].error = true;
+ mInputAppSegmentBuffers.erase(it);
+ continue;
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mAppSegmentConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].appSegmentBuffer = imgBuffer;
+ mAppSegmentBufferAcquired = true;
+ }
+ mInputAppSegmentBuffers.erase(it);
+ }
+
+ while (!mInputYuvBuffers.empty() && !mYuvBufferAcquired) {
+ CpuConsumer::LockedBuffer imgBuffer;
+ auto it = mInputYuvBuffers.begin();
+ auto res = mMainImageConsumer->lockNextBuffer(&imgBuffer);
+ if (res == NOT_ENOUGH_DATA) {
+ // Cannot lock any more buffers.
+ break;
+ } else if (res != OK) {
+ ALOGE("%s: Error locking YUV_888 image buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ mPendingInputFrames[*it].error = true;
+ mInputYuvBuffers.erase(it);
+ continue;
+ } else if (*it != imgBuffer.timestamp) {
+ ALOGW("%s: Expecting YUV_888 buffer with time stamp: %" PRId64 " received buffer with "
+ "time stamp: %" PRId64, __FUNCTION__, *it, imgBuffer.timestamp);
+ mPendingInputFrames[*it].error = true;
+ mInputYuvBuffers.erase(it);
+ continue;
+ }
+
+ if ((mPendingInputFrames.find(imgBuffer.timestamp) != mPendingInputFrames.end()) &&
+ (mPendingInputFrames[imgBuffer.timestamp].error)) {
+ mMainImageConsumer->unlockBuffer(imgBuffer);
+ } else {
+ mPendingInputFrames[imgBuffer.timestamp].yuvBuffer = imgBuffer;
+ mYuvBufferAcquired = true;
+ }
+ mInputYuvBuffers.erase(it);
+ }
+
+ while (!mCodecOutputBuffers.empty()) {
+ auto it = mCodecOutputBuffers.begin();
+ // The bitstream buffer timestamp doesn't necessarily correlate directly with the
+ // input buffer timestamp. Assume the encoder is FIFO from input to output and use
+ // a queue to look up the timestamp.
+ int64_t bufferTime = -1;
+ if (mCodecOutputBufferTimestamps.empty()) {
+ ALOGE("%s: Failed to find buffer timestamp for codec output buffer!", __FUNCTION__);
+ } else {
+ // Direct mapping between camera timestamp (in ns) and codec timestamp (in us).
+ bufferTime = mCodecOutputBufferTimestamps.front();
+ mOutputBufferCounter++;
+ if (mOutputBufferCounter == mNumOutputTiles) {
+ mCodecOutputBufferTimestamps.pop();
+ mOutputBufferCounter = 0;
+ }
+
+ mPendingInputFrames[bufferTime].codecOutputBuffers.push_back(*it);
+ }
+ mCodecOutputBuffers.erase(it);
+ }
+
+ while (!mFrameNumberMap.empty()) {
+ auto it = mFrameNumberMap.begin();
+ mPendingInputFrames[it->second].frameNumber = it->first;
+ mFrameNumberMap.erase(it);
+ }
+
+ while (!mCaptureResults.empty()) {
+ auto it = mCaptureResults.begin();
+ // Negative timestamp indicates that something went wrong during the capture result
+ // collection process.
+ if (it->first >= 0) {
+ if (mPendingInputFrames[it->first].frameNumber == std::get<0>(it->second)) {
+ mPendingInputFrames[it->first].result =
+ std::make_unique<CameraMetadata>(std::get<1>(it->second));
+ } else {
+ ALOGE("%s: Capture result frameNumber/timestamp mapping changed between "
+ "shutter and capture result!", __FUNCTION__);
+ }
+ }
+ mCaptureResults.erase(it);
+ }
+
+ // mErrorFrameNumbers stores frame number of dropped buffers.
+ auto it = mErrorFrameNumbers.begin();
+ while (it != mErrorFrameNumbers.end()) {
+ bool frameFound = false;
+ for (auto &inputFrame : mPendingInputFrames) {
+ if (inputFrame.second.frameNumber == *it) {
+ inputFrame.second.error = true;
+ frameFound = true;
+ break;
+ }
+ }
+
+ if (frameFound) {
+ it = mErrorFrameNumbers.erase(it);
+ } else {
+ ALOGW("%s: Not able to find failing input with frame number: %" PRId64, __FUNCTION__,
+ *it);
+ it++;
+ }
+ }
+
+ // Distribute codec input buffers to be filled out from YUV output
+ for (auto it = mPendingInputFrames.begin();
+ it != mPendingInputFrames.end() && mCodecInputBuffers.size() > 0; it++) {
+ InputFrame& inputFrame(it->second);
+ if (inputFrame.codecInputCounter < mGridRows * mGridCols) {
+ // Number of free codec input buffers to assign as tiles for the current
+ // input image.
+ size_t newInputTiles = std::min(mCodecInputBuffers.size(),
+ mGridRows * mGridCols - inputFrame.codecInputCounter);
+ for (size_t i = 0; i < newInputTiles; i++) {
+ CodecInputBufferInfo inputInfo =
+ { mCodecInputBuffers[0], mGridTimestampUs++, inputFrame.codecInputCounter };
+ inputFrame.codecInputBuffers.push_back(inputInfo);
+
+ mCodecInputBuffers.erase(mCodecInputBuffers.begin());
+ inputFrame.codecInputCounter++;
+ }
+ break;
+ }
+ }
+}
+
+bool HeicCompositeStream::getNextReadyInputLocked(int64_t *currentTs /*out*/) {
+ if (currentTs == nullptr) {
+ return false;
+ }
+
+ bool newInputAvailable = false;
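+ // A frame is ready when its APP segment buffer and capture result are both present
+ // and not yet written, or it has pending codec output buffers, or it has a YUV
+ // buffer with codec input buffers waiting to be filled; pick the oldest such frame.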
+ for (const auto& it : mPendingInputFrames) {
+ bool appSegmentReady = (it.second.appSegmentBuffer.data != nullptr) &&
+ !it.second.appSegmentWritten && it.second.result != nullptr;
+ bool codecOutputReady = !it.second.codecOutputBuffers.empty();
+ bool codecInputReady = (it.second.yuvBuffer.data != nullptr) &&
+ (!it.second.codecInputBuffers.empty());
+ if ((!it.second.error) &&
+ (it.first < *currentTs) &&
+ (appSegmentReady || codecOutputReady || codecInputReady)) {
+ *currentTs = it.first;
+ newInputAvailable = true;
+ break;
+ }
+ }
+
+ return newInputAvailable;
+}
+
+int64_t HeicCompositeStream::getNextFailingInputLocked(int64_t *currentTs /*out*/) {
+ int64_t res = -1;
+ if (currentTs == nullptr) {
+ return res;
+ }
+
+ for (const auto& it : mPendingInputFrames) {
+ if (it.second.error && !it.second.errorNotified && (it.first < *currentTs)) {
+ *currentTs = it.first;
+ res = it.second.frameNumber;
+ break;
+ }
+ }
+
+ return res;
+}
+
+status_t HeicCompositeStream::processInputFrame(nsecs_t timestamp,
+ InputFrame &inputFrame) {
+ ATRACE_CALL();
+ status_t res = OK;
+
+ bool appSegmentReady = inputFrame.appSegmentBuffer.data != nullptr &&
+ !inputFrame.appSegmentWritten && inputFrame.result != nullptr;
+ bool codecOutputReady = inputFrame.codecOutputBuffers.size() > 0;
+ bool codecInputReady = inputFrame.yuvBuffer.data != nullptr &&
+ !inputFrame.codecInputBuffers.empty();
+
+ if (!appSegmentReady && !codecOutputReady && !codecInputReady) {
+ ALOGW("%s: No valid appSegmentBuffer/codec input/outputBuffer available!", __FUNCTION__);
+ return OK;
+ }
+
+ // Handle inputs for Hevc tiling
+ if (codecInputReady) {
+ res = processCodecInputFrame(inputFrame);
+ if (res != OK) {
+ ALOGE("%s: Failed to process codec input frame: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ // Initialize and start muxer if not yet done so
+ if (inputFrame.muxer == nullptr) {
+ res = startMuxerForInputFrame(timestamp, inputFrame);
+ if (res != OK) {
+ ALOGE("%s: Failed to create and start muxer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ // Write JPEG APP segments data to the muxer.
+ if (appSegmentReady && inputFrame.muxer != nullptr) {
+ res = processAppSegment(timestamp, inputFrame);
+ if (res != OK) {
+ ALOGE("%s: Failed to process JPEG APP segments: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ // Write media codec bitstream buffers to muxer.
+ while (!inputFrame.codecOutputBuffers.empty()) {
+ res = processOneCodecOutputFrame(timestamp, inputFrame);
+ if (res != OK) {
+ ALOGE("%s: Failed to process codec output frame: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ if (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0) {
+ res = processCompletedInputFrame(timestamp, inputFrame);
+ if (res != OK) {
+ ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ }
+
+ return res;
+}
+
+status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFrame &inputFrame) {
+ sp<ANativeWindow> outputANW = mOutputSurface;
+ if (inputFrame.codecOutputBuffers.size() == 0) {
+ // No codec output buffer has been generated yet. Continue to wait.
+ return OK;
+ }
+
+ auto res = outputANW->dequeueBuffer(mOutputSurface.get(), &inputFrame.anb, &inputFrame.fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error retrieving output buffer: %s (%d)", __FUNCTION__, strerror(-res),
+ res);
+ return res;
+ }
+
+ // Combine current thread id, stream id and timestamp to uniquely identify image.
+ std::ostringstream tempOutputFile;
+ tempOutputFile << "HEIF-" << pthread_self() << "-"
+ << getStreamId() << "-" << timestamp;
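+ // Back the muxer output with an anonymous in-memory file (memfd) rather than an
+ // on-disk temporary file.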
+ inputFrame.fileFd = syscall(__NR_memfd_create, tempOutputFile.str().c_str(), MFD_CLOEXEC);
+ if (inputFrame.fileFd < 0) {
+ ALOGE("%s: Failed to create file %s. Error no is %d", __FUNCTION__,
+ tempOutputFile.str().c_str(), errno);
+ return NO_INIT;
+ }
+ inputFrame.muxer = new MediaMuxer(inputFrame.fileFd, MediaMuxer::OUTPUT_FORMAT_HEIF);
+ if (inputFrame.muxer == nullptr) {
+ ALOGE("%s: Failed to create MediaMuxer for file fd %d",
+ __FUNCTION__, inputFrame.fileFd);
+ return NO_INIT;
+ }
+
+ res = inputFrame.muxer->setOrientationHint(inputFrame.orientation);
+ if (res != OK) {
+ ALOGE("%s: Failed to setOrientationHint: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ // Set encoder quality
+ {
+ sp<AMessage> qualityParams = new AMessage;
+ qualityParams->setInt32(PARAMETER_KEY_VIDEO_BITRATE, inputFrame.quality);
+ res = mCodec->setParameters(qualityParams);
+ if (res != OK) {
+ ALOGE("%s: Failed to set codec quality: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ }
+
+ ssize_t trackId = inputFrame.muxer->addTrack(mFormat);
+ if (trackId < 0) {
+ ALOGE("%s: Failed to addTrack to the muxer: %zd", __FUNCTION__, trackId);
+ return NO_INIT;
+ }
+
+ inputFrame.trackIndex = trackId;
+ inputFrame.pendingOutputTiles = mNumOutputTiles;
+
+ res = inputFrame.muxer->start();
+ if (res != OK) {
+ ALOGE("%s: Failed to start MediaMuxer: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ return OK;
+}
+
+status_t HeicCompositeStream::processAppSegment(nsecs_t timestamp, InputFrame &inputFrame) {
+ size_t app1Size = 0;
+ auto appSegmentSize = findAppSegmentsSize(inputFrame.appSegmentBuffer.data,
+ inputFrame.appSegmentBuffer.width * inputFrame.appSegmentBuffer.height,
+ &app1Size);
+ ALOGV("%s: appSegmentSize is %zu, width %d, height %d, app1Size %zu", __FUNCTION__,
+ appSegmentSize, inputFrame.appSegmentBuffer.width,
+ inputFrame.appSegmentBuffer.height, app1Size);
+ if (appSegmentSize == 0) {
+ ALOGE("%s: Failed to find JPEG APP segment size", __FUNCTION__);
+ return NO_INIT;
+ }
+
+ std::unique_ptr<ExifUtils> exifUtils(ExifUtils::create());
+ auto exifRes = exifUtils->initialize(inputFrame.appSegmentBuffer.data, app1Size);
+ if (!exifRes) {
+ ALOGE("%s: Failed to initialize ExifUtils object!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ exifRes = exifUtils->setFromMetadata(*inputFrame.result, mStaticInfo,
+ mOutputWidth, mOutputHeight);
+ if (!exifRes) {
+ ALOGE("%s: Failed to set Exif tags using metadata and main image sizes", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ exifRes = exifUtils->setOrientation(inputFrame.orientation);
+ if (!exifRes) {
+ ALOGE("%s: ExifUtils failed to set orientation", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ exifRes = exifUtils->generateApp1();
+ if (!exifRes) {
+ ALOGE("%s: ExifUtils failed to generate APP1 segment", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ unsigned int newApp1Length = exifUtils->getApp1Length();
+ const uint8_t *newApp1Segment = exifUtils->getApp1Buffer();
+
+ // Assemble the APP1 marker buffer required by MediaCodec.
+ uint8_t kExifApp1Marker[] = {'E', 'x', 'i', 'f', 0xFF, 0xE1, 0x00, 0x00};
+ kExifApp1Marker[6] = static_cast<uint8_t>(newApp1Length >> 8);
+ kExifApp1Marker[7] = static_cast<uint8_t>(newApp1Length & 0xFF);
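+ // The assembled buffer layout is: the ASCII tag 'Exif', the 0xFFE1 APP1 marker, a
+ // two-byte big-endian length, the regenerated APP1 (EXIF) payload, then any
+ // remaining APPn segments copied from the source buffer.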
+ size_t appSegmentBufferSize = sizeof(kExifApp1Marker) +
+ appSegmentSize - app1Size + newApp1Length;
+ uint8_t* appSegmentBuffer = new uint8_t[appSegmentBufferSize];
+ memcpy(appSegmentBuffer, kExifApp1Marker, sizeof(kExifApp1Marker));
+ memcpy(appSegmentBuffer + sizeof(kExifApp1Marker), newApp1Segment, newApp1Length);
+ if (appSegmentSize - app1Size > 0) {
+ memcpy(appSegmentBuffer + sizeof(kExifApp1Marker) + newApp1Length,
+ inputFrame.appSegmentBuffer.data + app1Size, appSegmentSize - app1Size);
+ }
+
+ sp<ABuffer> aBuffer = new ABuffer(appSegmentBuffer, appSegmentBufferSize);
+ auto res = inputFrame.muxer->writeSampleData(aBuffer, inputFrame.trackIndex,
+ timestamp, MediaCodec::BUFFER_FLAG_MUXER_DATA);
+ delete[] appSegmentBuffer;
+
+ if (res != OK) {
+ ALOGE("%s: Failed to write JPEG APP segments to muxer: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ inputFrame.appSegmentWritten = true;
+
+ return OK;
+}
+
+status_t HeicCompositeStream::processCodecInputFrame(InputFrame &inputFrame) {
+ for (auto& inputBuffer : inputFrame.codecInputBuffers) {
+ sp<MediaCodecBuffer> buffer;
+ auto res = mCodec->getInputBuffer(inputBuffer.index, &buffer);
+ if (res != OK) {
+ ALOGE("%s: Error getting codec input buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ // Copy one tile from source to destination.
+ size_t tileX = inputBuffer.tileIndex % mGridCols;
+ size_t tileY = inputBuffer.tileIndex / mGridCols;
+ size_t top = mGridHeight * tileY;
+ size_t left = mGridWidth * tileX;
+ size_t width = (tileX == static_cast<size_t>(mGridCols) - 1) ?
+ mOutputWidth - tileX * mGridWidth : mGridWidth;
+ size_t height = (tileY == static_cast<size_t>(mGridRows) - 1) ?
+ mOutputHeight - tileY * mGridHeight : mGridHeight;
+ ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu",
+ __FUNCTION__, tileX, tileY, top, left, width, height);
+
+ res = copyOneYuvTile(buffer, inputFrame.yuvBuffer, top, left, width, height);
+ if (res != OK) {
+ ALOGE("%s: Failed to copy YUV tile %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ res = mCodec->queueInputBuffer(inputBuffer.index, 0, buffer->capacity(),
+ inputBuffer.timeUs, 0, nullptr /*errorDetailMsg*/);
+ if (res != OK) {
+ ALOGE("%s: Failed to queueInputBuffer to Codec: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+ }
+
+ inputFrame.codecInputBuffers.clear();
+ return OK;
+}
+
+status_t HeicCompositeStream::processOneCodecOutputFrame(nsecs_t timestamp,
+ InputFrame &inputFrame) {
+ auto it = inputFrame.codecOutputBuffers.begin();
+ sp<MediaCodecBuffer> buffer;
+ status_t res = mCodec->getOutputBuffer(it->index, &buffer);
+ if (res != OK) {
+ ALOGE("%s: Error getting Heic codec output buffer at index %d: %s (%d)",
+ __FUNCTION__, it->index, strerror(-res), res);
+ return res;
+ }
+ if (buffer == nullptr) {
+ ALOGE("%s: Invalid Heic codec output buffer at index %d",
+ __FUNCTION__, it->index);
+ return BAD_VALUE;
+ }
+
+ sp<ABuffer> aBuffer = new ABuffer(buffer->data(), buffer->size());
+ res = inputFrame.muxer->writeSampleData(
+ aBuffer, inputFrame.trackIndex, timestamp, 0 /*flags*/);
+ if (res != OK) {
+ ALOGE("%s: Failed to write buffer index %d to muxer: %s (%d)",
+ __FUNCTION__, it->index, strerror(-res), res);
+ return res;
+ }
+
+ mCodec->releaseOutputBuffer(it->index);
+ if (inputFrame.pendingOutputTiles == 0) {
+ ALOGW("%s: Codec generated more tiles than expected!", __FUNCTION__);
+ } else {
+ inputFrame.pendingOutputTiles--;
+ }
+
+ inputFrame.codecOutputBuffers.erase(inputFrame.codecOutputBuffers.begin());
+ return OK;
+}
+
+status_t HeicCompositeStream::processCompletedInputFrame(nsecs_t timestamp,
+ InputFrame &inputFrame) {
+ sp<ANativeWindow> outputANW = mOutputSurface;
+ inputFrame.muxer->stop();
+
+ // Copy the content of the file to memory.
+ sp<GraphicBuffer> gb = GraphicBuffer::from(inputFrame.anb);
+ void* dstBuffer;
+ auto res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, inputFrame.fenceFd);
+ if (res != OK) {
+ ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ off_t fSize = lseek(inputFrame.fileFd, 0, SEEK_END);
+ if (static_cast<size_t>(fSize) > mMaxHeicBufferSize - sizeof(CameraBlob)) {
+ ALOGE("%s: Error: MediaMuxer output size %ld is larger than buffer sizer %zu",
+ __FUNCTION__, fSize, mMaxHeicBufferSize - sizeof(CameraBlob));
+ return BAD_VALUE;
+ }
+
+ lseek(inputFrame.fileFd, 0, SEEK_SET);
+ ssize_t bytesRead = read(inputFrame.fileFd, dstBuffer, fSize);
+ if (bytesRead < fSize) {
+ ALOGE("%s: Only %zd of %ld bytes read", __FUNCTION__, bytesRead, fSize);
+ return BAD_VALUE;
+ }
+
+ close(inputFrame.fileFd);
+ inputFrame.fileFd = -1;
+
+ // Fill in HEIC header
+ uint8_t *header = static_cast<uint8_t*>(dstBuffer) + mMaxHeicBufferSize - sizeof(CameraBlob);
+ struct CameraBlob *blobHeader = (struct CameraBlob *)header;
+ // Must be in sync with CAMERA3_HEIC_BLOB_ID in android_media_Utils.cpp
+ blobHeader->blobId = static_cast<CameraBlobId>(0x00FE);
+ blobHeader->blobSize = fSize;
+
+ res = native_window_set_buffers_timestamp(mOutputSurface.get(), timestamp);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+ __FUNCTION__, getStreamId(), strerror(-res), res);
+ return res;
+ }
+
+ res = outputANW->queueBuffer(mOutputSurface.get(), inputFrame.anb, /*fence*/ -1);
+ if (res != OK) {
+ ALOGE("%s: Failed to queueBuffer to Heic stream: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ inputFrame.anb = nullptr;
+
+ ATRACE_ASYNC_END("HEIC capture", inputFrame.frameNumber);
+ return OK;
+}
+
+
+void HeicCompositeStream::releaseInputFrameLocked(InputFrame *inputFrame /*out*/) {
+ if (inputFrame == nullptr) {
+ return;
+ }
+
+ if (inputFrame->appSegmentBuffer.data != nullptr) {
+ mAppSegmentConsumer->unlockBuffer(inputFrame->appSegmentBuffer);
+ inputFrame->appSegmentBuffer.data = nullptr;
+ mAppSegmentBufferAcquired = false;
+ }
+
+ while (!inputFrame->codecOutputBuffers.empty()) {
+ auto it = inputFrame->codecOutputBuffers.begin();
+ ALOGV("%s: releaseOutputBuffer index %d", __FUNCTION__, it->index);
+ mCodec->releaseOutputBuffer(it->index);
+ inputFrame->codecOutputBuffers.erase(it);
+ }
+
+ if (inputFrame->yuvBuffer.data != nullptr) {
+ mMainImageConsumer->unlockBuffer(inputFrame->yuvBuffer);
+ inputFrame->yuvBuffer.data = nullptr;
+ mYuvBufferAcquired = false;
+ }
+
+ while (!inputFrame->codecInputBuffers.empty()) {
+ auto it = inputFrame->codecInputBuffers.begin();
+ inputFrame->codecInputBuffers.erase(it);
+ }
+
+ if ((inputFrame->error || mErrorState) && !inputFrame->errorNotified) {
+ notifyError(inputFrame->frameNumber);
+ inputFrame->errorNotified = true;
+ }
+
+ if (inputFrame->fileFd >= 0) {
+ close(inputFrame->fileFd);
+ inputFrame->fileFd = -1;
+ }
+
+ if (inputFrame->anb != nullptr) {
+ sp<ANativeWindow> outputANW = mOutputSurface;
+ outputANW->cancelBuffer(mOutputSurface.get(), inputFrame->anb, /*fence*/ -1);
+ inputFrame->anb = nullptr;
+ }
+}
+
+void HeicCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+ auto it = mPendingInputFrames.begin();
+ while (it != mPendingInputFrames.end()) {
+ if (it->first <= currentTs) {
+ releaseInputFrameLocked(&it->second);
+ it = mPendingInputFrames.erase(it);
+ } else {
+ it++;
+ }
+ }
+}
+
+status_t HeicCompositeStream::initializeCodec(uint32_t width, uint32_t height,
+ const sp<CameraDeviceBase>& cameraDevice) {
+ ALOGV("%s", __FUNCTION__);
+
+ bool useGrid = false;
+ bool isSizeSupported = isSizeSupportedByHeifEncoder(width, height,
+ &mUseHeic, &useGrid, nullptr);
+ if (!isSizeSupported) {
+ ALOGE("%s: Encoder doesnt' support size %u x %u!",
+ __FUNCTION__, width, height);
+ return BAD_VALUE;
+ }
+
+ // Create Looper for MediaCodec.
+ auto desiredMime = mUseHeic ? MIMETYPE_IMAGE_ANDROID_HEIC : MIMETYPE_VIDEO_HEVC;
+ mCodecLooper = new ALooper;
+ mCodecLooper->setName("Camera3-HeicComposite-MediaCodecLooper");
+ status_t res = mCodecLooper->start(
+ false, // runOnCallingThread
+ false, // canCallJava
+ PRIORITY_AUDIO);
+ if (res != OK) {
+ ALOGE("%s: Failed to start codec looper: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return NO_INIT;
+ }
+
+ // Create HEIC/HEVC codec.
+ mCodec = MediaCodec::CreateByType(mCodecLooper, desiredMime, true /*encoder*/);
+ if (mCodec == nullptr) {
+ ALOGE("%s: Failed to create codec for %s", __FUNCTION__, desiredMime);
+ return NO_INIT;
+ }
+
+ // Create Looper and handler for Codec callback.
+ mCodecCallbackHandler = new CodecCallbackHandler(this);
+ if (mCodecCallbackHandler == nullptr) {
+ ALOGE("%s: Failed to create codec callback handler", __FUNCTION__);
+ return NO_MEMORY;
+ }
+ mCallbackLooper = new ALooper;
+ mCallbackLooper->setName("Camera3-HeicComposite-MediaCodecCallbackLooper");
+ res = mCallbackLooper->start(
+ false, // runOnCallingThread
+ false, // canCallJava
+ PRIORITY_AUDIO);
+ if (res != OK) {
+ ALOGE("%s: Failed to start media callback looper: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ return NO_INIT;
+ }
+ mCallbackLooper->registerHandler(mCodecCallbackHandler);
+
+ mAsyncNotify = new AMessage(kWhatCallbackNotify, mCodecCallbackHandler);
+ res = mCodec->setCallback(mAsyncNotify);
+ if (res != OK) {
+ ALOGE("%s: Failed to set MediaCodec callback: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ // Create output format and configure the Codec.
+ sp<AMessage> outputFormat = new AMessage();
+ outputFormat->setString(KEY_MIME, desiredMime);
+ outputFormat->setInt32(KEY_BITRATE_MODE, BITRATE_MODE_CQ);
+ outputFormat->setInt32(KEY_QUALITY, kDefaultJpegQuality);
+ // Ask codec to skip timestamp check and encode all frames.
+ outputFormat->setInt64("max-pts-gap-to-encoder", kNoFrameDropMaxPtsGap);
+
+ int32_t gridWidth, gridHeight, gridRows, gridCols;
+ if (useGrid || mUseHeic) {
+ gridWidth = HeicEncoderInfoManager::kGridWidth;
+ gridHeight = HeicEncoderInfoManager::kGridHeight;
+ gridRows = (height + gridHeight - 1)/gridHeight;
+ gridCols = (width + gridWidth - 1)/gridWidth;
+
+ if (mUseHeic) {
+ outputFormat->setInt32(KEY_TILE_WIDTH, gridWidth);
+ outputFormat->setInt32(KEY_TILE_HEIGHT, gridHeight);
+ outputFormat->setInt32(KEY_GRID_COLUMNS, gridCols);
+ outputFormat->setInt32(KEY_GRID_ROWS, gridRows);
+ }
+
+ } else {
+ gridWidth = width;
+ gridHeight = height;
+ gridRows = 1;
+ gridCols = 1;
+ }
+
+ outputFormat->setInt32(KEY_WIDTH, !useGrid ? width : gridWidth);
+ outputFormat->setInt32(KEY_HEIGHT, !useGrid ? height : gridHeight);
+ outputFormat->setInt32(KEY_I_FRAME_INTERVAL, 0);
+ outputFormat->setInt32(KEY_COLOR_FORMAT,
+ useGrid ? COLOR_FormatYUV420Flexible : COLOR_FormatSurface);
+ outputFormat->setInt32(KEY_FRAME_RATE, gridRows * gridCols);
+ // This only serves as a hint to encoder when encoding is not real-time.
+ outputFormat->setInt32(KEY_OPERATING_RATE, useGrid ? kGridOpRate : kNoGridOpRate);
+
+ res = mCodec->configure(outputFormat, nullptr /*nativeWindow*/,
+ nullptr /*crypto*/, CONFIGURE_FLAG_ENCODE);
+ if (res != OK) {
+ ALOGE("%s: Failed to configure codec: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ mGridWidth = gridWidth;
+ mGridHeight = gridHeight;
+ mGridRows = gridRows;
+ mGridCols = gridCols;
+ mUseGrid = useGrid;
+ mOutputWidth = width;
+ mOutputHeight = height;
+ mAppSegmentMaxSize = calcAppSegmentMaxSize(cameraDevice->info());
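+ // Worst-case HEIC output: an uncompressed YUV420 main image (3/2 bytes per pixel)
+ // plus the maximum APP segment size.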
+ mMaxHeicBufferSize = mOutputWidth * mOutputHeight * 3 / 2 + mAppSegmentMaxSize;
+
+ return OK;
+}
+
+void HeicCompositeStream::deinitCodec() {
+ ALOGV("%s", __FUNCTION__);
+ if (mCodec != nullptr) {
+ mCodec->stop();
+ mCodec->release();
+ mCodec.clear();
+ }
+
+ if (mCodecLooper != nullptr) {
+ mCodecLooper->stop();
+ mCodecLooper.clear();
+ }
+
+ if (mCallbackLooper != nullptr) {
+ mCallbackLooper->stop();
+ mCallbackLooper.clear();
+ }
+
+ mAsyncNotify.clear();
+ mFormat.clear();
+}
+
+ // Return the size of the complete list of APP segments; 0 indicates failure.
+size_t HeicCompositeStream::findAppSegmentsSize(const uint8_t* appSegmentBuffer,
+ size_t maxSize, size_t *app1SegmentSize) {
+ if (appSegmentBuffer == nullptr || app1SegmentSize == nullptr) {
+ ALOGE("%s: Invalid input appSegmentBuffer %p, app1SegmentSize %p",
+ __FUNCTION__, appSegmentBuffer, app1SegmentSize);
+ return 0;
+ }
+
+ size_t expectedSize = 0;
+ // First check for EXIF transport header at the end of the buffer
+ const uint8_t *header = appSegmentBuffer + (maxSize - sizeof(struct CameraBlob));
+ const struct CameraBlob *blob = (const struct CameraBlob*)(header);
+ if (blob->blobId != CameraBlobId::JPEG_APP_SEGMENTS) {
+ ALOGE("%s: Invalid EXIF blobId %hu", __FUNCTION__, blob->blobId);
+ return 0;
+ }
+
+ expectedSize = blob->blobSize;
+ if (expectedSize == 0 || expectedSize > maxSize - sizeof(struct CameraBlob)) {
+ ALOGE("%s: Invalid blobSize %zu.", __FUNCTION__, expectedSize);
+ return 0;
+ }
+
+ uint32_t totalSize = 0;
+
+ // Verify APP1 marker (mandatory)
+ uint8_t app1Marker[] = {0xFF, 0xE1};
+ if (memcmp(appSegmentBuffer, app1Marker, sizeof(app1Marker))) {
+ ALOGE("%s: Invalid APP1 marker: %x, %x", __FUNCTION__,
+ appSegmentBuffer[0], appSegmentBuffer[1]);
+ return 0;
+ }
+ totalSize += sizeof(app1Marker);
+
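+ // Per the JPEG convention, the two-byte segment length includes the length bytes
+ // themselves but not the preceding 0xFFE1 marker.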
+ uint16_t app1Size = (static_cast<uint16_t>(appSegmentBuffer[totalSize]) << 8) +
+ appSegmentBuffer[totalSize+1];
+ totalSize += app1Size;
+
+ ALOGV("%s: Expected APP segments size %zu, APP1 segment size %u",
+ __FUNCTION__, expectedSize, app1Size);
+ while (totalSize < expectedSize) {
+ if (appSegmentBuffer[totalSize] != 0xFF ||
+ appSegmentBuffer[totalSize+1] <= 0xE1 ||
+ appSegmentBuffer[totalSize+1] > 0xEF) {
+ // Invalid APPn marker
+ ALOGE("%s: Invalid APPn marker: %x, %x", __FUNCTION__,
+ appSegmentBuffer[totalSize], appSegmentBuffer[totalSize+1]);
+ return 0;
+ }
+ totalSize += 2;
+
+ uint16_t appnSize = (static_cast<uint16_t>(appSegmentBuffer[totalSize]) << 8) +
+ appSegmentBuffer[totalSize+1];
+ totalSize += appnSize;
+ }
+
+ if (totalSize != expectedSize) {
+ ALOGE("%s: Invalid JPEG APP segments: totalSize %u vs expected size %zu",
+ __FUNCTION__, totalSize, expectedSize);
+ return 0;
+ }
+
+ *app1SegmentSize = app1Size + sizeof(app1Marker);
+ return expectedSize;
+}
+
+int64_t HeicCompositeStream::findTimestampInNsLocked(int64_t timeInUs) {
+ for (const auto& fn : mFrameNumberMap) {
+ if (timeInUs == ns2us(fn.second)) {
+ return fn.second;
+ }
+ }
+ for (const auto& inputFrame : mPendingInputFrames) {
+ if (timeInUs == ns2us(inputFrame.first)) {
+ return inputFrame.first;
+ }
+ }
+ return -1;
+}
+
+status_t HeicCompositeStream::copyOneYuvTile(sp<MediaCodecBuffer>& codecBuffer,
+ const CpuConsumer::LockedBuffer& yuvBuffer,
+ size_t top, size_t left, size_t width, size_t height) {
+ ATRACE_CALL();
+
+ // Get stride information for codecBuffer
+ sp<ABuffer> imageData;
+ if (!codecBuffer->meta()->findBuffer("image-data", &imageData)) {
+ ALOGE("%s: Codec input buffer is not for image data!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ if (imageData->size() != sizeof(MediaImage2)) {
+ ALOGE("%s: Invalid codec input image size %zu, expected %zu",
+ __FUNCTION__, imageData->size(), sizeof(MediaImage2));
+ return BAD_VALUE;
+ }
+ MediaImage2* imageInfo = reinterpret_cast<MediaImage2*>(imageData->data());
+ if (imageInfo->mType != MediaImage2::MEDIA_IMAGE_TYPE_YUV ||
+ imageInfo->mBitDepth != 8 ||
+ imageInfo->mBitDepthAllocated != 8 ||
+ imageInfo->mNumPlanes != 3) {
+ ALOGE("%s: Invalid codec input image info: mType %d, mBitDepth %d, "
+ "mBitDepthAllocated %d, mNumPlanes %d!", __FUNCTION__,
+ imageInfo->mType, imageInfo->mBitDepth,
+ imageInfo->mBitDepthAllocated, imageInfo->mNumPlanes);
+ return BAD_VALUE;
+ }
+
+ ALOGV("%s: yuvBuffer chromaStep %d, chromaStride %d",
+ __FUNCTION__, yuvBuffer.chromaStep, yuvBuffer.chromaStride);
+ ALOGV("%s: U offset %u, V offset %u, U rowInc %d, V rowInc %d, U colInc %d, V colInc %d",
+ __FUNCTION__, imageInfo->mPlane[MediaImage2::U].mOffset,
+ imageInfo->mPlane[MediaImage2::V].mOffset,
+ imageInfo->mPlane[MediaImage2::U].mRowInc,
+ imageInfo->mPlane[MediaImage2::V].mRowInc,
+ imageInfo->mPlane[MediaImage2::U].mColInc,
+ imageInfo->mPlane[MediaImage2::V].mColInc);
+
+ // Y
+ for (auto row = top; row < top+height; row++) {
+ uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::Y].mOffset +
+ imageInfo->mPlane[MediaImage2::Y].mRowInc * (row - top);
+ mFnCopyRow(yuvBuffer.data+row*yuvBuffer.stride+left, dst, width);
+ }
+
+ // U is Cb, V is Cr
+ bool codecUPlaneFirst = imageInfo->mPlane[MediaImage2::V].mOffset >
+ imageInfo->mPlane[MediaImage2::U].mOffset;
+ uint32_t codecUvOffsetDiff = codecUPlaneFirst ?
+ imageInfo->mPlane[MediaImage2::V].mOffset - imageInfo->mPlane[MediaImage2::U].mOffset :
+ imageInfo->mPlane[MediaImage2::U].mOffset - imageInfo->mPlane[MediaImage2::V].mOffset;
+ bool isCodecUvSemiplannar = (codecUvOffsetDiff == 1) &&
+ (imageInfo->mPlane[MediaImage2::U].mRowInc ==
+ imageInfo->mPlane[MediaImage2::V].mRowInc) &&
+ (imageInfo->mPlane[MediaImage2::U].mColInc == 2) &&
+ (imageInfo->mPlane[MediaImage2::V].mColInc == 2);
+ bool isCodecUvPlannar =
+ ((codecUPlaneFirst && codecUvOffsetDiff >=
+ imageInfo->mPlane[MediaImage2::U].mRowInc * imageInfo->mHeight/2) ||
+ ((!codecUPlaneFirst && codecUvOffsetDiff >=
+ imageInfo->mPlane[MediaImage2::V].mRowInc * imageInfo->mHeight/2))) &&
+ imageInfo->mPlane[MediaImage2::U].mColInc == 1 &&
+ imageInfo->mPlane[MediaImage2::V].mColInc == 1;
+ bool cameraUPlaneFirst = yuvBuffer.dataCr > yuvBuffer.dataCb;
+
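+ // Three copy paths: (1) source and destination are both semi-planar with matching
+ // UV order, so interleaved chroma rows are copied directly; (2) the destination is
+ // planar and the source chroma is planar (chromaStep == 1), so U and V rows are
+ // copied separately; (3) otherwise fall back to a per-pixel chroma copy.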
+ if (isCodecUvSemiplannar && yuvBuffer.chromaStep == 2 &&
+ (codecUPlaneFirst == cameraUPlaneFirst)) {
+ // UV semi-planar
+ // The chroma plane could be either Cb-first or Cr-first. Take the
+ // smaller address.
+ uint8_t *src = std::min(yuvBuffer.dataCb, yuvBuffer.dataCr);
+ MediaImage2::PlaneIndex dstPlane = codecUvOffsetDiff > 0 ? MediaImage2::U : MediaImage2::V;
+ for (auto row = top/2; row < (top+height)/2; row++) {
+ uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[dstPlane].mOffset +
+ imageInfo->mPlane[dstPlane].mRowInc * (row - top/2);
+ mFnCopyRow(src+row*yuvBuffer.chromaStride+left, dst, width);
+ }
+ } else if (isCodecUvPlannar && yuvBuffer.chromaStep == 1) {
+ // U plane
+ for (auto row = top/2; row < (top+height)/2; row++) {
+ uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::U].mOffset +
+ imageInfo->mPlane[MediaImage2::U].mRowInc * (row - top/2);
+ mFnCopyRow(yuvBuffer.dataCb+row*yuvBuffer.chromaStride+left/2, dst, width/2);
+ }
+
+ // V plane
+ for (auto row = top/2; row < (top+height)/2; row++) {
+ uint8_t *dst = codecBuffer->data() + imageInfo->mPlane[MediaImage2::V].mOffset +
+ imageInfo->mPlane[MediaImage2::V].mRowInc * (row - top/2);
+ mFnCopyRow(yuvBuffer.dataCr+row*yuvBuffer.chromaStride+left/2, dst, width/2);
+ }
+ } else {
+ // Convert between semi-planar and planar layouts, or handle differing
+ // UV orders.
+ uint8_t *dst = codecBuffer->data();
+ for (auto row = top/2; row < (top+height)/2; row++) {
+ for (auto col = left/2; col < (left+width)/2; col++) {
+ // U/Cb
+ int32_t dstIndex = imageInfo->mPlane[MediaImage2::U].mOffset +
+ imageInfo->mPlane[MediaImage2::U].mRowInc * (row - top/2) +
+ imageInfo->mPlane[MediaImage2::U].mColInc * (col - left/2);
+ int32_t srcIndex = row * yuvBuffer.chromaStride + yuvBuffer.chromaStep * col;
+ dst[dstIndex] = yuvBuffer.dataCb[srcIndex];
+
+ // V/Cr
+ dstIndex = imageInfo->mPlane[MediaImage2::V].mOffset +
+ imageInfo->mPlane[MediaImage2::V].mRowInc * (row - top/2) +
+ imageInfo->mPlane[MediaImage2::V].mColInc * (col - left/2);
+ srcIndex = row * yuvBuffer.chromaStride + yuvBuffer.chromaStep * col;
+ dst[dstIndex] = yuvBuffer.dataCr[srcIndex];
+ }
+ }
+ }
+ return OK;
+}
+
+void HeicCompositeStream::initCopyRowFunction(int32_t width)
+{
+ using namespace libyuv;
+
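+ // Default to the portable C row copy; upgrade to an optimized variant
+ // (SSE2/AVX/ERMS/NEON/MIPS) below when the CPU supports it and the row width
+ // satisfies the routine's alignment requirement.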
+ mFnCopyRow = CopyRow_C;
+#if defined(HAS_COPYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ mFnCopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
+ }
+#endif
+#if defined(HAS_COPYROW_AVX)
+ if (TestCpuFlag(kCpuHasAVX)) {
+ mFnCopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
+ }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ mFnCopyRow = CopyRow_ERMS;
+ }
+#endif
+#if defined(HAS_COPYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ mFnCopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
+ }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+ if (TestCpuFlag(kCpuHasMIPS)) {
+ mFnCopyRow = CopyRow_MIPS;
+ }
+#endif
+}
+
+size_t HeicCompositeStream::calcAppSegmentMaxSize(const CameraMetadata& info) {
+ camera_metadata_ro_entry_t entry = info.find(ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT);
+ size_t maxAppsSegment = 1;
+ if (entry.count > 0) {
+ maxAppsSegment = entry.data.u8[0] < 1 ? 1 :
+ entry.data.u8[0] > 16 ? 16 : entry.data.u8[0];
+ }
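+ // Each APP segment occupies at most 2 marker bytes plus 0xFFFF bytes of payload;
+ // reserve additional room for the trailing CameraBlob transport header.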
+ return maxAppsSegment * (2 + 0xFFFF) + sizeof(struct CameraBlob);
+}
+
+bool HeicCompositeStream::threadLoop() {
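+ // currentTs starts at INT64_MAX so the lookup helpers below select the pending
+ // frame with the smallest timestamp.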
+ int64_t currentTs = INT64_MAX;
+ bool newInputAvailable = false;
+
+ {
+ Mutex::Autolock l(mMutex);
+ if (mErrorState) {
+ // In case we landed in error state, return any pending buffers and
+ // halt all further processing.
+ compilePendingInputLocked();
+ releaseInputFramesLocked(currentTs);
+ return false;
+ }
+
+
+ while (!newInputAvailable) {
+ compilePendingInputLocked();
+ newInputAvailable = getNextReadyInputLocked(¤tTs);
+
+ if (!newInputAvailable) {
+ auto failingFrameNumber = getNextFailingInputLocked(¤tTs);
+ if (failingFrameNumber >= 0) {
+ // We cannot erase 'mPendingInputFrames[currentTs]' at this point because it is
+ // possible for two internal stream buffers to fail. In such scenario the
+ // composite stream should notify the client about a stream buffer error only
+ // once and this information is kept within 'errorNotified'.
+ // Any present failed input frames will be removed on a subsequent call to
+ // 'releaseInputFramesLocked()'.
+ releaseInputFrameLocked(&mPendingInputFrames[currentTs]);
+ currentTs = INT64_MAX;
+ }
+
+ auto ret = mInputReadyCondition.waitRelative(mMutex, kWaitDuration);
+ if (ret == TIMED_OUT) {
+ return true;
+ } else if (ret != OK) {
+ ALOGE("%s: Timed wait on condition failed: %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ return false;
+ }
+ }
+ }
+ }
+
+ auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
+ Mutex::Autolock l(mMutex);
+ if (res != OK) {
+ ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)",
+ __FUNCTION__, currentTs, strerror(-res), res);
+ mPendingInputFrames[currentTs].error = true;
+ }
+
+ if (mPendingInputFrames[currentTs].error ||
+ (mPendingInputFrames[currentTs].appSegmentWritten &&
+ mPendingInputFrames[currentTs].pendingOutputTiles == 0)) {
+ releaseInputFramesLocked(currentTs);
+ }
+
+ return true;
+}
+
+bool HeicCompositeStream::onStreamBufferError(const CaptureResultExtras& resultExtras) {
+ bool res = false;
+ // Buffer errors concerning internal composite streams should not be directly visible to
+ // camera clients. They must only receive a single buffer error with the public composite
+ // stream id.
+ if ((resultExtras.errorStreamId == mAppSegmentStreamId) ||
+ (resultExtras.errorStreamId == mMainImageStreamId)) {
+ flagAnErrorFrameNumber(resultExtras.frameNumber);
+ res = true;
+ }
+
+ return res;
+}
+
+void HeicCompositeStream::onResultError(const CaptureResultExtras& resultExtras) {
+ // For a result error, since the APP_SEGMENTS buffer already contains EXIF,
+ // simply skip using the capture result metadata to override EXIF.
+ Mutex::Autolock l(mMutex);
+
+ int64_t timestamp = -1;
+ for (const auto& fn : mFrameNumberMap) {
+ if (fn.first == resultExtras.frameNumber) {
+ timestamp = fn.second;
+ break;
+ }
+ }
+ if (timestamp == -1) {
+ for (const auto& inputFrame : mPendingInputFrames) {
+ if (inputFrame.second.frameNumber == resultExtras.frameNumber) {
+ timestamp = inputFrame.first;
+ break;
+ }
+ }
+ }
+
+ if (timestamp == -1) {
+ ALOGE("%s: Failed to find shutter timestamp for result error!", __FUNCTION__);
+ return;
+ }
+
+ mCaptureResults.emplace(timestamp, std::make_tuple(resultExtras.frameNumber, CameraMetadata()));
+ mInputReadyCondition.signal();
+}
+
+void HeicCompositeStream::CodecCallbackHandler::onMessageReceived(const sp<AMessage> &msg) {
+ sp<HeicCompositeStream> parent = mParent.promote();
+ if (parent == nullptr) return;
+
+ switch (msg->what()) {
+ case kWhatCallbackNotify: {
+ int32_t cbID;
+ if (!msg->findInt32("callbackID", &cbID)) {
+ ALOGE("kWhatCallbackNotify: callbackID is expected.");
+ break;
+ }
+
+ ALOGV("kWhatCallbackNotify: cbID = %d", cbID);
+
+ switch (cbID) {
+ case MediaCodec::CB_INPUT_AVAILABLE: {
+ int32_t index;
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_INPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ parent->onHeicInputFrameAvailable(index);
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_AVAILABLE: {
+ int32_t index;
+ size_t offset;
+ size_t size;
+ int64_t timeUs;
+ int32_t flags;
+
+ if (!msg->findInt32("index", &index)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: index is expected.");
+ break;
+ }
+ if (!msg->findSize("offset", &offset)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: offset is expected.");
+ break;
+ }
+ if (!msg->findSize("size", &size)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: size is expected.");
+ break;
+ }
+ if (!msg->findInt64("timeUs", &timeUs)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: timeUs is expected.");
+ break;
+ }
+ if (!msg->findInt32("flags", &flags)) {
+ ALOGE("CB_OUTPUT_AVAILABLE: flags is expected.");
+ break;
+ }
+
+ CodecOutputBufferInfo bufferInfo = {
+ index,
+ (int32_t)offset,
+ (int32_t)size,
+ timeUs,
+ (uint32_t)flags};
+
+ parent->onHeicOutputFrameAvailable(bufferInfo);
+ break;
+ }
+
+ case MediaCodec::CB_OUTPUT_FORMAT_CHANGED: {
+ sp<AMessage> format;
+ if (!msg->findMessage("format", &format)) {
+ ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
+ break;
+ }
+
+ parent->onHeicFormatChanged(format);
+ break;
+ }
+
+ case MediaCodec::CB_ERROR: {
+ status_t err;
+ int32_t actionCode;
+ AString detail;
+ if (!msg->findInt32("err", &err)) {
+ ALOGE("CB_ERROR: err is expected.");
+ break;
+ }
+ if (!msg->findInt32("action", &actionCode)) {
+ ALOGE("CB_ERROR: action is expected.");
+ break;
+ }
+ msg->findString("detail", &detail);
+ ALOGE("Codec reported error(0x%x), actionCode(%d), detail(%s)",
+ err, actionCode, detail.c_str());
+
+ parent->onHeicCodecError();
+ break;
+ }
+
+ default: {
+ ALOGE("kWhatCallbackNotify: callbackID(%d) is unexpected.", cbID);
+ break;
+ }
+ }
+ break;
+ }
+
+ default:
+ ALOGE("shouldn't be here");
+ break;
+ }
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
new file mode 100644
index 0000000..2aa3c38
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_HEIC_COMPOSITE_STREAM_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_HEIC_COMPOSITE_STREAM_H
+
+#include <queue>
+
+#include <gui/IProducerListener.h>
+#include <gui/CpuConsumer.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaMuxer.h>
+
+#include "CompositeStream.h"
+
+namespace android {
+namespace camera3 {
+
+class HeicCompositeStream : public CompositeStream, public Thread,
+ public CpuConsumer::FrameAvailableListener {
+public:
+ HeicCompositeStream(wp<CameraDeviceBase> device,
+ wp<hardware::camera2::ICameraDeviceCallbacks> cb);
+ ~HeicCompositeStream() override;
+
+ static bool isHeicCompositeStream(const sp<Surface> &surface);
+
+ status_t createInternalStreams(const std::vector<sp<Surface>>& consumers,
+ bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
+ camera3_stream_rotation_t rotation, int *id, const String8& physicalCameraId,
+ std::vector<int> *surfaceIds, int streamSetId, bool isShared) override;
+
+ status_t deleteInternalStreams() override;
+
+ status_t configureStream() override;
+
+ status_t insertGbp(SurfaceMap* /*out*/outSurfaceMap, Vector<int32_t>* /*out*/outputStreamIds,
+ int32_t* /*out*/currentStreamId) override;
+
+ void onShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp) override;
+
+ int getStreamId() override { return mMainImageStreamId; }
+
+ // Use onShutter to keep track of frame number <-> timestamp mapping.
+ void onBufferReleased(const BufferInfo& bufferInfo) override;
+ void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+ const CameraMetadata& settings) override;
+
+ // CpuConsumer listener implementation
+ void onFrameAvailable(const BufferItem& item) override;
+
+ // Return stream information about the internal camera streams
+ static status_t getCompositeStreamInfo(const OutputStreamInfo &streamInfo,
+ const CameraMetadata& ch, std::vector<OutputStreamInfo>* compositeOutput /*out*/);
+
+ static bool isSizeSupportedByHeifEncoder(int32_t width, int32_t height,
+ bool* useHeic, bool* useGrid, int64_t* stall);
+ static bool isInMemoryTempFileSupported();
+protected:
+
+ bool threadLoop() override;
+ bool onStreamBufferError(const CaptureResultExtras& resultExtras) override;
+ void onResultError(const CaptureResultExtras& resultExtras) override;
+
+private:
+ //
+ // HEIC/HEVC Codec related structures, utility functions, and callbacks
+ //
+ struct CodecOutputBufferInfo {
+ int32_t index;
+ int32_t offset;
+ int32_t size;
+ int64_t timeUs;
+ uint32_t flags;
+ };
+
+ struct CodecInputBufferInfo {
+ int32_t index;
+ int64_t timeUs;
+ size_t tileIndex;
+ };
+
+ class CodecCallbackHandler : public AHandler {
+ public:
+ explicit CodecCallbackHandler(wp<HeicCompositeStream> parent) {
+ mParent = parent;
+ }
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+ private:
+ wp<HeicCompositeStream> mParent;
+ };
+
+ enum {
+ kWhatCallbackNotify,
+ };
+
+ bool mUseHeic;
+ sp<MediaCodec> mCodec;
+ sp<ALooper> mCodecLooper, mCallbackLooper;
+ sp<CodecCallbackHandler> mCodecCallbackHandler;
+ sp<AMessage> mAsyncNotify;
+ sp<AMessage> mFormat;
+ size_t mNumOutputTiles;
+
+ int32_t mOutputWidth, mOutputHeight;
+ size_t mMaxHeicBufferSize;
+ int32_t mGridWidth, mGridHeight;
+ size_t mGridRows, mGridCols;
+ bool mUseGrid; // Whether to use framework YUV frame tiling.
+
+ static const int64_t kNoFrameDropMaxPtsGap = -1000000;
+ static const int32_t kNoGridOpRate = 30;
+ static const int32_t kGridOpRate = 120;
+
+ void onHeicOutputFrameAvailable(const CodecOutputBufferInfo& bufferInfo);
+ void onHeicInputFrameAvailable(int32_t index); // Only called for YUV input mode.
+ void onHeicFormatChanged(sp<AMessage>& newFormat);
+ void onHeicCodecError();
+
+ status_t initializeCodec(uint32_t width, uint32_t height,
+ const sp<CameraDeviceBase>& cameraDevice);
+ void deinitCodec();
+
+ //
+ // Composite stream related structures, utility functions and callbacks.
+ //
+ struct InputFrame {
+ int32_t orientation;
+ int32_t quality;
+
+ CpuConsumer::LockedBuffer appSegmentBuffer;
+ std::vector<CodecOutputBufferInfo> codecOutputBuffers;
+ std::unique_ptr<CameraMetadata> result;
+
+ // Fields that are only applicable to HEVC tiling.
+ CpuConsumer::LockedBuffer yuvBuffer;
+ std::vector<CodecInputBufferInfo> codecInputBuffers;
+
+ bool error;
+ bool errorNotified;
+ int64_t frameNumber;
+
+ sp<MediaMuxer> muxer;
+ int fenceFd;
+ int fileFd;
+ ssize_t trackIndex;
+ ANativeWindowBuffer *anb;
+
+ bool appSegmentWritten;
+ size_t pendingOutputTiles;
+ size_t codecInputCounter;
+
+ InputFrame() : orientation(0), quality(kDefaultJpegQuality), error(false),
+ errorNotified(false), frameNumber(-1), fenceFd(-1), fileFd(-1),
+ trackIndex(-1), anb(nullptr), appSegmentWritten(false),
+ pendingOutputTiles(0), codecInputCounter(0) { }
+ };
+
+ void compilePendingInputLocked();
+ // Find first complete and valid frame with smallest timestamp
+ bool getNextReadyInputLocked(int64_t *currentTs /*out*/);
+ // Find the next failing frame with the smallest timestamp and return its frame number
+ int64_t getNextFailingInputLocked(int64_t *currentTs /*out*/);
+
+ status_t processInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+ status_t processCodecInputFrame(InputFrame &inputFrame);
+ status_t startMuxerForInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+ status_t processAppSegment(nsecs_t timestamp, InputFrame &inputFrame);
+ status_t processOneCodecOutputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+ status_t processCompletedInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
+
+ void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
+ void releaseInputFramesLocked(int64_t currentTs);
+
+ size_t findAppSegmentsSize(const uint8_t* appSegmentBuffer, size_t maxSize,
+ size_t* app1SegmentSize);
+ int64_t findTimestampInNsLocked(int64_t timeInUs);
+ status_t copyOneYuvTile(sp<MediaCodecBuffer>& codecBuffer,
+ const CpuConsumer::LockedBuffer& yuvBuffer,
+ size_t top, size_t left, size_t width, size_t height);
+ void initCopyRowFunction(int32_t width);
+ static size_t calcAppSegmentMaxSize(const CameraMetadata& info);
+
+ static const nsecs_t kWaitDuration = 10000000; // 10 ms
+ static const int32_t kDefaultJpegQuality = 99;
+ static const auto kJpegDataSpace = HAL_DATASPACE_V0_JFIF;
+ static const android_dataspace kAppSegmentDataSpace =
+ static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS);
+ static const android_dataspace kHeifDataSpace =
+ static_cast<android_dataspace>(HAL_DATASPACE_HEIF);
+
+ int mAppSegmentStreamId, mAppSegmentSurfaceId;
+ sp<CpuConsumer> mAppSegmentConsumer;
+ sp<Surface> mAppSegmentSurface;
+ bool mAppSegmentBufferAcquired;
+ size_t mAppSegmentMaxSize;
+ CameraMetadata mStaticInfo;
+
+ int mMainImageStreamId, mMainImageSurfaceId;
+ sp<Surface> mMainImageSurface;
+ sp<CpuConsumer> mMainImageConsumer; // Only applicable for HEVC codec.
+ bool mYuvBufferAcquired; // Only applicable to HEVC codec
+
+ sp<Surface> mOutputSurface;
+ sp<ProducerListener> mProducerListener;
+
+
+ // Map from frame number to JPEG setting of orientation+quality
+ std::map<int64_t, std::pair<int32_t, int32_t>> mSettingsByFrameNumber;
+ // Map from timestamp to JPEG setting of orientation+quality
+ std::map<int64_t, std::pair<int32_t, int32_t>> mSettingsByTimestamp;
+
+ // Keep all incoming APP segment blob buffers pending further processing.
+ std::vector<int64_t> mInputAppSegmentBuffers;
+
+ // Keep all incoming HEIC blob buffers pending further processing.
+ std::vector<CodecOutputBufferInfo> mCodecOutputBuffers;
+ std::queue<int64_t> mCodecOutputBufferTimestamps;
+ size_t mOutputBufferCounter;
+
+ // Keep all incoming YUV buffers pending tiling and encoding (for HEVC YUV tiling only)
+ std::vector<int64_t> mInputYuvBuffers;
+ // Keep all codec input buffers ready to be filled out (for HEVC YUV tiling only)
+ std::vector<int32_t> mCodecInputBuffers;
+
+ // Artificial, strictly increasing YUV grid timestamps to keep the encoder happy.
+ int64_t mGridTimestampUs;
+
+ // In most common use case, entries are accessed in order.
+ std::map<int64_t, InputFrame> mPendingInputFrames;
+
+ // Function pointer of libyuv row copy.
+ void (*mFnCopyRow)(const uint8_t* src, uint8_t* dst, int width);
+};
+
+}; // namespace camera3
+}; // namespace android
+
+#endif //ANDROID_SERVERS_CAMERA_CAMERA3_HEIC_COMPOSITE_STREAM_H
diff --git a/services/camera/libcameraservice/api2/HeicEncoderInfoManager.cpp b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.cpp
new file mode 100644
index 0000000..ed9be6e
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "HeicEncoderInfoManager"
+//#define LOG_NDEBUG 0
+
+#include <cstdint>
+#include <regex>
+
+#include <cutils/properties.h>
+#include <log/log_main.h>
+#include <system/graphics.h>
+
+#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+#include <media/stagefright/foundation/ABuffer.h>
+
+#include "HeicEncoderInfoManager.h"
+
+namespace android {
+namespace camera3 {
+
+HeicEncoderInfoManager::HeicEncoderInfoManager() :
+ mIsInited(false),
+ mMinSizeHeic(0, 0),
+ mMaxSizeHeic(INT32_MAX, INT32_MAX),
+ mHasHEVC(false),
+ mHasHEIC(false),
+ mDisableGrid(false) {
+ if (initialize() == OK) {
+ mIsInited = true;
+ }
+}
+
+HeicEncoderInfoManager::~HeicEncoderInfoManager() {
+}
+
+bool HeicEncoderInfoManager::isSizeSupported(int32_t width, int32_t height, bool* useHeic,
+ bool* useGrid, int64_t* stall) const {
+ if (useHeic == nullptr || useGrid == nullptr) {
+ ALOGE("%s: invalid parameters: useHeic %p, useGrid %p",
+ __FUNCTION__, useHeic, useGrid);
+ return false;
+ }
+ if (!mIsInited) return false;
+
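+ // Prefer the dedicated HEIC encoder when it covers the requested size; otherwise
+ // fall back to the HEVC encoder and enable grid (tiling), unless tiling is disabled
+ // or the resolution is 1080p or smaller.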
+ bool chooseHeic = false, enableGrid = true;
+ if (mHasHEIC && width >= mMinSizeHeic.first &&
+ height >= mMinSizeHeic.second && width <= mMaxSizeHeic.first &&
+ height <= mMaxSizeHeic.second) {
+ chooseHeic = true;
+ enableGrid = false;
+ } else if (mHasHEVC) {
+ bool fullSizeSupportedByHevc = (width >= mMinSizeHevc.first &&
+ height >= mMinSizeHevc.second &&
+ width <= mMaxSizeHevc.first &&
+ height <= mMaxSizeHevc.second);
+ if (fullSizeSupportedByHevc && (mDisableGrid ||
+ (width <= 1920 && height <= 1080))) {
+ enableGrid = false;
+ }
+ } else {
+ // No encoder available for the requested size.
+ return false;
+ }
+
+ if (stall != nullptr) {
+ // Find the preferred encoder that advertises the
+ // "measured-frame-rate-WIDTHxHEIGHT-range" key.
+ const FrameRateMaps& maps =
+ (chooseHeic && mHeicFrameRateMaps.size() > 0) ?
+ mHeicFrameRateMaps : mHevcFrameRateMaps;
+ const auto& closestSize = findClosestSize(maps, width, height);
+ if (closestSize == maps.end()) {
+ // The "measured-frame-rate-WIDTHxHEIGHT-range" key is optional.
+ // Hardcode to some default value (3.33ms * tile count) based on resolution.
+ *stall = 3333333LL * width * height / (kGridWidth * kGridHeight);
+ return true;
+ }
+
+ // Derive stall durations based on average fps of the closest size.
+ constexpr int64_t NSEC_PER_SEC = 1000000000LL;
+ int32_t avgFps = (closestSize->second.first + closestSize->second.second)/2;
+ float ratio = 1.0f * width * height /
+ (closestSize->first.first * closestSize->first.second);
+ *stall = ratio * NSEC_PER_SEC / avgFps;
+ }
+
+ *useHeic = chooseHeic;
+ *useGrid = enableGrid;
+ return true;
+}
+
+status_t HeicEncoderInfoManager::initialize() {
+ mDisableGrid = property_get_bool("camera.heic.disable_grid", false);
+ sp<IMediaCodecList> codecsList = MediaCodecList::getInstance();
+ if (codecsList == nullptr) {
+ // No media codec available.
+ return OK;
+ }
+
+ sp<AMessage> heicDetails = getCodecDetails(codecsList, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
+ sp<AMessage> hevcDetails = getCodecDetails(codecsList, MEDIA_MIMETYPE_VIDEO_HEVC);
+
+ if (hevcDetails == nullptr) {
+ if (heicDetails != nullptr) {
+ ALOGE("%s: Device must support HEVC codec if HEIC codec is available!",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+
+ // Check CQ mode for HEVC codec
+ {
+ AString bitrateModes;
+ auto hasItem = hevcDetails->findString("feature-bitrate-modes", &bitrateModes);
+ if (!hasItem) {
+ ALOGE("%s: Failed to query bitrate modes for HEVC codec", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ ALOGV("%s: HEVC codec's feature-bitrate-modes value is %d, %s",
+ __FUNCTION__, hasItem, bitrateModes.c_str());
+ std::regex pattern("(^|,)CQ($|,)", std::regex_constants::icase);
+ if (!std::regex_search(bitrateModes.c_str(), pattern)) {
+ return OK;
+ }
+ }
+
+ // HEIC size range
+ if (heicDetails != nullptr) {
+ auto res = getCodecSizeRange(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC,
+ heicDetails, &mMinSizeHeic, &mMaxSizeHeic, &mHeicFrameRateMaps);
+ if (res != OK) {
+ ALOGE("%s: Failed to get HEIC codec size range: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return BAD_VALUE;
+ }
+ mHasHEIC = true;
+ }
+
+ // HEVC size range
+ {
+ auto res = getCodecSizeRange(MEDIA_MIMETYPE_VIDEO_HEVC,
+ hevcDetails, &mMinSizeHevc, &mMaxSizeHevc, &mHevcFrameRateMaps);
+ if (res != OK) {
+ ALOGE("%s: Failed to get HEVC codec size range: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return BAD_VALUE;
+ }
+
+ mHasHEVC = true;
+ }
+
+ return OK;
+}
+
+status_t HeicEncoderInfoManager::getFrameRateMaps(sp<AMessage> details, FrameRateMaps* maps) {
+ if (details == nullptr || maps == nullptr) {
+ ALOGE("%s: Invalid input: details: %p, maps: %p", __FUNCTION__, details.get(), maps);
+ return BAD_VALUE;
+ }
+
+ for (size_t i = 0; i < details->countEntries(); i++) {
+ AMessage::Type type;
+ const char* entryName = details->getEntryNameAt(i, &type);
+ if (type != AMessage::kTypeString) continue;
+ std::regex frameRateNamePattern("measured-frame-rate-([0-9]+)[*x]([0-9]+)-range",
+ std::regex_constants::icase);
+ std::cmatch sizeMatch;
+ if (std::regex_match(entryName, sizeMatch, frameRateNamePattern) &&
+ sizeMatch.size() == 3) {
+ AMessage::ItemData item = details->getEntryAt(i);
+ AString fpsRangeStr;
+ if (item.find(&fpsRangeStr)) {
+ ALOGV("%s: %s", entryName, fpsRangeStr.c_str());
+ std::regex frameRatePattern("([0-9]+)-([0-9]+)");
+ std::cmatch fpsMatch;
+ if (std::regex_match(fpsRangeStr.c_str(), fpsMatch, frameRatePattern) &&
+ fpsMatch.size() == 3) {
+ maps->emplace(
+ std::make_pair(stoi(sizeMatch[1]), stoi(sizeMatch[2])),
+ std::make_pair(stoi(fpsMatch[1]), stoi(fpsMatch[2])));
+ } else {
+ return BAD_VALUE;
+ }
+ }
+ }
+ }
+ return OK;
+}
+
+status_t HeicEncoderInfoManager::getCodecSizeRange(
+ const char* codecName,
+ sp<AMessage> details,
+ std::pair<int32_t, int32_t>* minSize,
+ std::pair<int32_t, int32_t>* maxSize,
+ FrameRateMaps* frameRateMaps) {
+ if (codecName == nullptr || minSize == nullptr || maxSize == nullptr ||
+ details == nullptr || frameRateMaps == nullptr) {
+ return BAD_VALUE;
+ }
+
+ AString sizeRange;
+ auto hasItem = details->findString("size-range", &sizeRange);
+ if (!hasItem) {
+ ALOGE("%s: Failed to query size range for codec %s", __FUNCTION__, codecName);
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s codec's size range is %s", __FUNCTION__, codecName, sizeRange.c_str());
+ std::regex pattern("([0-9]+)[*x]([0-9]+)-([0-9]+)[*x]([0-9]+)");
+ std::cmatch match;
+ if (std::regex_match(sizeRange.c_str(), match, pattern)) {
+ if (match.size() == 5) {
+ minSize->first = stoi(match[1]);
+ minSize->second = stoi(match[2]);
+ maxSize->first = stoi(match[3]);
+ maxSize->second = stoi(match[4]);
+ if (minSize->first > maxSize->first ||
+ minSize->second > maxSize->second) {
+ ALOGE("%s: Invalid %s code size range: %s",
+ __FUNCTION__, codecName, sizeRange.c_str());
+ return BAD_VALUE;
+ }
+ } else {
+ return BAD_VALUE;
+ }
+ }
+
+ auto res = getFrameRateMaps(details, frameRateMaps);
+ if (res != OK) {
+ return res;
+ }
+
+ return OK;
+}
+
+HeicEncoderInfoManager::FrameRateMaps::const_iterator HeicEncoderInfoManager::findClosestSize(
+ const FrameRateMaps& maps, int32_t width, int32_t height) const {
+ int32_t minDiff = INT32_MAX;
+ FrameRateMaps::const_iterator closestIter = maps.begin();
+ for (auto iter = maps.begin(); iter != maps.end(); iter++) {
+ // Use area difference between the sizes to approximate size
+ // difference.
+ int32_t diff = abs(iter->first.first * iter->first.second - width * height);
+ if (diff < minDiff) {
+ closestIter = iter;
+ minDiff = diff;
+ }
+ }
+ return closestIter;
+}
+
+sp<AMessage> HeicEncoderInfoManager::getCodecDetails(
+ sp<IMediaCodecList> codecsList, const char* name) {
+ ssize_t idx = codecsList->findCodecByType(name, true /*encoder*/);
+ if (idx < 0) {
+ return nullptr;
+ }
+
+ const sp<MediaCodecInfo> info = codecsList->getCodecInfo(idx);
+ if (info == nullptr) {
+ ALOGE("%s: Failed to get codec info for %s", __FUNCTION__, name);
+ return nullptr;
+ }
+ const sp<MediaCodecInfo::Capabilities> caps =
+ info->getCapabilitiesFor(name);
+ if (caps == nullptr) {
+ ALOGE("%s: Failed to get capabilities for codec %s", __FUNCTION__, name);
+ return nullptr;
+ }
+ const sp<AMessage> details = caps->getDetails();
+ if (details == nullptr) {
+ ALOGE("%s: Failed to get details for codec %s", __FUNCTION__, name);
+ return nullptr;
+ }
+
+ return details;
+}
+} //namespace camera3
+} // namespace android
diff --git a/services/camera/libcameraservice/api2/HeicEncoderInfoManager.h b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.h
new file mode 100644
index 0000000..fb0b914
--- /dev/null
+++ b/services/camera/libcameraservice/api2/HeicEncoderInfoManager.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_HEICENCODER_INFO_MANAGER_H
+#define ANDROID_SERVERS_CAMERA_HEICENCODER_INFO_MANAGER_H
+
+#include <unordered_map>
+#include <utility>
+#include <utils/Errors.h>
+#include <utils/StrongPointer.h>
+
+#include <media/IMediaCodecList.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+namespace camera3 {
+
+class HeicEncoderInfoManager {
+public:
+ static HeicEncoderInfoManager& getInstance() {
+ static HeicEncoderInfoManager instance;
+ return instance;
+ }
+
+ bool isSizeSupported(int32_t width, int32_t height,
+ bool* useHeic, bool* useGrid, int64_t* stall) const;
+
+ static const auto kGridWidth = 512;
+ static const auto kGridHeight = 512;
+private:
+ struct SizePairHash {
+ std::size_t operator () (const std::pair<int32_t,int32_t> &p) const {
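+ // Simple hash combine for a (width, height) pair.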
+ return p.first * 31 + p.second;
+ }
+ };
+
+ typedef std::unordered_map<std::pair<int32_t, int32_t>,
+ std::pair<int32_t, int32_t>, SizePairHash> FrameRateMaps;
+
+ HeicEncoderInfoManager();
+ virtual ~HeicEncoderInfoManager();
+
+ status_t initialize();
+ status_t getFrameRateMaps(sp<AMessage> details, FrameRateMaps* maps);
+ status_t getCodecSizeRange(const char* codecName, sp<AMessage> details,
+ std::pair<int32_t, int32_t>* minSize, std::pair<int32_t, int32_t>* maxSize,
+ FrameRateMaps* frameRateMaps);
+ FrameRateMaps::const_iterator findClosestSize(const FrameRateMaps& maps,
+ int32_t width, int32_t height) const;
+ sp<AMessage> getCodecDetails(sp<IMediaCodecList> codecsList, const char* name);
+
+ bool mIsInited;
+ std::pair<int32_t, int32_t> mMinSizeHeic, mMaxSizeHeic;
+ std::pair<int32_t, int32_t> mMinSizeHevc, mMaxSizeHevc;
+ bool mHasHEVC, mHasHEIC;
+ FrameRateMaps mHeicFrameRateMaps, mHevcFrameRateMaps;
+ bool mDisableGrid;
+
+};
+
+} // namespace camera3
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_HEICENCODER_INFO_MANAGER_H
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index a9cbe72..d6789a4 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -24,6 +24,8 @@
#include <algorithm>
#include <chrono>
+#include "common/DepthPhotoProcessor.h"
+#include <dlfcn.h>
#include <future>
#include <inttypes.h>
#include <hardware/camera_common.h>
@@ -36,6 +38,8 @@
#include <hwbinder/IPCThreadState.h>
#include <utils/Trace.h>
+#include "api2/HeicCompositeStream.h"
+
namespace android {
using namespace ::android::hardware::camera;
@@ -48,16 +52,10 @@
const std::string kLegacyProviderName("legacy/0");
const std::string kExternalProviderName("external/0");
const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
-
-// The extra amount of time to hold a reference to an ICameraProvider after it is no longer needed.
-// Hold the reference for this extra time so that if the camera is unreferenced and then referenced
-// again quickly, we do not let the HAL exit and then need to immediately restart it. An example
-// when this could happen is switching from a front-facing to a rear-facing camera. If the HAL were
-// to exit during the camera switch, the camera could appear janky to the user.
-const std::chrono::system_clock::duration kCameraKeepAliveDelay = 3s;
-
} // anonymous namespace
+const float CameraProviderManager::kDepthARTolerance = .1f;
+
CameraProviderManager::HardwareServiceInteractionProxy
CameraProviderManager::sHardwareServiceInteractionProxy{};
@@ -73,6 +71,8 @@
}
mListener = listener;
mServiceProxy = proxy;
+ mDeviceState = static_cast<hardware::hidl_bitfield<provider::V2_5::DeviceState>>(
+ provider::V2_5::DeviceState::NORMAL);
// Registering will trigger notifications for all already-known providers
bool success = mServiceProxy->registerForNotifications(
@@ -276,6 +276,26 @@
return OK;
}
+status_t CameraProviderManager::notifyDeviceStateChange(
+ hardware::hidl_bitfield<provider::V2_5::DeviceState> newState) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ mDeviceState = newState;
+ status_t res = OK;
+ for (auto& provider : mProviders) {
+ ALOGV("%s: Notifying %s for new state 0x%" PRIx64,
+ __FUNCTION__, provider->mProviderName.c_str(), newState);
+ status_t singleRes = provider->notifyDeviceStateChange(mDeviceState);
+ if (singleRes != OK) {
+ ALOGE("%s: Unable to notify provider %s about device state change",
+ __FUNCTION__,
+ provider->mProviderName.c_str());
+ res = singleRes;
+ // continue to do the rest of the providers instead of returning now
+ }
+ }
+ return res;
+}
+
status_t CameraProviderManager::openSession(const std::string &id,
const sp<device::V3_2::ICameraDeviceCallback>& callback,
/*out*/
@@ -361,7 +381,7 @@
if (!kEnableLazyHal) {
return;
}
- ALOGI("Saving camera provider %s for camera device %s", provider->descriptor, cameraId.c_str());
+ ALOGV("Saving camera provider %s for camera device %s", provider->descriptor, cameraId.c_str());
std::lock_guard<std::mutex> lock(mProviderInterfaceMapLock);
std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *primaryMap, *alternateMap;
if (usageType == DeviceMode::TORCH) {
@@ -385,7 +405,7 @@
if (!kEnableLazyHal) {
return;
}
- ALOGI("Removing camera device %s", cameraId.c_str());
+ ALOGV("Removing camera device %s", cameraId.c_str());
std::unordered_map<std::string, sp<provider::V2_4::ICameraProvider>> *providerMap;
if (usageType == DeviceMode::TORCH) {
providerMap = &mTorchProviderByCameraId;
@@ -395,12 +415,15 @@
std::lock_guard<std::mutex> lock(mProviderInterfaceMapLock);
auto search = providerMap->find(cameraId.c_str());
if (search != providerMap->end()) {
- auto ptr = search->second;
- auto future = std::async(std::launch::async, [ptr] {
- std::this_thread::sleep_for(kCameraKeepAliveDelay);
- IPCThreadState::self()->flushCommands();
- });
+ // Drop the reference to this ICameraProvider. This is safe to do immediately (without an
+ // added delay) because hwservicemanager guarantees to hold the reference for at least five
+ // more seconds. We depend on this behavior so that if the provider is unreferenced and
+ // then referenced again quickly, we do not let the HAL exit and then need to immediately
+ // restart it. An example when this could happen is switching from a front-facing to a
+ // rear-facing camera. If the HAL were to exit during the camera switch, the camera could
+ // appear janky to the user.
providerMap->erase(cameraId.c_str());
+ IPCThreadState::self()->flushCommands();
} else {
ALOGE("%s: Asked to remove reference for camera %s, but no reference to it was found. This "
"could mean removeRef was called twice for the same camera ID.", __FUNCTION__,
@@ -500,6 +523,285 @@
}
}
+bool CameraProviderManager::ProviderInfo::DeviceInfo3::isPublicallyHiddenSecureCamera() {
+ camera_metadata_entry_t entryCap;
+ entryCap = mCameraCharacteristics.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ if (entryCap.count != 1) {
+ // Do NOT hide this camera device if the capabilities specify anything more
+ // than ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA.
+ return false;
+ }
+ return entryCap.data.u8[0] == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA;
+}
+
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedSizes(
+ const CameraMetadata& ch, uint32_t tag, android_pixel_format_t format,
+ std::vector<std::tuple<size_t, size_t>> *sizes/*out*/) {
+ if (sizes == nullptr) {
+ return;
+ }
+
+ auto scalerDims = ch.find(tag);
+ if (scalerDims.count > 0) {
+ // Scaler entry contains 4 elements (format, width, height, type)
+ for (size_t i = 0; i < scalerDims.count; i += 4) {
+ if ((scalerDims.data.i32[i] == format) &&
+ (scalerDims.data.i32[i+3] ==
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
+ sizes->push_back(std::make_tuple(scalerDims.data.i32[i+1],
+ scalerDims.data.i32[i+2]));
+ }
+ }
+ }
+}
+
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedDurations(
+ const CameraMetadata& ch, uint32_t tag, android_pixel_format_t format,
+ const std::vector<std::tuple<size_t, size_t>>& sizes,
+ std::vector<int64_t> *durations/*out*/) {
+ if (durations == nullptr) {
+ return;
+ }
+
+ auto availableDurations = ch.find(tag);
+ if (availableDurations.count > 0) {
+ // Duration entry contains 4 elements (format, width, height, duration)
+ for (size_t i = 0; i < availableDurations.count; i += 4) {
+ for (const auto& size : sizes) {
+ int64_t width = std::get<0>(size);
+ int64_t height = std::get<1>(size);
+ if ((availableDurations.data.i64[i] == format) &&
+ (availableDurations.data.i64[i+1] == width) &&
+ (availableDurations.data.i64[i+2] == height)) {
+ durations->push_back(availableDurations.data.i64[i+3]);
+ }
+ }
+ }
+ }
+}
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedDynamicDepthDurations(
+ const std::vector<int64_t>& depthDurations, const std::vector<int64_t>& blobDurations,
+ std::vector<int64_t> *dynamicDepthDurations /*out*/) {
+ if ((dynamicDepthDurations == nullptr) || (depthDurations.size() != blobDurations.size())) {
+ return;
+ }
+
+ // Unfortunately there is no direct way to calculate the dynamic depth stream duration.
+ // Processing time on the camera service side can vary greatly depending on multiple
+ // variables that are not under our control. Make a guesstimate by taking the maximum
+ // of the corresponding depth and blob duration values.
+ auto depthDuration = depthDurations.begin();
+ auto blobDuration = blobDurations.begin();
+ dynamicDepthDurations->reserve(depthDurations.size());
+ while ((depthDuration != depthDurations.end()) && (blobDuration != blobDurations.end())) {
+ dynamicDepthDurations->push_back(std::max(*depthDuration, *blobDuration));
+ depthDuration++; blobDuration++;
+ }
+}
+
+void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedDynamicDepthSizes(
+ const std::vector<std::tuple<size_t, size_t>>& blobSizes,
+ const std::vector<std::tuple<size_t, size_t>>& depthSizes,
+ std::vector<std::tuple<size_t, size_t>> *dynamicDepthSizes /*out*/,
+ std::vector<std::tuple<size_t, size_t>> *internalDepthSizes /*out*/) {
+ if (dynamicDepthSizes == nullptr || internalDepthSizes == nullptr) {
+ return;
+ }
+
+ // The dynamic depth spec. does not mention how closely the aspect ratios must match.
+ // Try using something appropriate.
+ float ARTolerance = kDepthARTolerance;
+
+ for (const auto& blobSize : blobSizes) {
+ float jpegAR = static_cast<float> (std::get<0>(blobSize)) /
+ static_cast<float>(std::get<1>(blobSize));
+ bool found = false;
+ for (const auto& depthSize : depthSizes) {
+ if (depthSize == blobSize) {
+ internalDepthSizes->push_back(depthSize);
+ found = true;
+ break;
+ } else {
+ float depthAR = static_cast<float> (std::get<0>(depthSize)) /
+ static_cast<float>(std::get<1>(depthSize));
+ if (std::fabs(jpegAR - depthAR) <= ARTolerance) {
+ internalDepthSizes->push_back(depthSize);
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ dynamicDepthSizes->push_back(blobSize);
+ }
+ }
+}
+
+bool CameraProviderManager::ProviderInfo::DeviceInfo3::isDepthPhotoLibraryPresent() {
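+ // Probe for the depth photo processing library only once per process and cache the result.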
+ static bool libraryPresent = false;
+ static bool initialized = false;
+ if (initialized) {
+ return libraryPresent;
+ } else {
+ initialized = true;
+ }
+
+ void* depthLibHandle = dlopen(camera3::kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+ if (depthLibHandle == nullptr) {
+ return false;
+ }
+
+ auto processFunc = dlsym(depthLibHandle, camera3::kDepthPhotoProcessFunction);
+ if (processFunc != nullptr) {
+ libraryPresent = true;
+ } else {
+ libraryPresent = false;
+ }
+ dlclose(depthLibHandle);
+
+ return libraryPresent;
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addDynamicDepthTags() {
+ uint32_t depthExclTag = ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE;
+ uint32_t depthSizesTag = ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS;
+ auto& c = mCameraCharacteristics;
+ std::vector<std::tuple<size_t, size_t>> supportedBlobSizes, supportedDepthSizes,
+ supportedDynamicDepthSizes, internalDepthSizes;
+ auto chTags = c.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+ if (chTags.count == 0) {
+ ALOGE("%s: Supported camera characteristics is empty!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ bool isDepthExclusivePresent = std::find(chTags.data.i32, chTags.data.i32 + chTags.count,
+ depthExclTag) != (chTags.data.i32 + chTags.count);
+ bool isDepthSizePresent = std::find(chTags.data.i32, chTags.data.i32 + chTags.count,
+ depthSizesTag) != (chTags.data.i32 + chTags.count);
+ if (!(isDepthExclusivePresent && isDepthSizePresent)) {
+ // No depth support, nothing more to do.
+ return OK;
+ }
+
+ auto depthExclusiveEntry = c.find(depthExclTag);
+ if (depthExclusiveEntry.count > 0) {
+ if (depthExclusiveEntry.data.u8[0] != ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE) {
+ // Depth support is exclusive, nothing more to do.
+ return OK;
+ }
+ } else {
+ ALOGE("%s: Advertised depth exclusive tag but value is not present!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ getSupportedSizes(c, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, HAL_PIXEL_FORMAT_BLOB,
+ &supportedBlobSizes);
+ getSupportedSizes(c, depthSizesTag, HAL_PIXEL_FORMAT_Y16, &supportedDepthSizes);
+ if (supportedBlobSizes.empty() || supportedDepthSizes.empty()) {
+ // Nothing to do in this case.
+ return OK;
+ }
+
+ getSupportedDynamicDepthSizes(supportedBlobSizes, supportedDepthSizes,
+ &supportedDynamicDepthSizes, &internalDepthSizes);
+ if (supportedDynamicDepthSizes.empty()) {
+ // Nothing more to do.
+ return OK;
+ }
+
+ if (!isDepthPhotoLibraryPresent()) {
+ // Depth photo processing library is not present, nothing more to do.
+ return OK;
+ }
+
+ std::vector<int32_t> dynamicDepthEntries;
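+ // Each stream configuration entry is a 4-tuple: (format, width, height, input/output).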
+ for (const auto& it : supportedDynamicDepthSizes) {
+ int32_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int32_t> (std::get<0>(it)),
+ static_cast<int32_t> (std::get<1>(it)),
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT };
+ dynamicDepthEntries.insert(dynamicDepthEntries.end(), entry, entry + 4);
+ }
+
+ std::vector<int64_t> depthMinDurations, depthStallDurations;
+ std::vector<int64_t> blobMinDurations, blobStallDurations;
+ std::vector<int64_t> dynamicDepthMinDurations, dynamicDepthStallDurations;
+
+ getSupportedDurations(c, ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,
+ HAL_PIXEL_FORMAT_Y16, internalDepthSizes, &depthMinDurations);
+ getSupportedDurations(c, ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ HAL_PIXEL_FORMAT_BLOB, supportedDynamicDepthSizes, &blobMinDurations);
+ if (blobMinDurations.empty() || depthMinDurations.empty() ||
+ (depthMinDurations.size() != blobMinDurations.size())) {
+ ALOGE("%s: Unexpected number of available depth min durations! %zu vs. %zu",
+ __FUNCTION__, depthMinDurations.size(), blobMinDurations.size());
+ return BAD_VALUE;
+ }
+
+ getSupportedDurations(c, ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS,
+ HAL_PIXEL_FORMAT_Y16, internalDepthSizes, &depthStallDurations);
+ getSupportedDurations(c, ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+ HAL_PIXEL_FORMAT_BLOB, supportedDynamicDepthSizes, &blobStallDurations);
+ if (blobStallDurations.empty() || depthStallDurations.empty() ||
+ (depthStallDurations.size() != blobStallDurations.size())) {
+ ALOGE("%s: Unexpected number of available depth stall durations! %zu vs. %zu",
+ __FUNCTION__, depthStallDurations.size(), blobStallDurations.size());
+ return BAD_VALUE;
+ }
+
+ getSupportedDynamicDepthDurations(depthMinDurations, blobMinDurations,
+ &dynamicDepthMinDurations);
+ getSupportedDynamicDepthDurations(depthStallDurations, blobStallDurations,
+ &dynamicDepthStallDurations);
+ if (dynamicDepthMinDurations.empty() || dynamicDepthStallDurations.empty() ||
+ (dynamicDepthMinDurations.size() != dynamicDepthStallDurations.size())) {
+ ALOGE("%s: Unexpected number of dynamic depth stall/min durations! %zu vs. %zu",
+ __FUNCTION__, dynamicDepthMinDurations.size(), dynamicDepthStallDurations.size());
+ return BAD_VALUE;
+ }
+
+ std::vector<int64_t> dynamicDepthMinDurationEntries;
+ auto itDuration = dynamicDepthMinDurations.begin();
+ auto itSize = supportedDynamicDepthSizes.begin();
+ while (itDuration != dynamicDepthMinDurations.end()) {
+ int64_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int32_t> (std::get<0>(*itSize)),
+ static_cast<int32_t> (std::get<1>(*itSize)), *itDuration};
+ dynamicDepthMinDurationEntries.insert(dynamicDepthMinDurationEntries.end(), entry,
+ entry + 4);
+ itDuration++; itSize++;
+ }
+
+ std::vector<int64_t> dynamicDepthStallDurationEntries;
+ itDuration = dynamicDepthStallDurations.begin();
+ itSize = supportedDynamicDepthSizes.begin();
+ while (itDuration != dynamicDepthStallDurations.end()) {
+ int64_t entry[4] = {HAL_PIXEL_FORMAT_BLOB, static_cast<int32_t> (std::get<0>(*itSize)),
+ static_cast<int32_t> (std::get<1>(*itSize)), *itDuration};
+ dynamicDepthStallDurationEntries.insert(dynamicDepthStallDurationEntries.end(), entry,
+ entry + 4);
+ itDuration++; itSize++;
+ }
+
+ c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS,
+ dynamicDepthEntries.data(), dynamicDepthEntries.size());
+ c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS,
+ dynamicDepthMinDurationEntries.data(), dynamicDepthMinDurationEntries.size());
+ c.update(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS,
+ dynamicDepthStallDurationEntries.data(), dynamicDepthStallDurationEntries.size());
+
+ std::vector<int32_t> supportedChTags;
+ supportedChTags.reserve(chTags.count + 3);
+ supportedChTags.insert(supportedChTags.end(), chTags.data.i32,
+ chTags.data.i32 + chTags.count);
+ supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS);
+ supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS);
+ supportedChTags.push_back(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS);
+ c.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, supportedChTags.data(),
+ supportedChTags.size());
+
+ return OK;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::fixupMonochromeTags() {
status_t res = OK;
auto& c = mCameraCharacteristics;
@@ -596,6 +898,130 @@
return res;
}
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::fillHeicStreamCombinations(
+ std::vector<int32_t>* outputs,
+ std::vector<int64_t>* durations,
+ std::vector<int64_t>* stallDurations,
+ const camera_metadata_entry& halStreamConfigs,
+ const camera_metadata_entry& halStreamDurations) {
+ if (outputs == nullptr || durations == nullptr || stallDurations == nullptr) {
+ return BAD_VALUE;
+ }
+
+ static bool supportInMemoryTempFile =
+ camera3::HeicCompositeStream::isInMemoryTempFileSupported();
+ if (!supportInMemoryTempFile) {
+ ALOGI("%s: No HEIC support due to absence of in memory temp file support",
+ __FUNCTION__);
+ return OK;
+ }
+
+ for (size_t i = 0; i < halStreamConfigs.count; i += 4) {
+ int32_t format = halStreamConfigs.data.i32[i];
+ // Only IMPLEMENTATION_DEFINED and YUV_888 can be used to generate HEIC
+ // images.
+ if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+ format != HAL_PIXEL_FORMAT_YCBCR_420_888) {
+ continue;
+ }
+
+ bool sizeAvail = false;
+ for (size_t j = 0; j < outputs->size(); j += 4) {
+ if ((*outputs)[j+1] == halStreamConfigs.data.i32[i+1] &&
+ (*outputs)[j+2] == halStreamConfigs.data.i32[i+2]) {
+ sizeAvail = true;
+ break;
+ }
+ }
+ if (sizeAvail) continue;
+
+ int64_t stall = 0;
+ bool useHeic, useGrid;
+ if (camera3::HeicCompositeStream::isSizeSupportedByHeifEncoder(
+ halStreamConfigs.data.i32[i+1], halStreamConfigs.data.i32[i+2],
+ &useHeic, &useGrid, &stall)) {
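+ // Grid (tiled) encoding is only advertised for YCBCR_420_888 inputs and
+ // non-tiled encoding only for IMPLEMENTATION_DEFINED; skip mismatched combinations.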
+ if (useGrid != (format == HAL_PIXEL_FORMAT_YCBCR_420_888)) {
+ continue;
+ }
+
+ // HEIC configuration
+ int32_t config[] = {HAL_PIXEL_FORMAT_BLOB, halStreamConfigs.data.i32[i+1],
+ halStreamConfigs.data.i32[i+2], 0 /*isInput*/};
+ outputs->insert(outputs->end(), config, config + 4);
+
+ // HEIC minFrameDuration
+ for (size_t j = 0; j < halStreamDurations.count; j += 4) {
+ if (halStreamDurations.data.i64[j] == format &&
+ halStreamDurations.data.i64[j+1] == halStreamConfigs.data.i32[i+1] &&
+ halStreamDurations.data.i64[j+2] == halStreamConfigs.data.i32[i+2]) {
+ int64_t duration[] = {HAL_PIXEL_FORMAT_BLOB, halStreamConfigs.data.i32[i+1],
+ halStreamConfigs.data.i32[i+2], halStreamDurations.data.i64[j+3]};
+ durations->insert(durations->end(), duration, duration+4);
+ break;
+ }
+ }
+
+ // HEIC stallDuration
+ int64_t stallDuration[] = {HAL_PIXEL_FORMAT_BLOB, halStreamConfigs.data.i32[i+1],
+ halStreamConfigs.data.i32[i+2], stall};
+ stallDurations->insert(stallDurations->end(), stallDuration, stallDuration+4);
+ }
+ }
+ return OK;
+}
+
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::deriveHeicTags() {
+ auto& c = mCameraCharacteristics;
+
+ camera_metadata_entry halHeicSupport = c.find(ANDROID_HEIC_INFO_SUPPORTED);
+ if (halHeicSupport.count > 1) {
+ ALOGE("%s: Invalid entry count %zu for ANDROID_HEIC_INFO_SUPPORTED",
+ __FUNCTION__, halHeicSupport.count);
+ return BAD_VALUE;
+ } else if (halHeicSupport.count == 0 ||
+ halHeicSupport.data.u8[0] == ANDROID_HEIC_INFO_SUPPORTED_FALSE) {
+ // Camera HAL doesn't support mandatory stream combinations for HEIC.
+ return OK;
+ }
+
+ camera_metadata_entry maxJpegAppsSegments =
+ c.find(ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT);
+ if (maxJpegAppsSegments.count != 1 || maxJpegAppsSegments.data.u8[0] == 0 ||
+ maxJpegAppsSegments.data.u8[0] > 16) {
+ ALOGE("%s: ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT must be within [1, 16]",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ // Populate HEIC output configurations and its related min frame duration
+ // and stall duration.
+ std::vector<int32_t> heicOutputs;
+ std::vector<int64_t> heicDurations;
+ std::vector<int64_t> heicStallDurations;
+
+ camera_metadata_entry halStreamConfigs =
+ c.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+ camera_metadata_entry minFrameDurations =
+ c.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+
+ status_t res = fillHeicStreamCombinations(&heicOutputs, &heicDurations, &heicStallDurations,
+ halStreamConfigs, minFrameDurations);
+ if (res != OK) {
+ ALOGE("%s: Failed to fill HEIC stream combinations: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+
+ c.update(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS,
+ heicOutputs.data(), heicOutputs.size());
+ c.update(ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS,
+ heicDurations.data(), heicDurations.size());
+ c.update(ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS,
+ heicStallDurations.data(), heicStallDurations.size());
+
+ return OK;
+}
+
bool CameraProviderManager::isLogicalCamera(const std::string& id,
std::vector<std::string>* physicalCameraIds) {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
@@ -609,6 +1035,16 @@
return deviceInfo->mIsLogicalCamera;
}
+bool CameraProviderManager::isPublicallyHiddenSecureCamera(const std::string& id) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo == nullptr) {
+ return false;
+ }
+ return deviceInfo->mIsPublicallyHiddenSecureCamera;
+}
+
bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) {
for (auto& provider : mProviders) {
for (auto& deviceInfo : provider->mDevices) {
@@ -674,7 +1110,7 @@
}
sp<ProviderInfo> providerInfo = new ProviderInfo(newProvider, this);
- status_t res = providerInfo->initialize(interface);
+ status_t res = providerInfo->initialize(interface, mDeviceState);
if (res != OK) {
return res;
}
@@ -735,7 +1171,8 @@
}
status_t CameraProviderManager::ProviderInfo::initialize(
- sp<provider::V2_4::ICameraProvider>& interface) {
+ sp<provider::V2_4::ICameraProvider>& interface,
+ hardware::hidl_bitfield<provider::V2_5::DeviceState> currentDeviceState) {
status_t res = parseProviderName(mProviderName, &mType, &mId);
if (res != OK) {
ALOGE("%s: Invalid provider name, ignoring", __FUNCTION__);
@@ -743,6 +1180,15 @@
}
ALOGI("Connecting to new camera provider: %s, isRemote? %d",
mProviderName.c_str(), interface->isRemote());
+
+ // Determine minor version
+ auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
+ if (castResult.isOk()) {
+ mMinorVersion = 5;
+ } else {
+ mMinorVersion = 4;
+ }
+
// cameraDeviceStatusChange callbacks may be called (and causing new devices added)
// before setCallback returns
hardware::Return<Status> status = interface->setCallback(this);
@@ -767,6 +1213,24 @@
__FUNCTION__, mProviderName.c_str());
}
+ if (!kEnableLazyHal) {
+ // Save HAL reference indefinitely
+ mSavedInterface = interface;
+ } else {
+ mActiveInterface = interface;
+ }
+
+ ALOGV("%s: Setting device state for %s: 0x%" PRIx64,
+ __FUNCTION__, mProviderName.c_str(), currentDeviceState);
+ notifyDeviceStateChange(currentDeviceState);
+
+ res = setUpVendorTags();
+ if (res != OK) {
+ ALOGE("%s: Unable to set up vendor tags from provider '%s'",
+ __FUNCTION__, mProviderName.c_str());
+ return res;
+ }
+
// Get initial list of camera devices, if any
std::vector<std::string> devices;
hardware::Return<void> ret = interface->getCameraIdList([&status, this, &devices](
@@ -823,34 +1287,28 @@
}
}
- res = setUpVendorTags();
- if (res != OK) {
- ALOGE("%s: Unable to set up vendor tags from provider '%s'",
- __FUNCTION__, mProviderName.c_str());
- return res;
- }
-
ALOGI("Camera provider %s ready with %zu camera devices",
mProviderName.c_str(), mDevices.size());
mInitialized = true;
- if (!kEnableLazyHal) {
- // Save HAL reference indefinitely
- mSavedInterface = interface;
- }
return OK;
}
const sp<provider::V2_4::ICameraProvider>
CameraProviderManager::ProviderInfo::startProviderInterface() {
ATRACE_CALL();
- ALOGI("Request to start camera provider: %s", mProviderName.c_str());
+ ALOGV("Request to start camera provider: %s", mProviderName.c_str());
if (mSavedInterface != nullptr) {
return mSavedInterface;
}
+ if (!kEnableLazyHal) {
+ ALOGE("Bad provider state! Should not be here on a non-lazy HAL!");
+ return nullptr;
+ }
+
auto interface = mActiveInterface.promote();
if (interface == nullptr) {
- ALOGI("Could not promote, calling getService(%s)", mProviderName.c_str());
+ ALOGI("Camera HAL provider needs restart, calling getService(%s)", mProviderName.c_str());
interface = mManager->mServiceProxy->getService(mProviderName);
interface->setCallback(this);
hardware::Return<bool> linked = interface->linkToDeath(this, /*cookie*/ mId);
@@ -863,9 +1321,22 @@
ALOGW("%s: Unable to link to provider '%s' death notifications",
__FUNCTION__, mProviderName.c_str());
}
+ // Send current device state
+ if (mMinorVersion >= 5) {
+ auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
+ if (castResult.isOk()) {
+ sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
+ if (interface_2_5 != nullptr) {
+ ALOGV("%s: Initial device state for %s: 0x %" PRIx64,
+ __FUNCTION__, mProviderName.c_str(), mDeviceState);
+ interface_2_5->notifyDeviceStateChange(mDeviceState);
+ }
+ }
+ }
+
mActiveInterface = interface;
} else {
- ALOGI("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
+ ALOGV("Camera provider (%s) already in use. Re-using instance.", mProviderName.c_str());
}
return interface;
}
@@ -950,8 +1421,10 @@
}
status_t CameraProviderManager::ProviderInfo::dump(int fd, const Vector<String16>&) const {
- dprintf(fd, "== Camera Provider HAL %s (v2.4, %s) static info: %zu devices: ==\n",
- mProviderName.c_str(), mIsRemote ? "remote" : "passthrough",
+ dprintf(fd, "== Camera Provider HAL %s (v2.%d, %s) static info: %zu devices: ==\n",
+ mProviderName.c_str(),
+ mMinorVersion,
+ mIsRemote ? "remote" : "passthrough",
mDevices.size());
for (auto& device : mDevices) {
@@ -1150,6 +1623,26 @@
return OK;
}
+status_t CameraProviderManager::ProviderInfo::notifyDeviceStateChange(
+ hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
+ mDeviceState = newDeviceState;
+ if (mMinorVersion >= 5) {
+ // Check if the provider is currently active - not going to start it up for this notification
+ auto interface = mSavedInterface != nullptr ? mSavedInterface : mActiveInterface.promote();
+ if (interface != nullptr) {
+ // Send current device state
+ auto castResult = provider::V2_5::ICameraProvider::castFrom(interface);
+ if (castResult.isOk()) {
+ sp<provider::V2_5::ICameraProvider> interface_2_5 = castResult;
+ if (interface_2_5 != nullptr) {
+ interface_2_5->notifyDeviceStateChange(mDeviceState);
+ }
+ }
+ }
+ }
+ return OK;
+}
+
template<class DeviceInfoT>
std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
CameraProviderManager::ProviderInfo::initializeDeviceInfo(
@@ -1436,12 +1929,26 @@
__FUNCTION__, id.c_str(), CameraProviderManager::statusToString(status), status);
return;
}
+
+ mIsPublicallyHiddenSecureCamera = isPublicallyHiddenSecureCamera();
+
status_t res = fixupMonochromeTags();
if (OK != res) {
ALOGE("%s: Unable to fix up monochrome tags based for older HAL version: %s (%d)",
__FUNCTION__, strerror(-res), res);
return;
}
+ auto stat = addDynamicDepthTags();
+ if (OK != stat) {
+ ALOGE("%s: Failed appending dynamic depth tags: %s (%d)", __FUNCTION__, strerror(-stat),
+ stat);
+ }
+ res = deriveHeicTags();
+ if (OK != res) {
+ ALOGE("%s: Unable to derive HEIC tags based on camera and media capabilities: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
+
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
if (flashAvailable.count == 1 &&
@@ -1452,6 +1959,7 @@
}
queryPhysicalCameraIds();
+
// Get physical camera characteristics if applicable
auto castResult = device::V3_5::ICameraDevice::castFrom(interface);
if (!castResult.isOk()) {
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 0966743..a42fb4d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -28,9 +28,8 @@
#include <camera/CameraBase.h>
#include <utils/Errors.h>
#include <android/hardware/camera/common/1.0/types.h>
-#include <android/hardware/camera/provider/2.4/ICameraProvider.h>
+#include <android/hardware/camera/provider/2.5/ICameraProvider.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
-//#include <android/hardware/camera/provider/2.4/ICameraProviderCallbacks.h>
#include <android/hidl/manager/1.0/IServiceNotification.h>
#include <camera/VendorTagDescriptor.h>
@@ -206,6 +205,12 @@
status_t setUpVendorTags();
/**
+ * Inform registered providers about a device state change, such as folding or unfolding
+ */
+ status_t notifyDeviceStateChange(
+ android::hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> newState);
+
+ /**
* Open an active session to a camera device.
*
* This fully powers on the camera device hardware, and returns a handle to a
@@ -264,7 +269,10 @@
*/
bool isLogicalCamera(const std::string& id, std::vector<std::string>* physicalCameraIds);
+ bool isPublicallyHiddenSecureCamera(const std::string& id);
bool isHiddenPhysicalCamera(const std::string& cameraId);
+
+ static const float kDepthARTolerance;
private:
// All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
mutable std::mutex mInterfaceMutex;
@@ -274,6 +282,9 @@
wp<StatusListener> mListener;
ServiceInteractionProxy* mServiceProxy;
+ // Current overall Android device physical status
+ android::hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> mDeviceState;
+
// mProviderLifecycleLock is locked during onRegistration and removeProvider
mutable std::mutex mProviderLifecycleLock;
@@ -300,10 +311,14 @@
{
const std::string mProviderName;
const metadata_vendor_id_t mProviderTagid;
+ int mMinorVersion;
sp<VendorTagDescriptor> mVendorTagDescriptor;
bool mSetTorchModeSupported;
bool mIsRemote;
+ // Current overall Android device physical status
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> mDeviceState;
+
// This pointer is used to keep a reference to the ICameraProvider that was last accessed.
wp<hardware::camera::provider::V2_4::ICameraProvider> mActiveInterface;
@@ -313,7 +328,9 @@
CameraProviderManager *manager);
~ProviderInfo();
- status_t initialize(sp<hardware::camera::provider::V2_4::ICameraProvider>& interface);
+ status_t initialize(sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ currentDeviceState);
const sp<hardware::camera::provider::V2_4::ICameraProvider> startProviderInterface();
@@ -342,6 +359,13 @@
*/
status_t setUpVendorTags();
+ /**
+ * Notify provider about top-level device physical state changes
+ */
+ status_t notifyDeviceStateChange(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ newDeviceState);
+
// Basic device information, common to all camera devices
struct DeviceInfo {
const std::string mName; // Full instance name
@@ -352,6 +376,7 @@
std::vector<std::string> mPhysicalIds;
hardware::CameraInfo mInfo;
sp<IBase> mSavedInterface;
+ bool mIsPublicallyHiddenSecureCamera = false;
const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
@@ -469,9 +494,33 @@
CameraMetadata mCameraCharacteristics;
std::unordered_map<std::string, CameraMetadata> mPhysicalCameraCharacteristics;
void queryPhysicalCameraIds();
+ bool isPublicallyHiddenSecureCamera();
status_t fixupMonochromeTags();
+ status_t addDynamicDepthTags();
+ static void getSupportedSizes(const CameraMetadata& ch, uint32_t tag,
+ android_pixel_format_t format,
+ std::vector<std::tuple<size_t, size_t>> *sizes /*out*/);
+ void getSupportedDurations( const CameraMetadata& ch, uint32_t tag,
+ android_pixel_format_t format,
+ const std::vector<std::tuple<size_t, size_t>>& sizes,
+ std::vector<int64_t> *durations/*out*/);
+ void getSupportedDynamicDepthDurations(const std::vector<int64_t>& depthDurations,
+ const std::vector<int64_t>& blobDurations,
+ std::vector<int64_t> *dynamicDepthDurations /*out*/);
+ static bool isDepthPhotoLibraryPresent();
+ static void getSupportedDynamicDepthSizes(
+ const std::vector<std::tuple<size_t, size_t>>& blobSizes,
+ const std::vector<std::tuple<size_t, size_t>>& depthSizes,
+ std::vector<std::tuple<size_t, size_t>> *dynamicDepthSizes /*out*/,
+ std::vector<std::tuple<size_t, size_t>> *internalDepthSizes /*out*/);
status_t removeAvailableKeys(CameraMetadata& c, const std::vector<uint32_t>& keys,
uint32_t keyTag);
+ status_t fillHeicStreamCombinations(std::vector<int32_t>* outputs,
+ std::vector<int64_t>* durations,
+ std::vector<int64_t>* stallDurations,
+ const camera_metadata_entry& halStreamConfigs,
+ const camera_metadata_entry& halStreamDurations);
+ status_t deriveHeicTags();
};
private:
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
new file mode 100644
index 0000000..6d96163
--- /dev/null
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.cpp
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-DepthPhotoProcessor"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+//
+
+#include "DepthPhotoProcessor.h"
+
+#include <dynamic_depth/camera.h>
+#include <dynamic_depth/cameras.h>
+#include <dynamic_depth/container.h>
+#include <dynamic_depth/device.h>
+#include <dynamic_depth/dimension.h>
+#include <dynamic_depth/dynamic_depth.h>
+#include <dynamic_depth/point.h>
+#include <dynamic_depth/pose.h>
+#include <dynamic_depth/profile.h>
+#include <dynamic_depth/profiles.h>
+#include <jpeglib.h>
+#include <libexif/exif-data.h>
+#include <libexif/exif-system.h>
+#include <math.h>
+#include <sstream>
+#include <utils/Errors.h>
+#include <utils/ExifUtils.h>
+#include <utils/Log.h>
+#include <xmpmeta/xmp_data.h>
+#include <xmpmeta/xmp_writer.h>
+
+using dynamic_depth::Camera;
+using dynamic_depth::Cameras;
+using dynamic_depth::CameraParams;
+using dynamic_depth::Container;
+using dynamic_depth::DepthFormat;
+using dynamic_depth::DepthMap;
+using dynamic_depth::DepthMapParams;
+using dynamic_depth::DepthUnits;
+using dynamic_depth::Device;
+using dynamic_depth::DeviceParams;
+using dynamic_depth::Dimension;
+using dynamic_depth::Image;
+using dynamic_depth::ImagingModel;
+using dynamic_depth::ImagingModelParams;
+using dynamic_depth::Item;
+using dynamic_depth::Pose;
+using dynamic_depth::Profile;
+using dynamic_depth::Profiles;
+
+namespace android {
+namespace camera3 {
+
+ExifOrientation getExifOrientation(const unsigned char *jpegBuffer, size_t jpegBufferSize) {
+ if ((jpegBuffer == nullptr) || (jpegBufferSize == 0)) {
+ return ExifOrientation::ORIENTATION_UNDEFINED;
+ }
+
+ auto exifData = exif_data_new();
+ exif_data_load_data(exifData, jpegBuffer, jpegBufferSize);
+ ExifEntry *orientation = exif_content_get_entry(exifData->ifd[EXIF_IFD_0],
+ EXIF_TAG_ORIENTATION);
+ if ((orientation == nullptr) || (orientation->size != sizeof(ExifShort))) {
+ ALOGV("%s: Orientation EXIF entry invalid!", __FUNCTION__);
+ exif_data_unref(exifData);
+ return ExifOrientation::ORIENTATION_0_DEGREES;
+ }
+
+ auto orientationValue = exif_get_short(orientation->data, exif_data_get_byte_order(exifData));
+ ExifOrientation ret;
+ switch (orientationValue) {
+ case ExifOrientation::ORIENTATION_0_DEGREES:
+ case ExifOrientation::ORIENTATION_90_DEGREES:
+ case ExifOrientation::ORIENTATION_180_DEGREES:
+ case ExifOrientation::ORIENTATION_270_DEGREES:
+ ret = static_cast<ExifOrientation> (orientationValue);
+ break;
+ default:
+ ALOGE("%s: Unexpected EXIF orientation value: %d, defaulting to 0 degrees",
+ __FUNCTION__, orientationValue);
+ ret = ExifOrientation::ORIENTATION_0_DEGREES;
+ }
+
+ exif_data_unref(exifData);
+
+ return ret;
+}
+
+status_t encodeGrayscaleJpeg(size_t width, size_t height, uint8_t *in, void *out,
+ const size_t maxOutSize, uint8_t jpegQuality, ExifOrientation exifOrientation,
+ size_t &actualSize) {
+ status_t ret;
+ // libjpeg is a C library so we use C-style "inheritance" by
+ // putting libjpeg's jpeg_destination_mgr first in our custom
+ // struct. This allows us to cast jpeg_destination_mgr* to
+ // CustomJpegDestMgr* when we get it passed to us in a callback.
+ struct CustomJpegDestMgr : public jpeg_destination_mgr {
+ JOCTET *mBuffer;
+ size_t mBufferSize;
+ size_t mEncodedSize;
+ bool mSuccess;
+ } dmgr;
+
+ jpeg_compress_struct cinfo = {};
+ jpeg_error_mgr jerr;
+
+ // Initialize error handling with standard callbacks, but
+ // then override output_message (to print to ALOG) and
+ // error_exit to set a flag and print a message instead
+ // of killing the whole process.
+ cinfo.err = jpeg_std_error(&jerr);
+
+ cinfo.err->output_message = [](j_common_ptr cinfo) {
+ char buffer[JMSG_LENGTH_MAX];
+
+ /* Create the message */
+ (*cinfo->err->format_message)(cinfo, buffer);
+ ALOGE("libjpeg error: %s", buffer);
+ };
+
+ cinfo.err->error_exit = [](j_common_ptr cinfo) {
+ (*cinfo->err->output_message)(cinfo);
+ if (cinfo->client_data) {
+ auto & dmgr = *static_cast<CustomJpegDestMgr*>(cinfo->client_data);
+ dmgr.mSuccess = false;
+ }
+ };
+
+ // Now that we initialized some callbacks, let's create our compressor
+ jpeg_create_compress(&cinfo);
+ dmgr.mBuffer = static_cast<JOCTET*>(out);
+ dmgr.mBufferSize = maxOutSize;
+ dmgr.mEncodedSize = 0;
+ dmgr.mSuccess = true;
+ cinfo.client_data = static_cast<void*>(&dmgr);
+
+ // These lambdas become C-style function pointers and as per C++11 spec
+ // may not capture anything.
+ dmgr.init_destination = [](j_compress_ptr cinfo) {
+ auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
+ dmgr.next_output_byte = dmgr.mBuffer;
+ dmgr.free_in_buffer = dmgr.mBufferSize;
+ ALOGV("%s:%d jpeg start: %p [%zu]",
+ __FUNCTION__, __LINE__, dmgr.mBuffer, dmgr.mBufferSize);
+ };
+
+ dmgr.empty_output_buffer = [](j_compress_ptr cinfo __unused) {
+ ALOGV("%s:%d Out of buffer", __FUNCTION__, __LINE__);
+ return 0;
+ };
+
+ dmgr.term_destination = [](j_compress_ptr cinfo) {
+ auto & dmgr = static_cast<CustomJpegDestMgr&>(*cinfo->dest);
+ dmgr.mEncodedSize = dmgr.mBufferSize - dmgr.free_in_buffer;
+ ALOGV("%s:%d Done with jpeg: %zu", __FUNCTION__, __LINE__, dmgr.mEncodedSize);
+ };
+ cinfo.dest = reinterpret_cast<struct jpeg_destination_mgr*>(&dmgr);
+ cinfo.image_width = width;
+ cinfo.image_height = height;
+ cinfo.input_components = 1;
+ cinfo.in_color_space = JCS_GRAYSCALE;
+
+ // Initialize defaults and then override what we want
+ jpeg_set_defaults(&cinfo);
+
+ jpeg_set_quality(&cinfo, jpegQuality, 1);
+ jpeg_set_colorspace(&cinfo, JCS_GRAYSCALE);
+ cinfo.raw_data_in = 0;
+ cinfo.dct_method = JDCT_IFAST;
+
+ cinfo.comp_info[0].h_samp_factor = 1;
+ cinfo.comp_info[1].h_samp_factor = 1;
+ cinfo.comp_info[2].h_samp_factor = 1;
+ cinfo.comp_info[0].v_samp_factor = 1;
+ cinfo.comp_info[1].v_samp_factor = 1;
+ cinfo.comp_info[2].v_samp_factor = 1;
+
+ jpeg_start_compress(&cinfo, TRUE);
+
+ if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
+ std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+ utils->initializeEmpty();
+ utils->setImageWidth(width);
+ utils->setImageHeight(height);
+ utils->setOrientationValue(exifOrientation);
+
+ if (utils->generateApp1()) {
+ const uint8_t* exifBuffer = utils->getApp1Buffer();
+ size_t exifBufferSize = utils->getApp1Length();
+ jpeg_write_marker(&cinfo, JPEG_APP0 + 1, static_cast<const JOCTET*>(exifBuffer),
+ exifBufferSize);
+ } else {
+ ALOGE("%s: Unable to generate App1 buffer", __FUNCTION__);
+ }
+ }
+
+ for (size_t i = 0; i < cinfo.image_height; i++) {
+ auto currentRow = static_cast<JSAMPROW>(in + i*width);
+ jpeg_write_scanlines(&cinfo, &currentRow, /*num_lines*/1);
+ }
+
+ jpeg_finish_compress(&cinfo);
+
+ actualSize = dmgr.mEncodedSize;
+ if (dmgr.mSuccess) {
+ ret = NO_ERROR;
+ } else {
+ ret = UNKNOWN_ERROR;
+ }
+
+ return ret;
+}
+
+inline void unpackDepth16(uint16_t value, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ // Android densely packed depth map. The units for the range are in
+ // millimeters and need to be scaled to meters.
+ // The confidence value is encoded in the 3 most significant bits.
+ // The confidence data needs to be additionally normalized with
+ // values 1.0f, 0.0f representing maximum and minimum confidence
+ // respectively.
+ auto point = static_cast<float>(value & 0x1FFF) / 1000.f;
+ points->push_back(point);
+
+ auto conf = (value >> 13) & 0x7;
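+ // A raw confidence of 0 denotes maximum confidence; raw values 1-7 map linearly onto [0, 6/7].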
+ float normConfidence = (conf == 0) ? 1.f : (static_cast<float>(conf) - 1) / 7.f;
+ confidence->push_back(normConfidence);
+
+ if (*near > point) {
+ *near = point;
+ }
+ if (*far < point) {
+ *far = point;
+ }
+}
+
+// Trivial case, read forward from top,left corner.
+void rotate0AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (size_t i = 0; i < inputFrame.mDepthMapHeight; i++) {
+ for (size_t j = 0; j < inputFrame.mDepthMapWidth; j++) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// 90 degrees CW rotation can be applied by starting to read from bottom, left corner
+// transposing rows and columns.
+void rotate90AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (size_t i = 0; i < inputFrame.mDepthMapWidth; i++) {
+ for (ssize_t j = inputFrame.mDepthMapHeight-1; j >= 0; j--) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// 180 CW degrees rotation can be applied by starting to read backwards from bottom, right corner.
+void rotate180AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (ssize_t i = inputFrame.mDepthMapHeight-1; i >= 0; i--) {
+ for (ssize_t j = inputFrame.mDepthMapWidth-1; j >= 0; j--) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[i*inputFrame.mDepthMapStride + j], points,
+ confidence, near, far);
+ }
+ }
+}
+
+// 270 degrees CW rotation can be applied by starting to read from top, right corner
+// transposing rows and columns.
+void rotate270AndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ for (ssize_t i = inputFrame.mDepthMapWidth-1; i >= 0; i--) {
+ for (size_t j = 0; j < inputFrame.mDepthMapHeight; j++) {
+ unpackDepth16(inputFrame.mDepthMapBuffer[j*inputFrame.mDepthMapStride + i], points,
+ confidence, near, far);
+ }
+ }
+}
+
+bool rotateAndUnpack(DepthPhotoInputFrame inputFrame, std::vector<float> *points /*out*/,
+ std::vector<float> *confidence /*out*/, float *near /*out*/, float *far /*out*/) {
+ switch (inputFrame.mOrientation) {
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES:
+ rotate0AndUnpack(inputFrame, points, confidence, near, far);
+ return false;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES:
+ rotate90AndUnpack(inputFrame, points, confidence, near, far);
+ return true;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES:
+ rotate180AndUnpack(inputFrame, points, confidence, near, far);
+ return false;
+ case DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES:
+ rotate270AndUnpack(inputFrame, points, confidence, near, far);
+ return true;
+ default:
+ ALOGE("%s: Unsupported depth photo rotation: %d, default to 0", __FUNCTION__,
+ inputFrame.mOrientation);
+ rotate0AndUnpack(inputFrame, points, confidence, near, far);
+ }
+
+ return false;
+}
+
+std::unique_ptr<dynamic_depth::DepthMap> processDepthMapFrame(DepthPhotoInputFrame inputFrame,
+ ExifOrientation exifOrientation, std::vector<std::unique_ptr<Item>> *items /*out*/,
+ bool *switchDimensions /*out*/) {
+ if ((items == nullptr) || (switchDimensions == nullptr)) {
+ return nullptr;
+ }
+
+ std::vector<float> points, confidence;
+
+ size_t pointCount = inputFrame.mDepthMapWidth * inputFrame.mDepthMapHeight;
+ points.reserve(pointCount);
+ confidence.reserve(pointCount);
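+ // 'near' and 'far' track the closest and farthest depth samples seen while
+ // unpacking; they define the encoded depth range below.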
+ float near = UINT16_MAX;
+ float far = .0f;
+ *switchDimensions = false;
+ // Physical rotation of depth and confidence maps may be needed in case
+ // the EXIF orientation is set to 0 degrees and the depth photo orientation
+ // (source color image) has some different value.
+ if (exifOrientation == ExifOrientation::ORIENTATION_0_DEGREES) {
+ *switchDimensions = rotateAndUnpack(inputFrame, &points, &confidence, &near, &far);
+ } else {
+ rotate0AndUnpack(inputFrame, &points, &confidence, &near, &far);
+ }
+
+ size_t width = inputFrame.mDepthMapWidth;
+ size_t height = inputFrame.mDepthMapHeight;
+ if (*switchDimensions) {
+ width = inputFrame.mDepthMapHeight;
+ height = inputFrame.mDepthMapWidth;
+ }
+
+ if (near == far) {
+ ALOGE("%s: Near and far range values must not match!", __FUNCTION__);
+ return nullptr;
+ }
+
+ std::vector<uint8_t> pointsQuantized, confidenceQuantized;
+ pointsQuantized.reserve(pointCount); confidenceQuantized.reserve(pointCount);
+ auto pointIt = points.begin();
+ auto confidenceIt = confidence.begin();
+ while ((pointIt != points.end()) && (confidenceIt != confidence.end())) {
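+ // Quantize depth with the range-inverse mapping used by DepthFormat::kRangeInverse:
+ // d_norm = (far * (d - near)) / (d * (far - near)), scaled to [0, 255].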
+ pointsQuantized.push_back(floorf(((far * (*pointIt - near)) /
+ (*pointIt * (far - near))) * 255.0f));
+ confidenceQuantized.push_back(floorf(*confidenceIt * 255.0f));
+ confidenceIt++; pointIt++;
+ }
+
+ DepthMapParams depthParams(DepthFormat::kRangeInverse, near, far, DepthUnits::kMeters,
+ "android/depthmap");
+ depthParams.confidence_uri = "android/confidencemap";
+ depthParams.mime = "image/jpeg";
+ depthParams.depth_image_data.resize(inputFrame.mMaxJpegSize);
+ depthParams.confidence_data.resize(inputFrame.mMaxJpegSize);
+ size_t actualJpegSize;
+ auto ret = encodeGrayscaleJpeg(width, height, pointsQuantized.data(),
+ depthParams.depth_image_data.data(), inputFrame.mMaxJpegSize,
+ inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
+ if (ret != NO_ERROR) {
+ ALOGE("%s: Depth map compression failed!", __FUNCTION__);
+ return nullptr;
+ }
+ depthParams.depth_image_data.resize(actualJpegSize);
+
+ ret = encodeGrayscaleJpeg(width, height, confidenceQuantized.data(),
+ depthParams.confidence_data.data(), inputFrame.mMaxJpegSize,
+ inputFrame.mJpegQuality, exifOrientation, actualJpegSize);
+ if (ret != NO_ERROR) {
+ ALOGE("%s: Confidence map compression failed!", __FUNCTION__);
+ return nullptr;
+ }
+ depthParams.confidence_data.resize(actualJpegSize);
+
+ return DepthMap::FromData(depthParams, items);
+}
+
+extern "C" int processDepthPhotoFrame(DepthPhotoInputFrame inputFrame, size_t depthPhotoBufferSize,
+ void* depthPhotoBuffer /*out*/, size_t* depthPhotoActualSize /*out*/) {
+ if ((inputFrame.mMainJpegBuffer == nullptr) || (inputFrame.mDepthMapBuffer == nullptr) ||
+ (depthPhotoBuffer == nullptr) || (depthPhotoActualSize == nullptr)) {
+ return BAD_VALUE;
+ }
+
+ std::vector<std::unique_ptr<Item>> items;
+ std::vector<std::unique_ptr<Camera>> cameraList;
+ auto image = Image::FromDataForPrimaryImage("android/mainimage", &items);
+ std::unique_ptr<CameraParams> cameraParams(new CameraParams(std::move(image)));
+ if (cameraParams == nullptr) {
+ ALOGE("%s: Failed to initialize camera parameters", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ ExifOrientation exifOrientation = getExifOrientation(
+ reinterpret_cast<const unsigned char*> (inputFrame.mMainJpegBuffer),
+ inputFrame.mMainJpegSize);
+ bool switchDimensions;
+ cameraParams->depth_map = processDepthMapFrame(inputFrame, exifOrientation, &items,
+ &switchDimensions);
+ if (cameraParams->depth_map == nullptr) {
+ ALOGE("%s: Depth map processing failed!", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ // It is not possible to generate an imaging model without intrinsic calibration.
+ if (inputFrame.mIsInstrinsicCalibrationValid) {
+ // The camera intrinsic calibration layout is as follows:
+ // [focalLengthX, focalLengthY, opticalCenterX, opticalCenterY, skew]
+ const dynamic_depth::Point<double> focalLength(inputFrame.mInstrinsicCalibration[0],
+ inputFrame.mInstrinsicCalibration[1]);
+ size_t width = inputFrame.mMainJpegWidth;
+ size_t height = inputFrame.mMainJpegHeight;
+ if (switchDimensions) {
+ width = inputFrame.mMainJpegHeight;
+ height = inputFrame.mMainJpegWidth;
+ }
+ const Dimension imageSize(width, height);
+ ImagingModelParams imagingParams(focalLength, imageSize);
+ imagingParams.principal_point.x = inputFrame.mInstrinsicCalibration[2];
+ imagingParams.principal_point.y = inputFrame.mInstrinsicCalibration[3];
+ imagingParams.skew = inputFrame.mInstrinsicCalibration[4];
+
+ // The camera lens distortion contains the following lens correction coefficients.
+ // [kappa_1, kappa_2, kappa_3 kappa_4, kappa_5]
+ if (inputFrame.mIsLensDistortionValid) {
+ // According to specification the lens distortion coefficients should be ordered
+ // as [1, kappa_4, kappa_1, kappa_5, kappa_2, 0, kappa_3, 0]
+ float distortionData[] = {1.f, inputFrame.mLensDistortion[3],
+ inputFrame.mLensDistortion[0], inputFrame.mLensDistortion[4],
+ inputFrame.mLensDistortion[1], 0.f, inputFrame.mLensDistortion[2], 0.f};
+ auto distortionDataLength = sizeof(distortionData) / sizeof(distortionData[0]);
+ imagingParams.distortion.reserve(distortionDataLength);
+ imagingParams.distortion.insert(imagingParams.distortion.end(), distortionData,
+ distortionData + distortionDataLength);
+ }
+
+ cameraParams->imaging_model = ImagingModel::FromData(imagingParams);
+ }
+
+ if (inputFrame.mIsLogical) {
+ cameraParams->trait = dynamic_depth::CameraTrait::LOGICAL;
+ } else {
+ cameraParams->trait = dynamic_depth::CameraTrait::PHYSICAL;
+ }
+
+ cameraList.emplace_back(Camera::FromData(std::move(cameraParams)));
+
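+ // Assemble the top-level Device element from the camera list, the container items and a
+ // default "DepthPhoto" profile.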
+ auto deviceParams = std::make_unique<DeviceParams> (Cameras::FromCameraArray(&cameraList));
+ deviceParams->container = Container::FromItems(&items);
+ std::vector<std::unique_ptr<Profile>> profileList;
+ profileList.emplace_back(Profile::FromData("DepthPhoto", {0}));
+ deviceParams->profiles = Profiles::FromProfileArray(&profileList);
+ std::unique_ptr<Device> device = Device::FromData(std::move(deviceParams));
+ if (device == nullptr) {
+ ALOGE("%s: Failed to initialize camera device", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
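+ // Serialize the metadata and container items: the main JPEG is rewritten with the XMP
+ // data, followed by the depth and confidence map JPEGs.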
+ std::istringstream inputJpegStream(
+ std::string(inputFrame.mMainJpegBuffer, inputFrame.mMainJpegSize));
+ std::ostringstream outputJpegStream;
+ if (!WriteImageAndMetadataAndContainer(&inputJpegStream, device.get(), &outputJpegStream)) {
+ ALOGE("%s: Failed writing depth output", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ *depthPhotoActualSize = static_cast<size_t> (outputJpegStream.tellp());
+ if (*depthPhotoActualSize > depthPhotoBufferSize) {
+ ALOGE("%s: Depth photo output buffer not sufficient, needed %zu actual %zu", __FUNCTION__,
+ *depthPhotoActualSize, depthPhotoBufferSize);
+ return NO_MEMORY;
+ }
+
+ memcpy(depthPhotoBuffer, outputJpegStream.str().c_str(), *depthPhotoActualSize);
+
+ return 0;
+}
+
+}; // namespace camera3
+}; // namespace android
diff --git a/services/camera/libcameraservice/common/DepthPhotoProcessor.h b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
new file mode 100644
index 0000000..6a2fbff
--- /dev/null
+++ b/services/camera/libcameraservice/common/DepthPhotoProcessor.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_PROCESSOR_H
+#define ANDROID_SERVERS_CAMERA_CAMERA3_DEPTH_PROCESSOR_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace android {
+namespace camera3 {
+
+enum DepthPhotoOrientation {
+ DEPTH_ORIENTATION_0_DEGREES = 0,
+ DEPTH_ORIENTATION_90_DEGREES = 90,
+ DEPTH_ORIENTATION_180_DEGREES = 180,
+ DEPTH_ORIENTATION_270_DEGREES = 270,
+};
+
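+// Input descriptor for a single depth photo request: the main color JPEG, the 16-bit depth
+// map and optional calibration, lens distortion and orientation information.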
+struct DepthPhotoInputFrame {
+ const char* mMainJpegBuffer;
+ size_t mMainJpegSize;
+ size_t mMainJpegWidth, mMainJpegHeight;
+ uint16_t* mDepthMapBuffer;
+ size_t mDepthMapWidth, mDepthMapHeight, mDepthMapStride;
+ size_t mMaxJpegSize;
+ uint8_t mJpegQuality;
+ uint8_t mIsLogical;
+ float mInstrinsicCalibration[5];
+ uint8_t mIsInstrinsicCalibrationValid;
+ float mLensDistortion[5];
+ uint8_t mIsLensDistortionValid;
+ DepthPhotoOrientation mOrientation;
+
+ DepthPhotoInputFrame() :
+ mMainJpegBuffer(nullptr),
+ mMainJpegSize(0),
+ mMainJpegWidth(0),
+ mMainJpegHeight(0),
+ mDepthMapBuffer(nullptr),
+ mDepthMapWidth(0),
+ mDepthMapHeight(0),
+ mDepthMapStride(0),
+ mMaxJpegSize(0),
+ mJpegQuality(100),
+ mIsLogical(0),
+ mInstrinsicCalibration{0.f},
+ mIsInstrinsicCalibrationValid(0),
+ mLensDistortion{0.f},
+ mIsLensDistortionValid(0),
+ mOrientation(DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES) {}
+};
+
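+// The depth photo processor is loaded at runtime via dlopen(); clients resolve the entry
+// point below with dlsym() and call it through the function pointer type that follows.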
+static const char *kDepthPhotoLibrary = "libdepthphoto.so";
+static const char *kDepthPhotoProcessFunction = "processDepthPhotoFrame";
+typedef int (*process_depth_photo_frame) (DepthPhotoInputFrame /*inputFrame*/,
+ size_t /*depthPhotoBufferSize*/, void* /*depthPhotoBuffer out*/,
+ size_t* /*depthPhotoActualSize out*/);
+
+}; // namespace camera3
+}; // namespace android
+
+#endif
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 12fbf82..923d17a 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -885,14 +885,14 @@
return OK;
}
-status_t Camera3Device::capture(CameraMetadata &request, int64_t* /*lastFrameNumber*/) {
+status_t Camera3Device::capture(CameraMetadata &request, int64_t* lastFrameNumber) {
ATRACE_CALL();
List<const PhysicalCameraSettingsList> requestsList;
std::list<const SurfaceMap> surfaceMaps;
convertToRequestList(requestsList, surfaceMaps, request);
- return captureList(requestsList, surfaceMaps, /*lastFrameNumber*/NULL);
+ return captureList(requestsList, surfaceMaps, lastFrameNumber);
}
void Camera3Device::convertToRequestList(List<const PhysicalCameraSettingsList>& requestsList,
@@ -1027,11 +1027,22 @@
return hardware::Void();
}
+ if (outputStream->isAbandoned()) {
+ bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
+ allReqsSucceeds = false;
+ continue;
+ }
+
bufRet.streamId = streamId;
+ size_t handOutBufferCount = outputStream->getOutstandingBuffersCount();
uint32_t numBuffersRequested = bufReq.numBuffersRequested;
- size_t totalHandout = outputStream->getOutstandingBuffersCount() + numBuffersRequested;
- if (totalHandout > outputStream->asHalStream()->max_buffers) {
+ size_t totalHandout = handOutBufferCount + numBuffersRequested;
+ uint32_t maxBuffers = outputStream->asHalStream()->max_buffers;
+ if (totalHandout > maxBuffers) {
// Not able to allocate enough buffer. Exit early for this stream
+ ALOGE("%s: request too much buffers for stream %d: at HAL: %zu + requesting: %d"
+ " > max: %d", __FUNCTION__, streamId, handOutBufferCount,
+ numBuffersRequested, maxBuffers);
bufRet.val.error(StreamBufferRequestError::MAX_BUFFER_EXCEEDED);
allReqsSucceeds = false;
continue;
@@ -1757,18 +1768,20 @@
if (format == HAL_PIXEL_FORMAT_BLOB) {
ssize_t blobBufferSize;
- if (dataSpace != HAL_DATASPACE_DEPTH) {
- blobBufferSize = getJpegBufferSize(width, height);
- if (blobBufferSize <= 0) {
- SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
- return BAD_VALUE;
- }
- } else {
+ if (dataSpace == HAL_DATASPACE_DEPTH) {
blobBufferSize = getPointCloudBufferSize();
if (blobBufferSize <= 0) {
SET_ERR_L("Invalid point cloud buffer size %zd", blobBufferSize);
return BAD_VALUE;
}
+ } else if (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
+ blobBufferSize = width * height;
+ } else {
+ blobBufferSize = getJpegBufferSize(width, height);
+ if (blobBufferSize <= 0) {
+ SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
+ return BAD_VALUE;
+ }
}
newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
width, height, blobBufferSize, format, dataSpace, rotation,
@@ -2138,7 +2151,11 @@
// Pause to reconfigure
status_t Camera3Device::internalPauseAndWaitLocked(nsecs_t maxExpectedDuration) {
- mRequestThread->setPaused(true);
+ if (mRequestThread.get() != nullptr) {
+ mRequestThread->setPaused(true);
+ } else {
+ return NO_INIT;
+ }
ALOGV("%s: Camera %s: Internal wait until idle (% " PRIi64 " ns)", __FUNCTION__, mId.string(),
maxExpectedDuration);
@@ -2184,12 +2201,11 @@
mStatusWaiters++;
- // Notify HAL to start draining. We need to notify the HalInterface layer
- // even when the device is already IDLE, so HalInterface can reject incoming
- // requestStreamBuffers call.
if (!active && mUseHalBufManager) {
auto streamIds = mOutputStreams.getStreamIds();
- mRequestThread->signalPipelineDrain(streamIds);
+ if (mStatus == STATUS_ACTIVE) {
+ mRequestThread->signalPipelineDrain(streamIds);
+ }
mRequestBufferSM.onWaitUntilIdle();
}
@@ -3119,10 +3135,12 @@
status_t res = OK;
if (it != outputSurfaces.end()) {
res = stream->returnBuffer(
- outputBuffers[i], timestamp, timestampIncreasing, it->second);
+ outputBuffers[i], timestamp, timestampIncreasing, it->second,
+ inResultExtras.frameNumber);
} else {
res = stream->returnBuffer(
- outputBuffers[i], timestamp, timestampIncreasing);
+ outputBuffers[i], timestamp, timestampIncreasing, std::vector<size_t> (),
+ inResultExtras.frameNumber);
}
// Note: stream may be deallocated at this point, if this buffer was
@@ -3139,7 +3157,8 @@
// cancel the buffer
camera3_stream_buffer_t sb = outputBuffers[i];
sb.status = CAMERA3_BUFFER_STATUS_ERROR;
- stream->returnBuffer(sb, /*timestamp*/0, timestampIncreasing);
+ stream->returnBuffer(sb, /*timestamp*/0, timestampIncreasing, std::vector<size_t> (),
+ inResultExtras.frameNumber);
// notify client buffer error
sp<NotificationListener> listener;
@@ -3279,7 +3298,8 @@
streamBuffer.stream = halStream;
switch (halStream->stream_type) {
case CAMERA3_STREAM_OUTPUT:
- res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0);
+ res = stream->returnBuffer(streamBuffer, /*timestamp*/ 0,
+ /*timestampIncreasing*/true, std::vector<size_t> (), frameNumber);
if (res != OK) {
ALOGE("%s: Can't return output buffer for frame %d to"
" stream %d: %s (%d)", __FUNCTION__,
@@ -3874,7 +3894,8 @@
bool useHalBufManager) :
mHidlSession(session),
mRequestMetadataQueue(queue),
- mUseHalBufManager(useHalBufManager) {
+ mUseHalBufManager(useHalBufManager),
+ mIsReconfigurationQuerySupported(true) {
// Check with hardware service manager if we can downcast these interfaces
// Somewhat expensive, so cache the results at startup
auto castResult_3_5 = device::V3_5::ICameraDeviceSession::castFrom(mHidlSession);
@@ -3980,6 +4001,52 @@
return res;
}
+bool Camera3Device::HalInterface::isReconfigurationRequired(CameraMetadata& oldSessionParams,
+ CameraMetadata& newSessionParams) {
+ // We do reconfiguration by default.
+ bool ret = true;
+ if ((mHidlSession_3_5 != nullptr) && mIsReconfigurationQuerySupported) {
+ android::hardware::hidl_vec<uint8_t> oldParams, newParams;
+ camera_metadata_t* oldSessionMeta = const_cast<camera_metadata_t*>(
+ oldSessionParams.getAndLock());
+ camera_metadata_t* newSessionMeta = const_cast<camera_metadata_t*>(
+ newSessionParams.getAndLock());
+ oldParams.setToExternal(reinterpret_cast<uint8_t*>(oldSessionMeta),
+ get_camera_metadata_size(oldSessionMeta));
+ newParams.setToExternal(reinterpret_cast<uint8_t*>(newSessionMeta),
+ get_camera_metadata_size(newSessionMeta));
+ hardware::camera::common::V1_0::Status callStatus;
+ bool required;
+ auto hidlCb = [&callStatus, &required] (hardware::camera::common::V1_0::Status s,
+ bool requiredFlag) {
+ callStatus = s;
+ required = requiredFlag;
+ };
+ auto err = mHidlSession_3_5->isReconfigurationRequired(oldParams, newParams, hidlCb);
+ oldSessionParams.unlock(oldSessionMeta);
+ newSessionParams.unlock(newSessionMeta);
+ if (err.isOk()) {
+ switch (callStatus) {
+ case hardware::camera::common::V1_0::Status::OK:
+ ret = required;
+ break;
+ case hardware::camera::common::V1_0::Status::METHOD_NOT_SUPPORTED:
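+ // The HAL does not implement the query; don't ask again and keep the default behavior.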
+ mIsReconfigurationQuerySupported = false;
+ ret = true;
+ break;
+ default:
+ ALOGV("%s: Reconfiguration query failed: %d", __FUNCTION__, callStatus);
+ ret = true;
+ }
+ } else {
+ ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, err.description().c_str());
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
status_t Camera3Device::HalInterface::configureStreams(const camera_metadata_t *sessionParams,
camera3_stream_configuration *config, const std::vector<uint32_t>& bufferSizes) {
ATRACE_NAME("CameraHal::configureStreams");
@@ -4495,7 +4562,7 @@
return;
}
- auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter);
+ auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter - 1);
if (!err.isOk()) {
ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
return;
@@ -5091,9 +5158,10 @@
ATRACE_CALL();
bool updatesDetected = false;
+ CameraMetadata updatedParams(mLatestSessionParams);
for (auto tag : mSessionParamKeys) {
camera_metadata_ro_entry entry = settings.find(tag);
- camera_metadata_entry lastEntry = mLatestSessionParams.find(tag);
+ camera_metadata_entry lastEntry = updatedParams.find(tag);
if (entry.count > 0) {
bool isDifferent = false;
@@ -5122,17 +5190,26 @@
if (!skipHFRTargetFPSUpdate(tag, entry, lastEntry)) {
updatesDetected = true;
}
- mLatestSessionParams.update(entry);
+ updatedParams.update(entry);
}
} else if (lastEntry.count > 0) {
// Value has been removed
ALOGV("%s: Session parameter tag id %d removed", __FUNCTION__, tag);
- mLatestSessionParams.erase(tag);
+ updatedParams.erase(tag);
updatesDetected = true;
}
}
- return updatesDetected;
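+ // Give the HAL a chance to veto: only reconfigure when the updated session parameters
+ // actually require it.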
+ bool reconfigureRequired;
+ if (updatesDetected) {
+ reconfigureRequired = mInterface->isReconfigurationRequired(mLatestSessionParams,
+ updatedParams);
+ mLatestSessionParams = updatedParams;
+ } else {
+ reconfigureRequired = false;
+ }
+
+ return reconfigureRequired;
}
bool Camera3Device::RequestThread::threadLoop() {
@@ -5245,6 +5322,11 @@
ALOGVV("%s: %d: submitting %zu requests in a batch.", __FUNCTION__, __LINE__,
mNextRequests.size());
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != nullptr) {
+ parent->mRequestBufferSM.onSubmittingRequest();
+ }
+
bool submitRequestSuccess = false;
nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
if (mInterface->supportBatchRequest()) {
@@ -5255,13 +5337,6 @@
nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mRequestLatency.add(tRequestStart, tRequestEnd);
- if (submitRequestSuccess) {
- sp<Camera3Device> parent = mParent.promote();
- if (parent != nullptr) {
- parent->mRequestBufferSM.onRequestSubmitted();
- }
- }
-
if (useFlushLock) {
mFlushLock.unlock();
}
@@ -5470,6 +5545,22 @@
}
}
+ {
+ sp<Camera3Device> parent = mParent.promote();
+ if (parent != nullptr) {
+ const String8& streamCameraId = outputStream->getPhysicalCameraId();
+ for (const auto& settings : captureRequest->mSettingsList) {
+ if ((streamCameraId.isEmpty() &&
+ parent->getId() == settings.cameraId.c_str()) ||
+ streamCameraId == settings.cameraId.c_str()) {
+ outputStream->fireBufferRequestForFrameNumber(
+ captureRequest->mResultExtras.frameNumber,
+ settings.metadata);
+ }
+ }
+ }
+ }
+
String8 physicalCameraId = outputStream->getPhysicalCameraId();
if (!physicalCameraId.isEmpty()) {
@@ -5683,16 +5774,21 @@
captureRequest->mInputStream->returnInputBuffer(captureRequest->mInputBuffer);
}
- for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
- //Buffers that failed processing could still have
- //valid acquire fence.
- int acquireFence = (*outputBuffers)[i].acquire_fence;
- if (0 <= acquireFence) {
- close(acquireFence);
- outputBuffers->editItemAt(i).acquire_fence = -1;
+ // No output buffer can be returned when using HAL buffer manager
+ if (!mUseHalBufManager) {
+ for (size_t i = 0; i < halRequest->num_output_buffers; i++) {
+ //Buffers that failed processing could still have
+ //valid acquire fence.
+ int acquireFence = (*outputBuffers)[i].acquire_fence;
+ if (0 <= acquireFence) {
+ close(acquireFence);
+ outputBuffers->editItemAt(i).acquire_fence = -1;
+ }
+ outputBuffers->editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
+ captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0,
+ /*timestampIncreasing*/true, std::vector<size_t> (),
+ captureRequest->mResultExtras.frameNumber);
}
- outputBuffers->editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
- captureRequest->mOutputStreams.editItemAt(i)->returnBuffer((*outputBuffers)[i], 0);
}
if (sendRequestError) {
@@ -5798,16 +5894,16 @@
if (mPaused == false) {
ALOGV("%s: RequestThread: Going idle", __FUNCTION__);
mPaused = true;
- // Let the tracker know
- sp<StatusTracker> statusTracker = mStatusTracker.promote();
- if (statusTracker != 0) {
- statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
- }
if (mNotifyPipelineDrain) {
mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
mNotifyPipelineDrain = false;
mStreamIdsToBeDrained.clear();
}
+ // Let the tracker know
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+ }
sp<Camera3Device> parent = mParent.promote();
if (parent != nullptr) {
parent->mRequestBufferSM.onRequestThreadPaused();
@@ -5891,16 +5987,16 @@
if (mPaused == false) {
mPaused = true;
ALOGV("%s: RequestThread: Paused", __FUNCTION__);
- // Let the tracker know
- sp<StatusTracker> statusTracker = mStatusTracker.promote();
- if (statusTracker != 0) {
- statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
- }
if (mNotifyPipelineDrain) {
mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
mNotifyPipelineDrain = false;
mStreamIdsToBeDrained.clear();
}
+ // Let the tracker know
+ sp<StatusTracker> statusTracker = mStatusTracker.promote();
+ if (statusTracker != 0) {
+ statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
+ }
sp<Camera3Device> parent = mParent.promote();
if (parent != nullptr) {
parent->mRequestBufferSM.onRequestThreadPaused();
@@ -6402,9 +6498,11 @@
return;
}
-void Camera3Device::RequestBufferStateMachine::onRequestSubmitted() {
+void Camera3Device::RequestBufferStateMachine::onSubmittingRequest() {
std::lock_guard<std::mutex> lock(mLock);
mRequestThreadPaused = false;
+ // The inflight map registration actually happens in prepareHalRequest now, but this is a
+ // close enough approximation.
mInflightMapEmpty = false;
if (mStatus == RB_STATUS_STOPPED) {
mStatus = RB_STATUS_READY;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e5a38bb..b25d89d 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -309,6 +309,8 @@
status_t close();
void signalPipelineDrain(const std::vector<int>& streamIds);
+ bool isReconfigurationRequired(CameraMetadata& oldSessionParams,
+ CameraMetadata& newSessionParams);
// method to extract buffer's unique ID
// return pair of (newlySeenBuffer?, bufferId)
@@ -401,6 +403,7 @@
uint32_t mNextStreamConfigCounter = 1;
const bool mUseHalBufManager;
+ bool mIsReconfigurationQuerySupported;
};
sp<HalInterface> mInterface;
@@ -1317,7 +1320,7 @@
void onInflightMapEmpty();
// Events triggered by RequestThread
- void onRequestSubmitted();
+ void onSubmittingRequest();
void onRequestThreadPaused();
private:
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 24d1c1b..0571741 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -588,7 +588,11 @@
if (mState != STATE_CONFIGURED) {
ALOGE("%s: Stream %d: Can't get buffers if stream is not in CONFIGURED state %d",
__FUNCTION__, mId, mState);
- return INVALID_OPERATION;
+ if (mState == STATE_ABANDONED) {
+ return DEAD_OBJECT;
+ } else {
+ return INVALID_OPERATION;
+ }
}
// Wait for new buffer returned back if we are running into the limit.
@@ -656,7 +660,7 @@
status_t Camera3Stream::returnBuffer(const camera3_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing,
- const std::vector<size_t>& surface_ids) {
+ const std::vector<size_t>& surface_ids, uint64_t frameNumber) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
@@ -687,7 +691,7 @@
*/
status_t res = returnBufferLocked(b, timestamp, surface_ids);
if (res == OK) {
- fireBufferListenersLocked(b, /*acquired*/false, /*output*/true);
+ fireBufferListenersLocked(b, /*acquired*/false, /*output*/true, timestamp, frameNumber);
}
// Even if returning the buffer failed, we still want to signal whoever is waiting for the
@@ -763,8 +767,22 @@
return getInputBufferProducerLocked(producer);
}
+void Camera3Stream::fireBufferRequestForFrameNumber(uint64_t frameNumber,
+ const CameraMetadata& settings) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+
+ for (auto &it : mBufferListenerList) {
+ sp<Camera3StreamBufferListener> listener = it.promote();
+ if (listener.get() != nullptr) {
+ listener->onBufferRequestForFrameNumber(frameNumber, getId(), settings);
+ }
+ }
+}
+
void Camera3Stream::fireBufferListenersLocked(
- const camera3_stream_buffer& buffer, bool acquired, bool output) {
+ const camera3_stream_buffer& buffer, bool acquired, bool output, nsecs_t timestamp,
+ uint64_t frameNumber) {
List<wp<Camera3StreamBufferListener> >::iterator it, end;
// TODO: finish implementing
@@ -773,6 +791,8 @@
Camera3StreamBufferListener::BufferInfo();
info.mOutput = output;
info.mError = (buffer.status == CAMERA3_BUFFER_STATUS_ERROR);
+ info.mFrameNumber = frameNumber;
+ info.mTimestamp = timestamp;
// TODO: rest of fields
for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index ddba9f6..5eb6a23 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -332,7 +332,8 @@
*/
status_t returnBuffer(const camera3_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing,
- const std::vector<size_t>& surface_ids = std::vector<size_t>());
+ const std::vector<size_t>& surface_ids = std::vector<size_t>(),
+ uint64_t frameNumber = 0);
/**
* Fill in the camera3_stream_buffer with the next valid buffer for this
@@ -430,6 +431,12 @@
*/
status_t restoreConfiguredState();
+ /**
+ * Notify buffer stream listeners about incoming request with particular frame number.
+ */
+ void fireBufferRequestForFrameNumber(uint64_t frameNumber,
+ const CameraMetadata& settings) override;
+
protected:
const int mId;
/**
@@ -538,7 +545,7 @@
static const nsecs_t kWaitForBufferDuration = 3000000000LL; // 3000 ms
void fireBufferListenersLocked(const camera3_stream_buffer& buffer,
- bool acquired, bool output);
+ bool acquired, bool output, nsecs_t timestamp = 0, uint64_t frameNumber = 0);
List<wp<Camera3StreamBufferListener> > mBufferListenerList;
status_t cancelPrepareLocked();
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
index 2db333d..d0aee27 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H
#define ANDROID_SERVERS_CAMERA3_STREAMBUFFERLISTENER_H
+#include <camera/CameraMetadata.h>
#include <gui/Surface.h>
#include <utils/RefBase.h>
@@ -41,6 +42,9 @@
virtual void onBufferAcquired(const BufferInfo& bufferInfo) = 0;
// Buffer was released by the HAL
virtual void onBufferReleased(const BufferInfo& bufferInfo) = 0;
+ // Notify about incoming buffer request frame number
+ virtual void onBufferRequestForFrameNumber(uint64_t frameNumber, int streamId,
+ const CameraMetadata& settings) = 0;
};
}; //namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index a84720b..5cd11b7 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -18,6 +18,8 @@
#define ANDROID_SERVERS_CAMERA3_STREAM_INTERFACE_H
#include <utils/RefBase.h>
+
+#include <camera/CameraMetadata.h>
#include "Camera3StreamBufferListener.h"
#include "Camera3StreamBufferFreedListener.h"
@@ -259,7 +261,8 @@
*/
virtual status_t returnBuffer(const camera3_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing = true,
- const std::vector<size_t>& surface_ids = std::vector<size_t>()) = 0;
+ const std::vector<size_t>& surface_ids = std::vector<size_t>(),
+ uint64_t frameNumber = 0) = 0;
/**
* Fill in the camera3_stream_buffer with the next valid buffer for this
@@ -341,6 +344,12 @@
* Camera3Stream.
*/
virtual void setBufferFreedListener(wp<Camera3StreamBufferFreedListener> listener) = 0;
+
+ /**
+ * Notify buffer stream listeners about incoming request with particular frame number.
+ */
+ virtual void fireBufferRequestForFrameNumber(uint64_t frameNumber,
+ const CameraMetadata& settings) = 0;
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/hidl/HidlCameraService.cpp b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
index 48f1d37..74cfe42 100644
--- a/services/camera/libcameraservice/hidl/HidlCameraService.cpp
+++ b/services/camera/libcameraservice/hidl/HidlCameraService.cpp
@@ -182,7 +182,8 @@
}
}
std::vector<hardware::CameraStatus> cameraStatusAndIds{};
- binder::Status serviceRet = mAidlICameraService->addListener(csListener, &cameraStatusAndIds);
+ binder::Status serviceRet =
+ mAidlICameraService->addListenerHelper(csListener, &cameraStatusAndIds, true);
HStatus status = HStatus::NO_ERROR;
if (!serviceRet.isOk()) {
ALOGE("%s: Unable to add camera device status listener", __FUNCTION__);
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index ad9963a..b4e7c32 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -27,14 +27,19 @@
libcamera_client \
libcamera_metadata \
libutils \
+ libjpeg \
+ libexif \
android.hardware.camera.common@1.0 \
android.hardware.camera.provider@2.4 \
+ android.hardware.camera.provider@2.5 \
android.hardware.camera.device@1.0 \
android.hardware.camera.device@3.2 \
android.hardware.camera.device@3.4
LOCAL_C_INCLUDES += \
system/media/private/camera/include \
+ external/dynamic_depth/includes \
+ external/dynamic_depth/internal \
LOCAL_CFLAGS += -Wall -Wextra -Werror
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index 0086c6c..f47e5a5 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -33,6 +33,7 @@
using android::hardware::camera::common::V1_0::CameraMetadataType;
using android::hardware::camera::device::V3_2::ICameraDeviceCallback;
using android::hardware::camera::device::V3_2::ICameraDeviceSession;
+using android::hardware::camera::provider::V2_5::DeviceState;
/**
* Basic test implementation of a camera ver. 3.2 device interface
@@ -87,7 +88,7 @@
/**
* Basic test implementation of a camera provider
*/
-struct TestICameraProvider : virtual public provider::V2_4::ICameraProvider {
+struct TestICameraProvider : virtual public provider::V2_5::ICameraProvider {
sp<provider::V2_4::ICameraProviderCallback> mCallbacks;
std::vector<hardware::hidl_string> mDeviceNames;
sp<device::V3_2::ICameraDevice> mDeviceInterface;
@@ -101,6 +102,7 @@
virtual hardware::Return<Status> setCallback(
const sp<provider::V2_4::ICameraProviderCallback>& callbacks) override {
+ mCalledCounter[SET_CALLBACK]++;
mCallbacks = callbacks;
return hardware::Return<Status>(Status::OK);
}
@@ -108,6 +110,7 @@
using getVendorTags_cb = std::function<void(Status status,
const hardware::hidl_vec<common::V1_0::VendorTagSection>& sections)>;
hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
+ mCalledCounter[GET_VENDOR_TAGS]++;
_hidl_cb(Status::OK, mVendorTagSections);
return hardware::Void();
}
@@ -117,6 +120,7 @@
bool support)>;
virtual ::hardware::Return<void> isSetTorchModeSupported(
isSetTorchModeSupported_cb _hidl_cb) override {
+ mCalledCounter[IS_SET_TORCH_MODE_SUPPORTED]++;
_hidl_cb(Status::OK, false);
return hardware::Void();
}
@@ -124,6 +128,7 @@
using getCameraIdList_cb = std::function<void(Status status,
const hardware::hidl_vec<hardware::hidl_string>& cameraDeviceNames)>;
virtual hardware::Return<void> getCameraIdList(getCameraIdList_cb _hidl_cb) override {
+ mCalledCounter[GET_CAMERA_ID_LIST]++;
_hidl_cb(Status::OK, mDeviceNames);
return hardware::Void();
}
@@ -148,6 +153,25 @@
return hardware::Void();
}
+ virtual hardware::Return<void> notifyDeviceStateChange(
+ hardware::hidl_bitfield<DeviceState> newState) override {
+ mCalledCounter[NOTIFY_DEVICE_STATE]++;
+ mCurrentState = newState;
+ return hardware::Void();
+ }
+
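+ // Per-method call counters so tests can verify how often each provider entry point is hit.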
+ enum MethodNames {
+ SET_CALLBACK,
+ GET_VENDOR_TAGS,
+ IS_SET_TORCH_MODE_SUPPORTED,
+ NOTIFY_DEVICE_STATE,
+ GET_CAMERA_ID_LIST,
+
+ METHOD_NAME_COUNT
+ };
+ int mCalledCounter[METHOD_NAME_COUNT] {0};
+
+ hardware::hidl_bitfield<DeviceState> mCurrentState = 0xFFFFFFFF; // Unlikely to be a real state
};
/**
@@ -209,11 +233,26 @@
res = providerManager->initialize(statusListener, &serviceProxy);
ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+ // Check that both "legacy" and "external" providers (really the same object) are called
+ // once for all the init methods
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::SET_CALLBACK], 2) <<
+ "Only one call to setCallback per provider expected during init";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_VENDOR_TAGS], 2) <<
+ "Only one call to getVendorTags per provider expected during init";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::IS_SET_TORCH_MODE_SUPPORTED], 2) <<
+ "Only one call to isSetTorchModeSupported per provider expected during init";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_CAMERA_ID_LIST], 2) <<
+ "Only one call to getCameraIdList per provider expected during init";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::NOTIFY_DEVICE_STATE], 2) <<
+ "Only one call to notifyDeviceState per provider expected during init";
std::string legacyInstanceName = "legacy/0";
std::string externalInstanceName = "external/0";
bool gotLegacy = false;
bool gotExternal = false;
+ EXPECT_EQ(2u, serviceProxy.mLastRequestedServiceNames.size()) <<
+ "Only two service queries expected to be seen by hardware service manager";
+
for (auto& serviceName : serviceProxy.mLastRequestedServiceNames) {
if (serviceName == legacyInstanceName) gotLegacy = true;
if (serviceName == externalInstanceName) gotExternal = true;
@@ -375,3 +414,35 @@
metadataCopy.dump(1, 2);
secondMetadata.dump(1, 2);
}
+
+TEST(CameraProviderManagerTest, NotifyStateChangeTest) {
+ std::vector<hardware::hidl_string> deviceNames {
+ "device@3.2/test/0",
+ "device@1.0/test/0",
+ "device@3.2/test/1"};
+
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+ status_t res;
+ sp<CameraProviderManager> providerManager = new CameraProviderManager();
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+ serviceProxy.setProvider(provider);
+
+ res = providerManager->initialize(statusListener, &serviceProxy);
+ ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+ ASSERT_EQ(provider->mCurrentState,
+ static_cast<hardware::hidl_bitfield<DeviceState>>(DeviceState::NORMAL))
+ << "Initial device state not set";
+
+ res = providerManager->notifyDeviceStateChange(
+ static_cast<hardware::hidl_bitfield<DeviceState>>(DeviceState::FOLDED));
+
+ ASSERT_EQ(res, OK) << "Unable to call notifyDeviceStateChange";
+ ASSERT_EQ(provider->mCurrentState,
+ static_cast<hardware::hidl_bitfield<DeviceState>>(DeviceState::FOLDED))
+ << "Unable to change device state";
+
+}
diff --git a/services/camera/libcameraservice/tests/DepthProcessorTest.cpp b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
new file mode 100644
index 0000000..2162514
--- /dev/null
+++ b/services/camera/libcameraservice/tests/DepthProcessorTest.cpp
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "DepthProcessorTest"
+
+#include <array>
+#include <random>
+
+#include <dlfcn.h>
+#include <gtest/gtest.h>
+
+#include "../common/DepthPhotoProcessor.h"
+#include "../utils/ExifUtils.h"
+#include "NV12Compressor.h"
+
+using namespace android;
+using namespace android::camera3;
+
+static const size_t kTestBufferWidth = 640;
+static const size_t kTestBufferHeight = 480;
+static const size_t kTestBufferNV12Size ((((kTestBufferWidth) * (kTestBufferHeight)) * 3) / 2);
+static const size_t kTestBufferDepthSize (kTestBufferWidth * kTestBufferHeight);
+static const size_t kSeed = 1234;
+
+void linkToDepthPhotoLibrary(void **libHandle /*out*/,
+ process_depth_photo_frame *processFrameFunc /*out*/) {
+ ASSERT_NE(libHandle, nullptr);
+ ASSERT_NE(processFrameFunc, nullptr);
+
+ *libHandle = dlopen(kDepthPhotoLibrary, RTLD_NOW | RTLD_LOCAL);
+ if (*libHandle != nullptr) {
+ *processFrameFunc = reinterpret_cast<camera3::process_depth_photo_frame> (
+ dlsym(*libHandle, kDepthPhotoProcessFunction));
+ ASSERT_NE(*processFrameFunc, nullptr);
+ }
+}
+
+void generateColorJpegBuffer(int jpegQuality, ExifOrientation orientationValue, bool includeExif,
+ bool switchDimensions, std::vector<uint8_t> *colorJpegBuffer /*out*/) {
+ ASSERT_NE(colorJpegBuffer, nullptr);
+
+ std::array<uint8_t, kTestBufferNV12Size> colorSourceBuffer;
+ std::default_random_engine gen(kSeed);
+ std::uniform_int_distribution<int> uniDist(0, UINT8_MAX - 1);
+ for (size_t i = 0; i < colorSourceBuffer.size(); i++) {
+ colorSourceBuffer[i] = uniDist(gen);
+ }
+
+ size_t width = kTestBufferWidth;
+ size_t height = kTestBufferHeight;
+ if (switchDimensions) {
+ width = kTestBufferHeight;
+ height = kTestBufferWidth;
+ }
+
+ NV12Compressor jpegCompressor;
+ if (includeExif) {
+ ASSERT_TRUE(jpegCompressor.compressWithExifOrientation(
+ reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+ jpegQuality, orientationValue));
+ } else {
+ ASSERT_TRUE(jpegCompressor.compress(
+ reinterpret_cast<const unsigned char*> (colorSourceBuffer.data()), width, height,
+ jpegQuality));
+ }
+
+ *colorJpegBuffer = std::move(jpegCompressor.getCompressedData());
+ ASSERT_FALSE(colorJpegBuffer->empty());
+}
+
+void generateDepth16Buffer(std::array<uint16_t, kTestBufferDepthSize> *depth16Buffer /*out*/) {
+ ASSERT_NE(depth16Buffer, nullptr);
+ std::default_random_engine gen(kSeed+1);
+ std::uniform_int_distribution<int> uniDist(0, UINT16_MAX - 1);
+ for (size_t i = 0; i < depth16Buffer->size(); i++) {
+ (*depth16Buffer)[i] = uniDist(gen);
+ }
+}
+
+TEST(DepthProcessorTest, LinkToLibrary) {
+ void *libHandle;
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle != nullptr) {
+ dlclose(libHandle);
+ }
+}
+
+TEST(DepthProcessorTest, BadInput) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
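+ // Populate the input frame incrementally; each processFunc() call below is expected to
+ // fail because of a still-missing field or an invalid output argument.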
+ DepthPhotoInputFrame inputFrame;
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+
+ std::vector<uint8_t> colorJpegBuffer;
+ generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
+ /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), nullptr,
+ &actualDepthPhotoSize), 0);
+
+ ASSERT_NE(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(), nullptr),
+ 0);
+
+ dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, BasicDepthPhotoValidation) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
+ std::vector<uint8_t> colorJpegBuffer;
+ generateColorJpegBuffer(jpegQuality, ExifOrientation::ORIENTATION_UNDEFINED,
+ /*includeExif*/ false, /*switchDimensions*/ false, &colorJpegBuffer);
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ DepthPhotoInputFrame inputFrame;
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+ ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+ ASSERT_TRUE((actualDepthPhotoSize > 0) && (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+ // The final depth photo must consist of three jpeg images:
+ // - the main color image
+ // - the depth map image
+ // - the confidence map image
+ size_t mainJpegSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+ &mainJpegSize), OK);
+ ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+ size_t depthMapSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+ actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+ ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+
+ dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, TestDepthPhotoExifOrientation) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
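+ // Exercise every supported EXIF orientation and verify that it propagates to both the
+ // depth and the confidence map.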
+ ExifOrientation exifOrientations[] = { ExifOrientation::ORIENTATION_UNDEFINED,
+ ExifOrientation::ORIENTATION_0_DEGREES, ExifOrientation::ORIENTATION_90_DEGREES,
+ ExifOrientation::ORIENTATION_180_DEGREES, ExifOrientation::ORIENTATION_270_DEGREES };
+ for (auto exifOrientation : exifOrientations) {
+ std::vector<uint8_t> colorJpegBuffer;
+ generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
+ /*switchDimensions*/ false, &colorJpegBuffer);
+ if (exifOrientation != ExifOrientation::ORIENTATION_UNDEFINED) {
+ auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(),
+ colorJpegBuffer.size(), &jpegExifOrientation), OK);
+ ASSERT_EQ(exifOrientation, jpegExifOrientation);
+ }
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ DepthPhotoInputFrame inputFrame;
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+ ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+ ASSERT_TRUE((actualDepthPhotoSize > 0) &&
+ (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+ size_t mainJpegSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+ &mainJpegSize), OK);
+ ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+ size_t depthMapSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+ actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+ ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+ size_t confidenceMapSize = actualDepthPhotoSize - (mainJpegSize + depthMapSize);
+
+ //Depth and confidence images must have the same EXIF orientation as the source
+ auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthJpegExifOrientation), OK);
+ if (exifOrientation == ORIENTATION_UNDEFINED) {
+ // In case of undefined or missing EXIF orientation, always expect 0 degrees in the
+ // depth map.
+ ASSERT_EQ(depthJpegExifOrientation, ExifOrientation::ORIENTATION_0_DEGREES);
+ } else {
+ ASSERT_EQ(depthJpegExifOrientation, exifOrientation);
+ }
+
+ auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize,
+ confidenceMapSize, &confidenceJpegExifOrientation), OK);
+ if (exifOrientation == ORIENTATION_UNDEFINED) {
+ // In case of undefined or missing EXIF orientation, always expect 0 degrees in the
+ // confidence map.
+ ASSERT_EQ(confidenceJpegExifOrientation, ExifOrientation::ORIENTATION_0_DEGREES);
+ } else {
+ ASSERT_EQ(confidenceJpegExifOrientation, exifOrientation);
+ }
+ }
+
+ dlclose(libHandle);
+}
+
+TEST(DepthProcessorTest, TestDepthPhotoPhysicalRotation) {
+ void *libHandle;
+ int jpegQuality = 95;
+
+ process_depth_photo_frame processFunc;
+ linkToDepthPhotoLibrary(&libHandle, &processFunc);
+ if (libHandle == nullptr) {
+ // Depth library not present, nothing more to test.
+ return;
+ }
+
+ // In case of physical rotation, the EXIF orientation must always be 0.
+ auto exifOrientation = ExifOrientation::ORIENTATION_0_DEGREES;
+ DepthPhotoOrientation depthOrientations[] = {
+ DepthPhotoOrientation::DEPTH_ORIENTATION_0_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_180_DEGREES,
+ DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES };
+ for (auto depthOrientation : depthOrientations) {
+ std::vector<uint8_t> colorJpegBuffer;
+ bool switchDimensions = false;
+ size_t expectedWidth = kTestBufferWidth;
+ size_t expectedHeight = kTestBufferHeight;
+ if ((depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_90_DEGREES) ||
+ (depthOrientation == DepthPhotoOrientation::DEPTH_ORIENTATION_270_DEGREES)) {
+ switchDimensions = true;
+ expectedWidth = kTestBufferHeight;
+ expectedHeight = kTestBufferWidth;
+ }
+ generateColorJpegBuffer(jpegQuality, exifOrientation, /*includeExif*/ true,
+ switchDimensions, &colorJpegBuffer);
+ auto jpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(colorJpegBuffer.data(), colorJpegBuffer.size(),
+ &jpegExifOrientation), OK);
+ ASSERT_EQ(exifOrientation, jpegExifOrientation);
+
+ std::array<uint16_t, kTestBufferDepthSize> depth16Buffer;
+ generateDepth16Buffer(&depth16Buffer);
+
+ DepthPhotoInputFrame inputFrame;
+ inputFrame.mMainJpegBuffer = reinterpret_cast<const char*> (colorJpegBuffer.data());
+ inputFrame.mMainJpegSize = colorJpegBuffer.size();
+ // Worst case both depth and confidence maps have the same size as the main color image.
+ inputFrame.mMaxJpegSize = inputFrame.mMainJpegSize * 3;
+ inputFrame.mMainJpegWidth = kTestBufferWidth;
+ inputFrame.mMainJpegHeight = kTestBufferHeight;
+ inputFrame.mJpegQuality = jpegQuality;
+ inputFrame.mDepthMapBuffer = depth16Buffer.data();
+ inputFrame.mDepthMapWidth = inputFrame.mDepthMapStride = kTestBufferWidth;
+ inputFrame.mDepthMapHeight = kTestBufferHeight;
+ inputFrame.mOrientation = depthOrientation;
+
+ std::vector<uint8_t> depthPhotoBuffer(inputFrame.mMaxJpegSize);
+ size_t actualDepthPhotoSize = 0;
+ ASSERT_EQ(processFunc(inputFrame, depthPhotoBuffer.size(), depthPhotoBuffer.data(),
+ &actualDepthPhotoSize), 0);
+ ASSERT_TRUE((actualDepthPhotoSize > 0) &&
+ (depthPhotoBuffer.size() >= actualDepthPhotoSize));
+
+ size_t mainJpegSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data(), actualDepthPhotoSize,
+ &mainJpegSize), OK);
+ ASSERT_TRUE((mainJpegSize > 0) && (mainJpegSize < actualDepthPhotoSize));
+ size_t depthMapSize = 0;
+ ASSERT_EQ(NV12Compressor::findJpegSize(depthPhotoBuffer.data() + mainJpegSize,
+ actualDepthPhotoSize - mainJpegSize, &depthMapSize), OK);
+ ASSERT_TRUE((depthMapSize > 0) && (depthMapSize < (actualDepthPhotoSize - mainJpegSize)));
+ size_t confidenceMapSize = actualDepthPhotoSize - (mainJpegSize + depthMapSize);
+
+ //Depth and confidence images must have the same EXIF orientation as the source
+ auto depthJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthJpegExifOrientation), OK);
+ ASSERT_EQ(depthJpegExifOrientation, exifOrientation);
+ size_t depthMapWidth, depthMapHeight;
+ ASSERT_EQ(NV12Compressor::getJpegImageDimensions(depthPhotoBuffer.data() + mainJpegSize,
+ depthMapSize, &depthMapWidth, &depthMapHeight), OK);
+ ASSERT_EQ(depthMapWidth, expectedWidth);
+ ASSERT_EQ(depthMapHeight, expectedHeight);
+
+ auto confidenceJpegExifOrientation = ExifOrientation::ORIENTATION_UNDEFINED;
+ ASSERT_EQ(NV12Compressor::getExifOrientation(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+ &confidenceJpegExifOrientation), OK);
+ ASSERT_EQ(confidenceJpegExifOrientation, exifOrientation);
+ size_t confidenceMapWidth, confidenceMapHeight;
+ ASSERT_EQ(NV12Compressor::getJpegImageDimensions(
+ depthPhotoBuffer.data() + mainJpegSize + depthMapSize, confidenceMapSize,
+ &confidenceMapWidth, &confidenceMapHeight), OK);
+ ASSERT_EQ(confidenceMapWidth, expectedWidth);
+ ASSERT_EQ(confidenceMapHeight, expectedHeight);
+ }
+
+ dlclose(libHandle);
+}
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.cpp b/services/camera/libcameraservice/tests/NV12Compressor.cpp
new file mode 100644
index 0000000..0a41a1f
--- /dev/null
+++ b/services/camera/libcameraservice/tests/NV12Compressor.cpp
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "Test_NV12Compressor"
+
+#include "NV12Compressor.h"
+
+#include <libexif/exif-data.h>
+#include <netinet/in.h>
+
+using namespace android;
+using namespace android::camera3;
+
+namespace std {
+template <>
+struct default_delete<ExifEntry> {
+ inline void operator()(ExifEntry* entry) const { exif_entry_unref(entry); }
+};
+
+template <>
+struct default_delete<ExifData> {
+ inline void operator()(ExifData* data) const { exif_data_unref(data); }
+};
+
+} // namespace std
+
+bool NV12Compressor::compress(const unsigned char* data, int width, int height, int quality) {
+ if (!configureCompressor(width, height, quality)) {
+ // the method will have logged a more detailed error message than we can
+ // provide here so just return.
+ return false;
+ }
+
+ return compressData(data, /*exifData*/ nullptr);
+}
+
+bool NV12Compressor::compressWithExifOrientation(const unsigned char* data, int width, int height,
+ int quality, android::camera3::ExifOrientation exifValue) {
+ std::unique_ptr<ExifData> exifData(exif_data_new());
+ if (exifData.get() == nullptr) {
+ return false;
+ }
+
+ exif_data_set_option(exifData.get(), EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+ exif_data_set_data_type(exifData.get(), EXIF_DATA_TYPE_COMPRESSED);
+ exif_data_set_byte_order(exifData.get(), EXIF_BYTE_ORDER_INTEL);
+ std::unique_ptr<ExifEntry> exifEntry(exif_entry_new());
+ if (exifEntry.get() == nullptr) {
+ return false;
+ }
+
+ exifEntry->tag = EXIF_TAG_ORIENTATION;
+ exif_content_add_entry(exifData->ifd[EXIF_IFD_0], exifEntry.get());
+ exif_entry_initialize(exifEntry.get(), exifEntry->tag);
+ exif_set_short(exifEntry->data, EXIF_BYTE_ORDER_INTEL, exifValue);
+
+ if (!configureCompressor(width, height, quality)) {
+ return false;
+ }
+
+ return compressData(data, exifData.get());
+}
+
+const std::vector<uint8_t>& NV12Compressor::getCompressedData() const {
+ return mDestManager.mBuffer;
+}
+
+bool NV12Compressor::configureCompressor(int width, int height, int quality) {
+ mCompressInfo.err = jpeg_std_error(&mErrorManager);
+ // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+ // The compiler will not generate code to destroy them during the return
+ // below so they will leak. Additionally, do not place any calls to libjpeg
+ // that can fail above this line or any error will cause undefined behavior.
+ if (setjmp(mErrorManager.mJumpBuffer)) {
+ // This is where the error handler will jump in case setup fails
+ // The error manager will ALOG an appropriate error message
+ return false;
+ }
+
+ jpeg_create_compress(&mCompressInfo);
+
+ mCompressInfo.image_width = width;
+ mCompressInfo.image_height = height;
+ mCompressInfo.input_components = 3;
+ mCompressInfo.in_color_space = JCS_YCbCr;
+ jpeg_set_defaults(&mCompressInfo);
+
+ jpeg_set_quality(&mCompressInfo, quality, TRUE);
+ // It may seem weird to set color space here again but this will also set
+ // other fields. These fields might be overwritten by jpeg_set_defaults
+ jpeg_set_colorspace(&mCompressInfo, JCS_YCbCr);
+ mCompressInfo.raw_data_in = TRUE;
+ mCompressInfo.dct_method = JDCT_IFAST;
+ // Set sampling factors
+ mCompressInfo.comp_info[0].h_samp_factor = 2;
+ mCompressInfo.comp_info[0].v_samp_factor = 2;
+ mCompressInfo.comp_info[1].h_samp_factor = 1;
+ mCompressInfo.comp_info[1].v_samp_factor = 1;
+ mCompressInfo.comp_info[2].h_samp_factor = 1;
+ mCompressInfo.comp_info[2].v_samp_factor = 1;
+
+ mCompressInfo.dest = &mDestManager;
+
+ return true;
+}
+
+static void deinterleave(const uint8_t* vuPlanar, std::vector<uint8_t>& uRows,
+ std::vector<uint8_t>& vRows, int rowIndex, int width, int height, int stride) {
+ int numRows = (height - rowIndex) / 2;
+ if (numRows > 8) numRows = 8;
+ for (int row = 0; row < numRows; ++row) {
+ int offset = ((rowIndex >> 1) + row) * stride;
+ const uint8_t* vu = vuPlanar + offset;
+ for (int i = 0; i < (width >> 1); ++i) {
+ int index = row * (width >> 1) + i;
+ uRows[index] = vu[1];
+ vRows[index] = vu[0];
+ vu += 2;
+ }
+ }
+}
+
+bool NV12Compressor::compressData(const unsigned char* data, ExifData* exifData) {
+ const uint8_t* y[16];
+ const uint8_t* cb[8];
+ const uint8_t* cr[8];
+ const uint8_t** planes[3] = { y, cb, cr };
+
+ int i, offset;
+ int width = mCompressInfo.image_width;
+ int height = mCompressInfo.image_height;
+ const uint8_t* yPlanar = data;
+ const uint8_t* vuPlanar = data + (width * height);
+ std::vector<uint8_t> uRows(8 * (width >> 1));
+ std::vector<uint8_t> vRows(8 * (width >> 1));
+
+ // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+ // The compiler will not generate code to destroy them during the return
+ // below so they will leak. Additionally, do not place any calls to libjpeg
+ // that can fail above this line or any error will cause undefined behavior.
+ if (setjmp(mErrorManager.mJumpBuffer)) {
+ // This is where the error handler will jump in case compression fails
+ // The error manager will ALOG an appropriate error message
+ return false;
+ }
+
+ jpeg_start_compress(&mCompressInfo, TRUE);
+
+ attachExifData(exifData);
+
+ // process 16 lines of Y and 8 lines of U/V each time.
+ while (mCompressInfo.next_scanline < mCompressInfo.image_height) {
+ //deinterleave u and v
+ deinterleave(vuPlanar, uRows, vRows, mCompressInfo.next_scanline,
+ width, height, width);
+
+ // Jpeg library ignores the rows whose indices are greater than height.
+ for (i = 0; i < 16; i++) {
+ // y row
+ y[i] = yPlanar + (mCompressInfo.next_scanline + i) * width;
+
+ // construct u row and v row
+ if ((i & 1) == 0) {
+ // height and width are both halved because of downsampling
+ offset = (i >> 1) * (width >> 1);
+ cb[i/2] = &uRows[offset];
+ cr[i/2] = &vRows[offset];
+ }
+ }
+ jpeg_write_raw_data(&mCompressInfo, const_cast<JSAMPIMAGE>(planes), 16);
+ }
+
+ jpeg_finish_compress(&mCompressInfo);
+ jpeg_destroy_compress(&mCompressInfo);
+
+ return true;
+}
+
+bool NV12Compressor::attachExifData(ExifData* exifData) {
+ if (exifData == nullptr) {
+ // This is not an error, we don't require EXIF data
+ return true;
+ }
+
+ // Save the EXIF data to memory
+ unsigned char* rawData = nullptr;
+ unsigned int size = 0;
+ exif_data_save_data(exifData, &rawData, &size);
+ if (rawData == nullptr) {
+ ALOGE("Failed to create EXIF data block");
+ return false;
+ }
+
+ jpeg_write_marker(&mCompressInfo, JPEG_APP0 + 1, rawData, size);
+ free(rawData);
+ return true;
+}
+
+NV12Compressor::ErrorManager::ErrorManager() {
+ error_exit = &onJpegError;
+}
+
+void NV12Compressor::ErrorManager::onJpegError(j_common_ptr cinfo) {
+ // NOTE! Do not construct any non-trivial objects in this method at the top
+ // scope. Their destructors will not be called. If you do need such an
+ // object create a local scope that does not include the longjmp call,
+ // that ensures the object is destroyed before longjmp is called.
+ ErrorManager* errorManager = reinterpret_cast<ErrorManager*>(cinfo->err);
+
+ // Format and log error message
+ char errorMessage[JMSG_LENGTH_MAX];
+ (*errorManager->format_message)(cinfo, errorMessage);
+ errorMessage[sizeof(errorMessage) - 1] = '\0';
+ ALOGE("JPEG compression error: %s", errorMessage);
+ jpeg_destroy(cinfo);
+
+ // And through the looking glass we go
+ longjmp(errorManager->mJumpBuffer, 1);
+}
+
+NV12Compressor::DestinationManager::DestinationManager() {
+ init_destination = &initDestination;
+ empty_output_buffer = &emptyOutputBuffer;
+ term_destination = &termDestination;
+}
+
+void NV12Compressor::DestinationManager::initDestination(j_compress_ptr cinfo) {
+ auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+ // Start out with some arbitrary but not too large buffer size
+ manager->mBuffer.resize(16 * 1024);
+ manager->next_output_byte = &manager->mBuffer[0];
+ manager->free_in_buffer = manager->mBuffer.size();
+}
+
+boolean NV12Compressor::DestinationManager::emptyOutputBuffer(
+ j_compress_ptr cinfo) {
+ auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+ // Keep doubling the size of the buffer for a very low, amortized
+ // performance cost of the allocations
+ size_t oldSize = manager->mBuffer.size();
+ manager->mBuffer.resize(oldSize * 2);
+ manager->next_output_byte = &manager->mBuffer[oldSize];
+ manager->free_in_buffer = manager->mBuffer.size() - oldSize;
+ return manager->free_in_buffer != 0;
+}
+
+void NV12Compressor::DestinationManager::termDestination(j_compress_ptr cinfo) {
+ auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+ // Resize down to the exact size of the output, i.e. drop as many bytes
+ // as are left unused in the buffer.
+ manager->mBuffer.resize(manager->mBuffer.size() - manager->free_in_buffer);
+}
+
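+
+// Illustrative walk through the segment scan below (rough sketch): after the
+// 2-byte SOI marker, each JFIF segment adds its 2 marker bytes plus a big-endian
+// length that already counts its own two bytes, so a segment whose length field
+// reads 100 advances *size by 102.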
+status_t NV12Compressor::findJpegSize(uint8_t *jpegBuffer, size_t maxSize, size_t *size /*out*/) {
+ if ((size == nullptr) || (jpegBuffer == nullptr)) {
+ return BAD_VALUE;
+ }
+
+ if (checkJpegStart(jpegBuffer) == 0) {
+ return BAD_VALUE;
+ }
+
+ // Read JFIF segment markers, skip over segment data
+ *size = kMarkerLength; // skip past the Start Of Image marker
+ while (*size <= maxSize - kMarkerLength) {
+ segment_t *segment = (segment_t*)(jpegBuffer + *size);
+ uint8_t type = checkJpegMarker(segment->marker);
+ if (type == 0) { // invalid marker, no more segments, begin JPEG data
+ break;
+ }
+ if (type == kEndOfImage || *size > maxSize - sizeof(segment_t)) {
+ return BAD_VALUE;
+ }
+
+ size_t length = ntohs(segment->length);
+ *size += length + kMarkerLength;
+ }
+
+ // Find End of Image
+ // Scan JPEG buffer until End of Image
+ bool foundEnd = false;
+ for ( ; *size <= maxSize - kMarkerLength; (*size)++) {
+ if (checkJpegEnd(jpegBuffer + *size)) {
+ foundEnd = true;
+ *size += kMarkerLength;
+ break;
+ }
+ }
+
+ if (!foundEnd) {
+ return BAD_VALUE;
+ }
+
+ if (*size > maxSize) {
+ *size = maxSize;
+ }
+
+ return OK;
+}
+
+status_t NV12Compressor::getJpegImageDimensions(uint8_t *jpegBuffer,
+ size_t jpegBufferSize, size_t *width /*out*/, size_t *height /*out*/) {
+ if ((jpegBuffer == nullptr) || (width == nullptr) || (height == nullptr) ||
+ (jpegBufferSize == 0u)) {
+ return BAD_VALUE;
+ }
+
+ // Scan JPEG buffer until Start of Frame
+ bool foundSOF = false;
+ size_t currentPos;
+ for (currentPos = 0; currentPos <= jpegBufferSize - kMarkerLength; currentPos++) {
+ if (checkStartOfFrame(jpegBuffer + currentPos)) {
+ foundSOF = true;
+ currentPos += kMarkerLength;
+ break;
+ }
+ }
+
+ if (!foundSOF) {
+ ALOGE("%s: Start of Frame not found", __func__);
+ return BAD_VALUE;
+ }
+
+ sof_t *startOfFrame = reinterpret_cast<sof_t *> (jpegBuffer + currentPos);
+ *width = ntohs(startOfFrame->width);
+ *height = ntohs(startOfFrame->height);
+
+ return OK;
+}
+
+status_t NV12Compressor::getExifOrientation(uint8_t *jpegBuffer, size_t jpegBufferSize,
+ ExifOrientation *exifValue /*out*/) {
+ if ((jpegBuffer == nullptr) || (exifValue == nullptr) || (jpegBufferSize == 0u)) {
+ return BAD_VALUE;
+ }
+
+ std::unique_ptr<ExifData> exifData(exif_data_new());
+ exif_data_load_data(exifData.get(), jpegBuffer, jpegBufferSize);
+ ExifEntry *orientation = exif_content_get_entry(exifData->ifd[EXIF_IFD_0],
+ EXIF_TAG_ORIENTATION);
+ if ((orientation == nullptr) || (orientation->size != sizeof(ExifShort))) {
+ return BAD_VALUE;
+ }
+
+ auto orientationValue = exif_get_short(orientation->data,
+ exif_data_get_byte_order(exifData.get()));
+ status_t ret;
+ switch (orientationValue) {
+ case ExifOrientation::ORIENTATION_0_DEGREES:
+ case ExifOrientation::ORIENTATION_90_DEGREES:
+ case ExifOrientation::ORIENTATION_180_DEGREES:
+ case ExifOrientation::ORIENTATION_270_DEGREES:
+ *exifValue = static_cast<ExifOrientation> (orientationValue);
+ ret = OK;
+ break;
+ default:
+ ALOGE("%s: Unexpected EXIF orientation value: %u", __FUNCTION__, orientationValue);
+ ret = BAD_VALUE;
+ }
+
+ return ret;
+}
diff --git a/services/camera/libcameraservice/tests/NV12Compressor.h b/services/camera/libcameraservice/tests/NV12Compressor.h
new file mode 100644
index 0000000..ee22d5e
--- /dev/null
+++ b/services/camera/libcameraservice/tests/NV12Compressor.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+#define TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+
+#include <setjmp.h>
+#include <stdlib.h>
+extern "C" {
+#include <jpeglib.h>
+#include <jerror.h>
+}
+
+#include <utils/Errors.h>
+#include <vector>
+
+#include "../utils/ExifUtils.h"
+
+struct _ExifData;
+typedef _ExifData ExifData;
+
+class NV12Compressor {
+public:
+ NV12Compressor() {}
+
+ /* Compress |data| which represents raw NV21 encoded data of dimensions
+ * |width| * |height|.
+ */
+ bool compress(const unsigned char* data, int width, int height, int quality);
+ bool compressWithExifOrientation(const unsigned char* data, int width, int height, int quality,
+ android::camera3::ExifOrientation exifValue);
+
+ /* Get a reference to the compressed data; this will return an empty vector
+ * if compress has not been called yet.
+ */
+ const std::vector<unsigned char>& getCompressedData() const;
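+
+ /* Rough usage sketch (illustrative; |frame|, |w| and |h| are assumed to
+ * describe a valid input buffer):
+ *
+ *   NV12Compressor compressor;
+ *   if (compressor.compress(frame, w, h, 95)) {
+ *       const std::vector<unsigned char>& jpeg = compressor.getCompressedData();
+ *       // |jpeg| now holds a complete JFIF stream, ready to be written out.
+ *   }
+ */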
+
+ // Utility methods
+ static android::status_t findJpegSize(uint8_t *jpegBuffer, size_t maxSize,
+ size_t *size /*out*/);
+
+ static android::status_t getExifOrientation(uint8_t *jpegBuffer,
+ size_t jpegBufferSize, android::camera3::ExifOrientation *exifValue /*out*/);
+
+ /* Get JPEG image dimensions from the first Start Of Frame. Note that, because
+ * of the way the JPEG buffer is scanned, if the image contains a thumbnail the
+ * size returned will be that of the thumbnail and not of the main image.
+ */
+ static android::status_t getJpegImageDimensions(uint8_t *jpegBuffer, size_t jpegBufferSize,
+ size_t *width /*out*/, size_t *height /*out*/);
+
+private:
+
+ struct DestinationManager : jpeg_destination_mgr {
+ DestinationManager();
+
+ static void initDestination(j_compress_ptr cinfo);
+ static boolean emptyOutputBuffer(j_compress_ptr cinfo);
+ static void termDestination(j_compress_ptr cinfo);
+
+ std::vector<unsigned char> mBuffer;
+ };
+
+ struct ErrorManager : jpeg_error_mgr {
+ ErrorManager();
+
+ static void onJpegError(j_common_ptr cinfo);
+
+ jmp_buf mJumpBuffer;
+ };
+
+ static const size_t kMarkerLength = 2; // length of a marker
+ static const uint8_t kMarker = 0xFF; // First byte of marker
+ static const uint8_t kStartOfImage = 0xD8; // Start of Image
+ static const uint8_t kEndOfImage = 0xD9; // End of Image
+ static const uint8_t kStartOfFrame = 0xC0; // Start of Frame
+
+ struct __attribute__((packed)) segment_t {
+ uint8_t marker[kMarkerLength];
+ uint16_t length;
+ };
+
+ struct __attribute__((packed)) sof_t {
+ uint16_t length;
+ uint8_t precision;
+ uint16_t height;
+ uint16_t width;
+ };
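+
+ // Rough layout of the JPEG streams these helpers walk (illustrative):
+ //   FF D8 (SOI) | FF E1 <len> <APP1 payload> | ... | FF C0 <len> <SOF0> | ... | FF D9 (EOI)
+ // where each <len> is big-endian and counts itself but not its marker bytes.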
+
+ // check for Start Of Frame marker
+ static bool checkStartOfFrame(uint8_t* buf) {
+ return buf[0] == kMarker && buf[1] == kStartOfFrame;
+ }
+
+ // check for start of image marker
+ static bool checkJpegStart(uint8_t* buf) {
+ return buf[0] == kMarker && buf[1] == kStartOfImage;
+ }
+
+ // check for End of Image marker
+ static bool checkJpegEnd(uint8_t *buf) {
+ return buf[0] == kMarker && buf[1] == kEndOfImage;
+ }
+
+ // check for an arbitrary marker; returns the marker type (second byte),
+ // or 0 if no marker is found. Note: 0x00 is not a valid marker type
+ static uint8_t checkJpegMarker(uint8_t *buf) {
+ return (buf[0] == kMarker) ? buf[1] : 0;
+ }
+
+ jpeg_compress_struct mCompressInfo;
+ DestinationManager mDestManager;
+ ErrorManager mErrorManager;
+
+ bool configureCompressor(int width, int height, int quality);
+ bool compressData(const unsigned char* data, ExifData* exifData);
+ bool attachExifData(ExifData* exifData);
+};
+
+#endif // TEST_CAMERA_JPEG_STUB_NV12_COMPRESSOR_H
+
diff --git a/services/camera/libcameraservice/utils/ExifUtils.cpp b/services/camera/libcameraservice/utils/ExifUtils.cpp
new file mode 100644
index 0000000..c0afdc1
--- /dev/null
+++ b/services/camera/libcameraservice/utils/ExifUtils.cpp
@@ -0,0 +1,1104 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraServerExifUtils"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <cutils/log.h>
+
+#include <inttypes.h>
+#include <math.h>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+#include "ExifUtils.h"
+
+extern "C" {
+#include <libexif/exif-data.h>
+}
+
+namespace std {
+
+template <>
+struct default_delete<ExifEntry> {
+ inline void operator()(ExifEntry* entry) const { exif_entry_unref(entry); }
+};
+
+} // namespace std
+
+
+namespace android {
+namespace camera3 {
+
+
+class ExifUtilsImpl : public ExifUtils {
+public:
+ ExifUtilsImpl();
+
+ virtual ~ExifUtilsImpl();
+
+ // initialize() can be called multiple times. Previously set Exif tags will
+ // be cleared.
+ virtual bool initialize(const unsigned char *app1Segment, size_t app1SegmentSize);
+ virtual bool initializeEmpty();
+
+ // set all known fields from a metadata structure
+ virtual bool setFromMetadata(const CameraMetadata& metadata,
+ const CameraMetadata& staticInfo,
+ const size_t imageWidth,
+ const size_t imageHeight);
+
+ // sets the lens aperture.
+ // Returns false if memory allocation fails.
+ virtual bool setAperture(float aperture);
+
+ // sets the color space.
+ // Returns false if memory allocation fails.
+ virtual bool setColorSpace(uint16_t color_space);
+
+ // sets the date and time of image last modified. It takes local time. The
+ // name of the tag is DateTime in IFD0.
+ // Returns false if memory allocation fails.
+ virtual bool setDateTime(const struct tm& t);
+
+ // sets the digital zoom ratio. If the numerator is 0, it means digital zoom
+ // was not used.
+ // Returns false if memory allocation fails.
+ virtual bool setDigitalZoomRatio(
+ uint32_t crop_width, uint32_t crop_height,
+ uint32_t sensor_width, uint32_t sensor_height);
+
+ // Sets the exposure bias.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureBias(int32_t ev,
+ uint32_t ev_step_numerator, uint32_t ev_step_denominator);
+
+ // sets the exposure mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureMode(uint8_t exposure_mode);
+
+ // sets the exposure time, given in seconds.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureTime(float exposure_time);
+
+ // sets the status of flash.
+ // Returns false if memory allocation fails.
+ virtual bool setFlash(uint8_t flash_available, uint8_t flash_state, uint8_t ae_mode);
+
+ // sets the F number.
+ // Returns false if memory allocation fails.
+ virtual bool setFNumber(float f_number);
+
+ // sets the focal length of lens used to take the image in millimeters.
+ // Returns false if memory allocation fails.
+ virtual bool setFocalLength(float focal_length);
+
+ // sets the focal length of lens for 35mm film used to take the image in millimeters.
+ // Returns false if memory allocation fails.
+ virtual bool setFocalLengthIn35mmFilm(float focal_length,
+ float sensor_size_x, float sensor_size_y);
+
+ // sets the altitude in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsAltitude(double altitude);
+
+ // sets the latitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLatitude(double latitude);
+
+ // sets the longitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLongitude(double longitude);
+
+ // sets GPS processing method.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsProcessingMethod(const std::string& method);
+
+ // sets GPS date stamp and time stamp (atomic clock). It takes UTC time.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsTimestamp(const struct tm& t);
+
+ // sets the length (number of rows) of main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageHeight(uint32_t length);
+
+ // sets the width (number of columns) of main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageWidth(uint32_t width);
+
+ // sets the ISO speed.
+ // Returns false if memory allocation fails.
+ virtual bool setIsoSpeedRating(uint16_t iso_speed_ratings);
+
+ // sets the smallest F number of the lens.
+ // Returns false if memory allocation fails.
+ virtual bool setMaxAperture(float aperture);
+
+ // sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientation(uint16_t degrees);
+
+ // sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientationValue(ExifOrientation orientationValue);
+
+ // sets the shutter speed.
+ // Returns false if memory allocation fails.
+ virtual bool setShutterSpeed(float exposure_time);
+
+ // sets the distance to the subject, given in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setSubjectDistance(float diopters);
+
+ // sets the fractions of seconds for the <DateTime> tag.
+ // Returns false if memory allocation fails.
+ virtual bool setSubsecTime(const std::string& subsec_time);
+
+ // sets the white balance mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setWhiteBalance(uint8_t white_balance);
+
+ // Generates APP1 segment.
+ // Returns false if generating APP1 segment fails.
+ virtual bool generateApp1();
+
+ // Gets buffer of APP1 segment. This method must be called only after calling
+ // generateApp1().
+ virtual const uint8_t* getApp1Buffer();
+
+ // Gets length of APP1 segment. This method must be called only after calling
+ // generateApp1().
+ virtual unsigned int getApp1Length();
+
+ protected:
+ // sets the version of this standard supported.
+ // Returns false if memory allocation fails.
+ virtual bool setExifVersion(const std::string& exif_version);
+
+ // Resets the pointers and memories.
+ virtual void reset();
+
+ // Adds a variable length tag to |exif_data_|. It will remove the original one
+ // if the tag exists.
+ // Returns the entry of the tag. The reference count of returned ExifEntry is
+ // two.
+ virtual std::unique_ptr<ExifEntry> addVariableLengthEntry(ExifIfd ifd,
+ ExifTag tag, ExifFormat format, uint64_t components, unsigned int size);
+
+ // Adds an entry of |tag| in |exif_data_|. It won't remove the original one if
+ // the tag exists.
+ // Returns the entry of the tag. It adds one reference count to returned
+ // ExifEntry.
+ virtual std::unique_ptr<ExifEntry> addEntry(ExifIfd ifd, ExifTag tag);
+
+ // Helper functions to add exif data of different types.
+ virtual bool setShort(ExifIfd ifd, ExifTag tag, uint16_t value, const std::string& msg);
+
+ virtual bool setLong(ExifIfd ifd, ExifTag tag, uint32_t value, const std::string& msg);
+
+ virtual bool setRational(ExifIfd ifd, ExifTag tag, uint32_t numerator,
+ uint32_t denominator, const std::string& msg);
+
+ virtual bool setSRational(ExifIfd ifd, ExifTag tag, int32_t numerator,
+ int32_t denominator, const std::string& msg);
+
+ virtual bool setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ const std::string& buffer, const std::string& msg);
+
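+
+ // Converts a value to its APEX representation, 2 * log2(val); e.g. an
+ // f-number of 2.0 maps to an APEX aperture value of 2.0 (illustrative).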
+ float convertToApex(float val) {
+ return 2.0f * log2f(val);
+ }
+
+ // Destroys the buffer of APP1 segment if exists.
+ virtual void destroyApp1();
+
+ // The Exif data (APP1). Owned by this class.
+ ExifData* exif_data_;
+ // The raw data of APP1 segment. It's allocated by ExifMem in |exif_data_| but
+ // owned by this class.
+ uint8_t* app1_buffer_;
+ // The length of |app1_buffer_|.
+ unsigned int app1_length_;
+
+ // Precision of the float-to-rational conversion used for EXIF tags.
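+ // For example, an exposure time of 0.0167 s would be stored as the
+ // rational 167/10000 (illustrative).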
+ const static int kRationalPrecision = 10000;
+};
+
+#define SET_SHORT(ifd, tag, value) \
+ do { \
+ if (setShort(ifd, tag, value, #tag) == false) \
+ return false; \
+ } while (0);
+
+#define SET_LONG(ifd, tag, value) \
+ do { \
+ if (setLong(ifd, tag, value, #tag) == false) \
+ return false; \
+ } while (0);
+
+#define SET_RATIONAL(ifd, tag, numerator, denominator) \
+ do { \
+ if (setRational(ifd, tag, numerator, denominator, #tag) == false) \
+ return false; \
+ } while (0);
+
+#define SET_SRATIONAL(ifd, tag, numerator, denominator) \
+ do { \
+ if (setSRational(ifd, tag, numerator, denominator, #tag) == false) \
+ return false; \
+ } while (0);
+
+#define SET_STRING(ifd, tag, format, buffer) \
+ do { \
+ if (setString(ifd, tag, format, buffer, #tag) == false) \
+ return false; \
+ } while (0);
+
+// This comes from the Exif Version 2.2 standard table 6.
+const char gExifAsciiPrefix[] = {0x41, 0x53, 0x43, 0x49, 0x49, 0x0, 0x0, 0x0};
+
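+// Worked example (illustrative): 10.5 degrees is written as the three
+// rationals {10/1, 30/1, 0/1000000}, i.e. 10 degrees, 30 minutes, 0 seconds.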
+static void setLatitudeOrLongitudeData(unsigned char* data, double num) {
+ // Take the integer part of |num|.
+ ExifLong degrees = static_cast<ExifLong>(num);
+ ExifLong minutes = static_cast<ExifLong>(60 * (num - degrees));
+ ExifLong microseconds =
+ static_cast<ExifLong>(3600000000u * (num - degrees - minutes / 60.0));
+ exif_set_rational(data, EXIF_BYTE_ORDER_INTEL, {degrees, 1});
+ exif_set_rational(data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL, {minutes, 1});
+ exif_set_rational(data + 2 * sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+ {microseconds, 1000000});
+}
+
+ExifUtils *ExifUtils::create() {
+ return new ExifUtilsImpl();
+}
+
+ExifUtils::~ExifUtils() {
+}
+
+ExifUtilsImpl::ExifUtilsImpl()
+ : exif_data_(nullptr), app1_buffer_(nullptr), app1_length_(0) {}
+
+ExifUtilsImpl::~ExifUtilsImpl() {
+ reset();
+}
+
+
+bool ExifUtilsImpl::initialize(const unsigned char *app1Segment, size_t app1SegmentSize) {
+ reset();
+ exif_data_ = exif_data_new_from_data(app1Segment, app1SegmentSize);
+ if (exif_data_ == nullptr) {
+ ALOGE("%s: allocate memory for exif_data_ failed", __FUNCTION__);
+ return false;
+ }
+ // set the image options.
+ exif_data_set_option(exif_data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+ exif_data_set_data_type(exif_data_, EXIF_DATA_TYPE_COMPRESSED);
+ exif_data_set_byte_order(exif_data_, EXIF_BYTE_ORDER_INTEL);
+
+ // set exif version to 2.2.
+ if (!setExifVersion("0220")) {
+ return false;
+ }
+
+ return true;
+}
+
+bool ExifUtilsImpl::initializeEmpty() {
+ reset();
+ exif_data_ = exif_data_new();
+ if (exif_data_ == nullptr) {
+ ALOGE("%s: allocate memory for exif_data_ failed", __FUNCTION__);
+ return false;
+ }
+ // set the image options.
+ exif_data_set_option(exif_data_, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+ exif_data_set_data_type(exif_data_, EXIF_DATA_TYPE_COMPRESSED);
+ exif_data_set_byte_order(exif_data_, EXIF_BYTE_ORDER_INTEL);
+
+ // set exif version to 2.2.
+ if (!setExifVersion("0220")) {
+ return false;
+ }
+
+ return true;
+}
+
+bool ExifUtilsImpl::setAperture(float aperture) {
+ float apexValue = convertToApex(aperture);
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_APERTURE_VALUE,
+ static_cast<uint32_t>(std::round(apexValue * kRationalPrecision)),
+ kRationalPrecision);
+ return true;
+}
+
+bool ExifUtilsImpl::setColorSpace(uint16_t color_space) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_COLOR_SPACE, color_space);
+ return true;
+}
+
+bool ExifUtilsImpl::setDateTime(const struct tm& t) {
+ // The length is 20 bytes including the NULL terminator, per the Exif standard.
+ char str[20];
+ int result = snprintf(str, sizeof(str), "%04i:%02i:%02i %02i:%02i:%02i",
+ t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec);
+ if (result != sizeof(str) - 1) {
+ ALOGW("%s: Input time is invalid", __FUNCTION__);
+ return false;
+ }
+ std::string buffer(str);
+ SET_STRING(EXIF_IFD_0, EXIF_TAG_DATE_TIME, EXIF_FORMAT_ASCII, buffer);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_ORIGINAL, EXIF_FORMAT_ASCII, buffer);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_DATE_TIME_DIGITIZED, EXIF_FORMAT_ASCII, buffer);
+ return true;
+}
+
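+// Illustrative example: a 4000x3000 active array cropped to 2000x1500 gives a
+// zoom ratio of 2.0, recorded below as the rational 20000/10000.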
+bool ExifUtilsImpl::setDigitalZoomRatio(
+ uint32_t crop_width, uint32_t crop_height,
+ uint32_t sensor_width, uint32_t sensor_height) {
+ float zoomRatioX = (crop_width == 0) ? 1.0 : 1.0 * sensor_width / crop_width;
+ float zoomRatioY = (crop_height == 0) ? 1.0 : 1.0 * sensor_height / crop_height;
+ float zoomRatio = std::max(zoomRatioX, zoomRatioY);
+ const static float noZoomThreshold = 1.02f;
+
+ if (zoomRatio <= noZoomThreshold) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_DIGITAL_ZOOM_RATIO, 0, 1);
+ } else {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_DIGITAL_ZOOM_RATIO,
+ static_cast<uint32_t>(std::round(zoomRatio * kRationalPrecision)),
+ kRationalPrecision);
+ }
+ return true;
+}
+
+bool ExifUtilsImpl::setExposureMode(uint8_t exposure_mode) {
+ uint16_t exposureMode = (exposure_mode == ANDROID_CONTROL_AE_MODE_OFF) ? 1 : 0;
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_MODE, exposureMode);
+ return true;
+}
+
+bool ExifUtilsImpl::setExposureTime(float exposure_time) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_TIME,
+ static_cast<uint32_t>(std::round(exposure_time * kRationalPrecision)),
+ kRationalPrecision);
+ return true;
+}
+
+bool ExifUtilsImpl::setFlash(uint8_t flash_available, uint8_t flash_state, uint8_t ae_mode) {
+ // EXIF_TAG_FLASH bits layout per EXIF standard:
+ // Bit 0: 0 - did not fire
+ // 1 - fired
+ // Bit 1-2: status of return light
+ // Bit 3-4: 0 - unknown
+ // 1 - compulsory flash firing
+ // 2 - compulsory flash suppression
+ // 3 - auto mode
+ // Bit 5: 0 - flash function present
+ // 1 - no flash function
+ // Bit 6: 0 - no red-eye reduction mode or unknown
+ // 1 - red-eye reduction supported
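+ //
+ // Worked example (illustrative): a flash that fired in auto mode with
+ // red-eye reduction encodes as 0x1 | (3 << 3) | 0x40 = 0x59.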
+ uint16_t flash = 0x20;
+
+ if (flash_available == ANDROID_FLASH_INFO_AVAILABLE_TRUE) {
+ flash = 0x00;
+
+ if (flash_state == ANDROID_FLASH_STATE_FIRED) {
+ flash |= 0x1;
+ }
+ if (ae_mode == ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE) {
+ flash |= 0x40;
+ }
+
+ uint16_t flashMode = 0;
+ switch (ae_mode) {
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
+ flashMode = 3; // AUTO
+ break;
+ case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+ case ANDROID_CONTROL_AE_MODE_ON_EXTERNAL_FLASH:
+ flashMode = 1; // ON
+ break;
+ case ANDROID_CONTROL_AE_MODE_OFF:
+ case ANDROID_CONTROL_AE_MODE_ON:
+ flashMode = 2; // OFF
+ break;
+ default:
+ flashMode = 0; // UNKNOWN
+ break;
+ }
+ flash |= (flashMode << 3);
+ }
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_FLASH, flash);
+ return true;
+}
+
+bool ExifUtilsImpl::setFNumber(float f_number) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_FNUMBER,
+ static_cast<uint32_t>(std::round(f_number * kRationalPrecision)),
+ kRationalPrecision);
+ return true;
+}
+
+bool ExifUtilsImpl::setFocalLength(float focal_length) {
+ uint32_t numerator = static_cast<uint32_t>(std::round(focal_length * kRationalPrecision));
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH, numerator, kRationalPrecision);
+ return true;
+}
+
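+// Illustrative example: a 4.38 mm lens on a 6.17 x 4.55 mm sensor (diagonal of
+// roughly 7.67 mm) maps to about 4.38 * 43.27 / 7.67, i.e. a 25 mm 35mm-equivalent.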
+bool ExifUtilsImpl::setFocalLengthIn35mmFilm(
+ float focal_length, float sensor_size_x, float sensor_size_y) {
+ static const float filmDiagonal = 43.27; // diagonal of 35mm film
+ static const float minSensorDiagonal = 0.01;
+ float sensorDiagonal = std::sqrt(
+ sensor_size_x * sensor_size_x + sensor_size_y * sensor_size_y);
+ sensorDiagonal = std::max(sensorDiagonal, minSensorDiagonal);
+ float focalLength35mmFilm = std::round(focal_length * filmDiagonal / sensorDiagonal);
+ focalLength35mmFilm = std::min(1.0f * 65535, focalLength35mmFilm);
+
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH_IN_35MM_FILM,
+ static_cast<uint16_t>(focalLength35mmFilm));
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsAltitude(double altitude) {
+ ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE_REF);
+ std::unique_ptr<ExifEntry> refEntry =
+ addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_BYTE, 1, 1);
+ if (!refEntry) {
+ ALOGE("%s: Adding GPSAltitudeRef exif entry failed", __FUNCTION__);
+ return false;
+ }
+ if (altitude >= 0) {
+ *refEntry->data = 0;
+ } else {
+ *refEntry->data = 1;
+ altitude *= -1;
+ }
+
+ ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_ALTITUDE);
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+ EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 1, sizeof(ExifRational));
+ if (!entry) {
+ exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+ ALOGE("%s: Adding GPSAltitude exif entry failed", __FUNCTION__);
+ return false;
+ }
+ exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
+ {static_cast<ExifLong>(altitude * 1000), 1000});
+
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsLatitude(double latitude) {
+ const ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE_REF);
+ std::unique_ptr<ExifEntry> refEntry =
+ addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
+ if (!refEntry) {
+ ALOGE("%s: Adding GPSLatitudeRef exif entry failed", __FUNCTION__);
+ return false;
+ }
+ if (latitude >= 0) {
+ memcpy(refEntry->data, "N", sizeof("N"));
+ } else {
+ memcpy(refEntry->data, "S", sizeof("S"));
+ latitude *= -1;
+ }
+
+ const ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LATITUDE);
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+ EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
+ if (!entry) {
+ exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+ ALOGE("%s: Adding GPSLatitude exif entry failed", __FUNCTION__);
+ return false;
+ }
+ setLatitudeOrLongitudeData(entry->data, latitude);
+
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsLongitude(double longitude) {
+ ExifTag refTag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE_REF);
+ std::unique_ptr<ExifEntry> refEntry =
+ addVariableLengthEntry(EXIF_IFD_GPS, refTag, EXIF_FORMAT_ASCII, 2, 2);
+ if (!refEntry) {
+ ALOGE("%s: Adding GPSLongitudeRef exif entry failed", __FUNCTION__);
+ return false;
+ }
+ if (longitude >= 0) {
+ memcpy(refEntry->data, "E", sizeof("E"));
+ } else {
+ memcpy(refEntry->data, "W", sizeof("W"));
+ longitude *= -1;
+ }
+
+ ExifTag tag = static_cast<ExifTag>(EXIF_TAG_GPS_LONGITUDE);
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(
+ EXIF_IFD_GPS, tag, EXIF_FORMAT_RATIONAL, 3, 3 * sizeof(ExifRational));
+ if (!entry) {
+ exif_content_remove_entry(exif_data_->ifd[EXIF_IFD_GPS], refEntry.get());
+ ALOGE("%s: Adding GPSLongitude exif entry failed", __FUNCTION__);
+ return false;
+ }
+ setLatitudeOrLongitudeData(entry->data, longitude);
+
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsProcessingMethod(const std::string& method) {
+ std::string buffer =
+ std::string(gExifAsciiPrefix, sizeof(gExifAsciiPrefix)) + method;
+ SET_STRING(EXIF_IFD_GPS, static_cast<ExifTag>(EXIF_TAG_GPS_PROCESSING_METHOD),
+ EXIF_FORMAT_UNDEFINED, buffer);
+ return true;
+}
+
+bool ExifUtilsImpl::setGpsTimestamp(const struct tm& t) {
+ const ExifTag dateTag = static_cast<ExifTag>(EXIF_TAG_GPS_DATE_STAMP);
+ const size_t kGpsDateStampSize = 11;
+ std::unique_ptr<ExifEntry> entry = addVariableLengthEntry(EXIF_IFD_GPS,
+ dateTag, EXIF_FORMAT_ASCII, kGpsDateStampSize, kGpsDateStampSize);
+ if (!entry) {
+ ALOGE("%s: Adding GPSDateStamp exif entry failed", __FUNCTION__);
+ return false;
+ }
+ int result = snprintf(reinterpret_cast<char*>(entry->data), kGpsDateStampSize,
+ "%04i:%02i:%02i", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday);
+ if (result != kGpsDateStampSize - 1) {
+ ALOGW("%s: Input time is invalid", __FUNCTION__);
+ return false;
+ }
+
+ const ExifTag timeTag = static_cast<ExifTag>(EXIF_TAG_GPS_TIME_STAMP);
+ entry = addVariableLengthEntry(EXIF_IFD_GPS, timeTag, EXIF_FORMAT_RATIONAL, 3,
+ 3 * sizeof(ExifRational));
+ if (!entry) {
+ ALOGE("%s: Adding GPSTimeStamp exif entry failed", __FUNCTION__);
+ return false;
+ }
+ exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL,
+ {static_cast<ExifLong>(t.tm_hour), 1});
+ exif_set_rational(entry->data + sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+ {static_cast<ExifLong>(t.tm_min), 1});
+ exif_set_rational(entry->data + 2 * sizeof(ExifRational), EXIF_BYTE_ORDER_INTEL,
+ {static_cast<ExifLong>(t.tm_sec), 1});
+
+ return true;
+}
+
+bool ExifUtilsImpl::setImageHeight(uint32_t length) {
+ SET_LONG(EXIF_IFD_0, EXIF_TAG_IMAGE_LENGTH, length);
+ SET_LONG(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_Y_DIMENSION, length);
+ return true;
+}
+
+bool ExifUtilsImpl::setImageWidth(uint32_t width) {
+ SET_LONG(EXIF_IFD_0, EXIF_TAG_IMAGE_WIDTH, width);
+ SET_LONG(EXIF_IFD_EXIF, EXIF_TAG_PIXEL_X_DIMENSION, width);
+ return true;
+}
+
+bool ExifUtilsImpl::setIsoSpeedRating(uint16_t iso_speed_ratings) {
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_ISO_SPEED_RATINGS, iso_speed_ratings);
+ return true;
+}
+
+bool ExifUtilsImpl::setMaxAperture(float aperture) {
+ float maxAperture = convertToApex(aperture);
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_MAX_APERTURE_VALUE,
+ static_cast<uint32_t>(std::round(maxAperture * kRationalPrecision)),
+ kRationalPrecision);
+ return true;
+}
+
+bool ExifUtilsImpl::setExposureBias(int32_t ev,
+ uint32_t ev_step_numerator, uint32_t ev_step_denominator) {
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_EXPOSURE_BIAS_VALUE,
+ ev * ev_step_numerator, ev_step_denominator);
+ return true;
+}
+
+bool ExifUtilsImpl::setOrientation(uint16_t degrees) {
+ ExifOrientation value = ExifOrientation::ORIENTATION_0_DEGREES;
+ switch (degrees) {
+ case 90:
+ value = ExifOrientation::ORIENTATION_90_DEGREES;
+ break;
+ case 180:
+ value = ExifOrientation::ORIENTATION_180_DEGREES;
+ break;
+ case 270:
+ value = ExifOrientation::ORIENTATION_270_DEGREES;
+ break;
+ default:
+ break;
+ }
+ return setOrientationValue(value);
+}
+
+bool ExifUtilsImpl::setOrientationValue(ExifOrientation orientationValue) {
+ SET_SHORT(EXIF_IFD_0, EXIF_TAG_ORIENTATION, orientationValue);
+ return true;
+}
+
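+// The APEX shutter speed is Tv = -log2(exposure time); e.g. an exposure of
+// 1/125 s gives about 6.97, stored below as roughly 69658/10000 (illustrative).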
+bool ExifUtilsImpl::setShutterSpeed(float exposure_time) {
+ float shutterSpeed = -log2f(exposure_time);
+ SET_SRATIONAL(EXIF_IFD_EXIF, EXIF_TAG_SHUTTER_SPEED_VALUE,
+ static_cast<uint32_t>(shutterSpeed * kRationalPrecision), kRationalPrecision);
+ return true;
+}
+
+bool ExifUtilsImpl::setSubjectDistance(float diopters) {
+ const static float kInfinityDiopters = 1.0e-6;
+ uint32_t numerator, denominator;
+ uint16_t distanceRange;
+ if (diopters > kInfinityDiopters) {
+ float focusDistance = 1.0f / diopters;
+ numerator = static_cast<uint32_t>(std::round(focusDistance * kRationalPrecision));
+ denominator = kRationalPrecision;
+
+ if (focusDistance < 1.0f) {
+ distanceRange = 1; // Macro
+ } else if (focusDistance < 3.0f) {
+ distanceRange = 2; // Close
+ } else {
+ distanceRange = 3; // Distant
+ }
+ } else {
+ numerator = 0xFFFFFFFF;
+ denominator = 1;
+ distanceRange = 3; // Distant
+ }
+ SET_RATIONAL(EXIF_IFD_EXIF, EXIF_TAG_SUBJECT_DISTANCE, numerator, denominator);
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_SUBJECT_DISTANCE_RANGE, distanceRange);
+ return true;
+}
+
+bool ExifUtilsImpl::setSubsecTime(const std::string& subsec_time) {
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME, EXIF_FORMAT_ASCII, subsec_time);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_ORIGINAL, EXIF_FORMAT_ASCII, subsec_time);
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_SUB_SEC_TIME_DIGITIZED, EXIF_FORMAT_ASCII, subsec_time);
+ return true;
+}
+
+bool ExifUtilsImpl::setWhiteBalance(uint8_t white_balance) {
+ uint16_t whiteBalance = (white_balance == ANDROID_CONTROL_AWB_MODE_AUTO) ? 0 : 1;
+ SET_SHORT(EXIF_IFD_EXIF, EXIF_TAG_WHITE_BALANCE, whiteBalance);
+ return true;
+}
+
+bool ExifUtilsImpl::generateApp1() {
+ destroyApp1();
+ // Save the result into |app1_buffer_|.
+ exif_data_save_data(exif_data_, &app1_buffer_, &app1_length_);
+ if (!app1_length_) {
+ ALOGE("%s: Allocate memory for app1_buffer_ failed", __FUNCTION__);
+ return false;
+ }
+ /*
+ * The JPEG segment length field is 16 bits and includes its own two bytes,
+ * so the APP1 payload must not exceed 65533 bytes (65535 - 2).
+ */
+ if (app1_length_ > 65533) {
+ destroyApp1();
+ ALOGE("%s: The size of APP1 segment is too large", __FUNCTION__);
+ return false;
+ }
+ return true;
+}
+
+const uint8_t* ExifUtilsImpl::getApp1Buffer() {
+ return app1_buffer_;
+}
+
+unsigned int ExifUtilsImpl::getApp1Length() {
+ return app1_length_;
+}
+
+bool ExifUtilsImpl::setExifVersion(const std::string& exif_version) {
+ SET_STRING(EXIF_IFD_EXIF, EXIF_TAG_EXIF_VERSION, EXIF_FORMAT_UNDEFINED, exif_version);
+ return true;
+}
+
+void ExifUtilsImpl::reset() {
+ destroyApp1();
+ if (exif_data_) {
+ /*
+ * Since we decided to ignore the original APP1, we are sure that there is
+ * no thumbnail allocated by libexif. |exif_data_->data| is actually
+ * allocated by JpegCompressor. Set |exif_data_->data| to nullptr to
+ * prevent exif_data_unref() from destroying it incorrectly.
+ */
+ exif_data_->data = nullptr;
+ exif_data_->size = 0;
+ exif_data_unref(exif_data_);
+ exif_data_ = nullptr;
+ }
+}
+
+std::unique_ptr<ExifEntry> ExifUtilsImpl::addVariableLengthEntry(ExifIfd ifd,
+ ExifTag tag, ExifFormat format, uint64_t components, unsigned int size) {
+ // Remove old entry if exists.
+ exif_content_remove_entry(exif_data_->ifd[ifd],
+ exif_content_get_entry(exif_data_->ifd[ifd], tag));
+ ExifMem* mem = exif_mem_new_default();
+ if (!mem) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ return nullptr;
+ }
+ std::unique_ptr<ExifEntry> entry(exif_entry_new_mem(mem));
+ if (!entry) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ exif_mem_unref(mem);
+ return nullptr;
+ }
+ void* tmpBuffer = exif_mem_alloc(mem, size);
+ if (!tmpBuffer) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ exif_mem_unref(mem);
+ return nullptr;
+ }
+
+ entry->data = static_cast<unsigned char*>(tmpBuffer);
+ entry->tag = tag;
+ entry->format = format;
+ entry->components = components;
+ entry->size = size;
+
+ exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
+ exif_mem_unref(mem);
+
+ return entry;
+}
+
+std::unique_ptr<ExifEntry> ExifUtilsImpl::addEntry(ExifIfd ifd, ExifTag tag) {
+ std::unique_ptr<ExifEntry> entry(exif_content_get_entry(exif_data_->ifd[ifd], tag));
+ if (entry) {
+ // exif_content_get_entry() won't ref the entry, so we ref here.
+ exif_entry_ref(entry.get());
+ return entry;
+ }
+ entry.reset(exif_entry_new());
+ if (!entry) {
+ ALOGE("%s: Allocate memory for exif entry failed", __FUNCTION__);
+ return nullptr;
+ }
+ entry->tag = tag;
+ exif_content_add_entry(exif_data_->ifd[ifd], entry.get());
+ exif_entry_initialize(entry.get(), tag);
+ return entry;
+}
+
+bool ExifUtilsImpl::setShort(ExifIfd ifd, ExifTag tag, uint16_t value, const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_short(entry->data, EXIF_BYTE_ORDER_INTEL, value);
+ return true;
+}
+
+bool ExifUtilsImpl::setLong(ExifIfd ifd, ExifTag tag, uint32_t value, const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_long(entry->data, EXIF_BYTE_ORDER_INTEL, value);
+ return true;
+}
+
+bool ExifUtilsImpl::setRational(ExifIfd ifd, ExifTag tag, uint32_t numerator,
+ uint32_t denominator, const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_rational(entry->data, EXIF_BYTE_ORDER_INTEL, {numerator, denominator});
+ return true;
+}
+
+bool ExifUtilsImpl::setSRational(ExifIfd ifd, ExifTag tag, int32_t numerator,
+ int32_t denominator, const std::string& msg) {
+ std::unique_ptr<ExifEntry> entry = addEntry(ifd, tag);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ exif_set_srational(entry->data, EXIF_BYTE_ORDER_INTEL, {numerator, denominator});
+ return true;
+}
+
+bool ExifUtilsImpl::setString(ExifIfd ifd, ExifTag tag, ExifFormat format,
+ const std::string& buffer, const std::string& msg) {
+ size_t entry_size = buffer.length();
+ // ASCII strings need a NULL terminator; other formats (e.g. undefined) do not.
+ if (format == EXIF_FORMAT_ASCII) {
+ entry_size++;
+ }
+ std::unique_ptr<ExifEntry> entry =
+ addVariableLengthEntry(ifd, tag, format, entry_size, entry_size);
+ if (!entry) {
+ ALOGE("%s: Adding '%s' entry failed", __FUNCTION__, msg.c_str());
+ return false;
+ }
+ memcpy(entry->data, buffer.c_str(), entry_size);
+ return true;
+}
+
+void ExifUtilsImpl::destroyApp1() {
+ /*
+ * Since there is no API to access ExifMem in ExifData->priv, we use free
+ * here, which is the default free function in libexif. See
+ * exif_data_save_data() for detail.
+ */
+ free(app1_buffer_);
+ app1_buffer_ = nullptr;
+ app1_length_ = 0;
+}
+
+bool ExifUtilsImpl::setFromMetadata(const CameraMetadata& metadata,
+ const CameraMetadata& staticInfo,
+ const size_t imageWidth, const size_t imageHeight) {
+ if (!setImageWidth(imageWidth) ||
+ !setImageHeight(imageHeight)) {
+ ALOGE("%s: setting image resolution failed.", __FUNCTION__);
+ return false;
+ }
+
+ struct timespec tp;
+ struct tm time_info;
+ bool time_available = clock_gettime(CLOCK_REALTIME, &tp) != -1;
+ localtime_r(&tp.tv_sec, &time_info);
+ if (!setDateTime(time_info)) {
+ ALOGE("%s: setting date time failed.", __FUNCTION__);
+ return false;
+ }
+
+ float focal_length;
+ camera_metadata_ro_entry entry = metadata.find(ANDROID_LENS_FOCAL_LENGTH);
+ if (entry.count) {
+ focal_length = entry.data.f[0];
+
+ if (!setFocalLength(focal_length)) {
+ ALOGE("%s: setting focal length failed.", __FUNCTION__);
+ return false;
+ }
+
+ camera_metadata_ro_entry sensorSizeEntry =
+ staticInfo.find(ANDROID_SENSOR_INFO_PHYSICAL_SIZE);
+ if (sensorSizeEntry.count == 2) {
+ if (!setFocalLengthIn35mmFilm(
+ focal_length, sensorSizeEntry.data.f[0], sensorSizeEntry.data.f[1])) {
+ ALOGE("%s: setting focal length in 35mm failed.", __FUNCTION__);
+ return false;
+ }
+ }
+ } else {
+ ALOGV("%s: Cannot find focal length in metadata.", __FUNCTION__);
+ }
+
+ if (metadata.exists(ANDROID_SCALER_CROP_REGION) &&
+ staticInfo.exists(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE)) {
+ entry = metadata.find(ANDROID_SCALER_CROP_REGION);
+ camera_metadata_ro_entry activeArrayEntry =
+ staticInfo.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+
+ if (!setDigitalZoomRatio(entry.data.i32[2], entry.data.i32[3],
+ activeArrayEntry.data.i32[2], activeArrayEntry.data.i32[3])) {
+ ALOGE("%s: setting digital zoom ratio failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_JPEG_GPS_COORDINATES)) {
+ entry = metadata.find(ANDROID_JPEG_GPS_COORDINATES);
+ if (entry.count < 3) {
+ ALOGE("%s: GPS coordinates in metadata are not complete.", __FUNCTION__);
+ return false;
+ }
+ if (!setGpsLatitude(entry.data.d[0])) {
+ ALOGE("%s: setting gps latitude failed.", __FUNCTION__);
+ return false;
+ }
+ if (!setGpsLongitude(entry.data.d[1])) {
+ ALOGE("%s: setting gps longitude failed.", __FUNCTION__);
+ return false;
+ }
+ if (!setGpsAltitude(entry.data.d[2])) {
+ ALOGE("%s: setting gps altitude failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD)) {
+ entry = metadata.find(ANDROID_JPEG_GPS_PROCESSING_METHOD);
+ std::string method_str(reinterpret_cast<const char*>(entry.data.u8));
+ if (!setGpsProcessingMethod(method_str)) {
+ ALOGE("%s: setting gps processing method failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (time_available && metadata.exists(ANDROID_JPEG_GPS_TIMESTAMP)) {
+ entry = metadata.find(ANDROID_JPEG_GPS_TIMESTAMP);
+ time_t timestamp = static_cast<time_t>(entry.data.i64[0]);
+ if (gmtime_r(&timestamp, &time_info)) {
+ if (!setGpsTimestamp(time_info)) {
+ ALOGE("%s: setting gps timestamp failed.", __FUNCTION__);
+ return false;
+ }
+ } else {
+ ALOGE("%s: Time transformation failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (staticInfo.exists(ANDROID_CONTROL_AE_COMPENSATION_STEP) &&
+ metadata.exists(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION)) {
+ entry = metadata.find(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION);
+ camera_metadata_ro_entry stepEntry =
+ staticInfo.find(ANDROID_CONTROL_AE_COMPENSATION_STEP);
+ if (!setExposureBias(entry.data.i32[0], stepEntry.data.r[0].numerator,
+ stepEntry.data.r[0].denominator)) {
+ ALOGE("%s: setting exposure bias failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_JPEG_ORIENTATION)) {
+ entry = metadata.find(ANDROID_JPEG_ORIENTATION);
+ if (!setOrientation(entry.data.i32[0])) {
+ ALOGE("%s: setting orientation failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_SENSOR_EXPOSURE_TIME)) {
+ entry = metadata.find(ANDROID_SENSOR_EXPOSURE_TIME);
+ float exposure_time = 1.0f * entry.data.i64[0] / 1e9;
+ if (!setExposureTime(exposure_time)) {
+ ALOGE("%s: setting exposure time failed.", __FUNCTION__);
+ return false;
+ }
+
+ if (!setShutterSpeed(exposure_time)) {
+ ALOGE("%s: setting shutter speed failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_LENS_FOCUS_DISTANCE)) {
+ entry = metadata.find(ANDROID_LENS_FOCUS_DISTANCE);
+ if (!setSubjectDistance(entry.data.f[0])) {
+ ALOGE("%s: setting subject distance failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_SENSOR_SENSITIVITY)) {
+ entry = metadata.find(ANDROID_SENSOR_SENSITIVITY);
+ int32_t iso = entry.data.i32[0];
+ camera_metadata_ro_entry postRawSensEntry =
+ metadata.find(ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST);
+ if (postRawSensEntry.count > 0) {
+ iso = iso * postRawSensEntry.data.i32[0] / 100;
+ }
+
+ if (!setIsoSpeedRating(static_cast<uint16_t>(iso))) {
+ ALOGE("%s: setting iso rating failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_LENS_APERTURE)) {
+ entry = metadata.find(ANDROID_LENS_APERTURE);
+ if (!setFNumber(entry.data.f[0])) {
+ ALOGE("%s: setting F number failed.", __FUNCTION__);
+ return false;
+ }
+ if (!setAperture(entry.data.f[0])) {
+ ALOGE("%s: setting aperture failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ static const uint16_t kSRGBColorSpace = 1;
+ if (!setColorSpace(kSRGBColorSpace)) {
+ ALOGE("%s: setting color space failed.", __FUNCTION__);
+ return false;
+ }
+
+ if (staticInfo.exists(ANDROID_LENS_INFO_AVAILABLE_APERTURES)) {
+ entry = staticInfo.find(ANDROID_LENS_INFO_AVAILABLE_APERTURES);
+ if (!setMaxAperture(entry.data.f[0])) {
+ ALOGE("%s: setting max aperture failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (staticInfo.exists(ANDROID_FLASH_INFO_AVAILABLE)) {
+ entry = staticInfo.find(ANDROID_FLASH_INFO_AVAILABLE);
+ camera_metadata_ro_entry flashStateEntry = metadata.find(ANDROID_FLASH_STATE);
+ camera_metadata_ro_entry aeModeEntry = metadata.find(ANDROID_CONTROL_AE_MODE);
+ uint8_t flashState = flashStateEntry.count > 0 ?
+ flashStateEntry.data.u8[0] : ANDROID_FLASH_STATE_UNAVAILABLE;
+ uint8_t aeMode = aeModeEntry.count > 0 ?
+ aeModeEntry.data.u8[0] : ANDROID_CONTROL_AE_MODE_OFF;
+
+ if (!setFlash(entry.data.u8[0], flashState, aeMode)) {
+ ALOGE("%s: setting flash failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_CONTROL_AWB_MODE)) {
+ entry = metadata.find(ANDROID_CONTROL_AWB_MODE);
+ if (!setWhiteBalance(entry.data.u8[0])) {
+ ALOGE("%s: setting white balance failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ if (metadata.exists(ANDROID_CONTROL_AE_MODE)) {
+ entry = metadata.find(ANDROID_CONTROL_AE_MODE);
+ if (!setExposureMode(entry.data.u8[0])) {
+ ALOGE("%s: setting exposure mode failed.", __FUNCTION__);
+ return false;
+ }
+ }
+ if (time_available) {
+ char str[4];
+ if (snprintf(str, sizeof(str), "%03ld", tp.tv_nsec / 1000000) < 0) {
+ ALOGE("%s: Subsec is invalid: %ld", __FUNCTION__, tp.tv_nsec);
+ return false;
+ }
+ if (!setSubsecTime(std::string(str))) {
+ ALOGE("%s: setting subsec time failed.", __FUNCTION__);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+} // namespace camera3
+} // namespace android
diff --git a/services/camera/libcameraservice/utils/ExifUtils.h b/services/camera/libcameraservice/utils/ExifUtils.h
new file mode 100644
index 0000000..f1d0205
--- /dev/null
+++ b/services/camera/libcameraservice/utils/ExifUtils.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_EXIF_UTILS_H
+#define ANDROID_SERVERS_CAMERA_EXIF_UTILS_H
+
+#include "CameraMetadata.h"
+
+namespace android {
+namespace camera3 {
+
+/*
+ * Orientation value:
+ * 1 2 3 4 5 6 7 8
+ *
+ * 888888 888888 88 88 8888888888 88 88 8888888888
+ * 88 88 88 88 88 88 88 88 88 88 88 88
+ * 8888 8888 8888 8888 88 8888888888 8888888888 88
+ * 88 88 88 88
+ * 88 88 888888 888888
+ */
+enum ExifOrientation : uint16_t {
+ ORIENTATION_UNDEFINED = 0x0,
+ ORIENTATION_0_DEGREES = 0x1,
+ ORIENTATION_90_DEGREES = 0x6,
+ ORIENTATION_180_DEGREES = 0x3,
+ ORIENTATION_270_DEGREES = 0x8,
+};
+
+// This is based on the camera HIDL shim implementation, which was in turn
+// based on the original ChromeOS ARC implementation of a V4L2 HAL.
+
+// ExifUtils can overwrite the APP1 segment with tags set by the caller. It can
+// also add a thumbnail in the APP1 segment if a thumbnail size is specified.
+// ExifUtils can be reused for different images by calling initialize().
+//
+// Example of using this class:
+//  std::unique_ptr<ExifUtils> utils(ExifUtils::create());
+//  utils->initialize(app1Segment, app1SegmentSize);
+//  ...
+//  // Call ExifUtils functions to set Exif tags.
+//  ...
+//  utils->generateApp1();
+//  unsigned int app1Length = utils->getApp1Length();
+//  uint8_t* app1Buffer = new uint8_t[app1Length];
+//  memcpy(app1Buffer, utils->getApp1Buffer(), app1Length);
+class ExifUtils {
+
+public:
+ virtual ~ExifUtils();
+
+ static ExifUtils* create();
+
+ // initialize() can be called multiple times. Previously set Exif tags will
+ // be cleared.
+ virtual bool initialize(const unsigned char *app1Segment, size_t app1SegmentSize) = 0;
+ virtual bool initializeEmpty() = 0;
+
+ // Set all known fields from a metadata structure
+ virtual bool setFromMetadata(const CameraMetadata& metadata,
+ const CameraMetadata& staticInfo,
+ const size_t imageWidth, const size_t imageHeight) = 0;
+
+ // Sets the lens aperture.
+ // Returns false if memory allocation fails.
+ virtual bool setAperture(float aperture) = 0;
+
+ // sets the color space.
+ // Returns false if memory allocation fails.
+ virtual bool setColorSpace(uint16_t color_space) = 0;
+
+ // Sets the date and time of image last modified. It takes local time. The
+ // name of the tag is DateTime in IFD0.
+ // Returns false if memory allocation fails.
+ virtual bool setDateTime(const struct tm& t) = 0;
+
+ // Sets the digital zoom ratio. If the numerator is 0, it means digital zoom
+ // was not used.
+ // Returns false if memory allocation fails.
+ virtual bool setDigitalZoomRatio(uint32_t crop_width, uint32_t crop_height,
+ uint32_t sensor_width, uint32_t sensor_height) = 0;
+
+ // Sets the exposure bias.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureBias(int32_t ev,
+ uint32_t ev_step_numerator, uint32_t ev_step_denominator) = 0;
+
+ // Sets the exposure mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureMode(uint8_t exposure_mode) = 0;
+
+ // Sets the exposure time, given in seconds.
+ // Returns false if memory allocation fails.
+ virtual bool setExposureTime(float exposure_time) = 0;
+
+ // Sets the status of flash.
+ // Returns false if memory allocation fails.
+ virtual bool setFlash(uint8_t flash_available, uint8_t flash_state, uint8_t ae_mode) = 0;
+
+ // Sets the F number.
+ // Returns false if memory allocation fails.
+ virtual bool setFNumber(float f_number) = 0;
+
+ // Sets the focal length of lens used to take the image in millimeters.
+ // Returns false if memory allocation fails.
+ virtual bool setFocalLength(float focal_length) = 0;
+
+ // Sets the focal length of lens for 35mm film used to take the image in millimeters.
+ // Returns false if memory allocation fails.
+ virtual bool setFocalLengthIn35mmFilm(float focal_length,
+ float sensor_size_x, float sensor_size_y) = 0;
+
+ // Sets the altitude in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsAltitude(double altitude) = 0;
+
+ // Sets the latitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLatitude(double latitude) = 0;
+
+ // Sets the longitude with degrees minutes seconds format.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsLongitude(double longitude) = 0;
+
+ // Sets GPS processing method.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsProcessingMethod(const std::string& method) = 0;
+
+ // Sets GPS date stamp and time stamp (atomic clock). It takes UTC time.
+ // Returns false if memory allocation fails.
+ virtual bool setGpsTimestamp(const struct tm& t) = 0;
+
+ // Sets the height (number of rows) of main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageHeight(uint32_t length) = 0;
+
+ // Sets the width (number of columns) of main image.
+ // Returns false if memory allocation fails.
+ virtual bool setImageWidth(uint32_t width) = 0;
+
+ // Sets the ISO speed.
+ // Returns false if memory allocation fails.
+ virtual bool setIsoSpeedRating(uint16_t iso_speed_ratings) = 0;
+
+ // Sets the smallest F number of the lens.
+ // Returns false if memory allocation fails.
+ virtual bool setMaxAperture(float aperture) = 0;
+
+ // Sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientation(uint16_t degrees) = 0;
+
+ // Sets image orientation.
+ // Returns false if memory allocation fails.
+ virtual bool setOrientationValue(ExifOrientation orientationValue) = 0;
+
+ // Sets the shutter speed.
+ // Returns false if memory allocation fails.
+ virtual bool setShutterSpeed(float exposure_time) = 0;
+
+ // Sets the distance to the subject, given in meters.
+ // Returns false if memory allocation fails.
+ virtual bool setSubjectDistance(float diopters) = 0;
+
+ // Sets the fractions of seconds for the <DateTime> tag.
+ // Returns false if memory allocation fails.
+ virtual bool setSubsecTime(const std::string& subsec_time) = 0;
+
+ // Sets the white balance mode set when the image was shot.
+ // Returns false if memory allocation fails.
+ virtual bool setWhiteBalance(uint8_t white_balance) = 0;
+
+ // Generates APP1 segment.
+ // Returns false if generating APP1 segment fails.
+ virtual bool generateApp1() = 0;
+
+ // Gets buffer of APP1 segment. This method must be called only after calling
+ // generateApp1().
+ virtual const uint8_t* getApp1Buffer() = 0;
+
+ // Gets length of APP1 segment. This method must be called only after calling
+ // generateApp1().
+ virtual unsigned int getApp1Length() = 0;
+};
+
+} // namespace camera3
+} // namespace android
+
+#endif // ANDROID_SERVERS_CAMERA_EXIF_UTILS_H
diff --git a/services/mediaanalytics/Android.bp b/services/mediaanalytics/Android.bp
new file mode 100644
index 0000000..c93c120
--- /dev/null
+++ b/services/mediaanalytics/Android.bp
@@ -0,0 +1,49 @@
+// Media Statistics service
+//
+
+cc_binary {
+ name: "mediametrics",
+
+ srcs: [
+ "main_mediametrics.cpp",
+ "MediaAnalyticsService.cpp",
+ ],
+
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ "libmedia",
+ "libutils",
+ "libbinder",
+ "libdl",
+ "libgui",
+ "libmediautils",
+ "libmediametrics",
+ "libstagefright_foundation",
+ ],
+
+ static_libs: ["libregistermsext"],
+
+ include_dirs: [
+ "frameworks/av/media/libstagefright/include",
+ "frameworks/av/media/libstagefright/rtsp",
+ "frameworks/av/media/libstagefright/webm",
+ "frameworks/av/include/media",
+ "frameworks/av/include/camera",
+ "frameworks/native/include/media/openmax",
+ "frameworks/native/include/media/hardware",
+ "external/tremolo/Tremolo",
+ ],
+
+ init_rc: ["mediametrics.rc"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-Wno-error=deprecated-declarations",
+ ],
+ clang: true,
+
+}
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
deleted file mode 100644
index 5b20e61..0000000
--- a/services/mediaanalytics/Android.mk
+++ /dev/null
@@ -1,47 +0,0 @@
-# Media Statistics service
-#
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- main_mediametrics.cpp \
- MediaAnalyticsService.cpp
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- liblog \
- libmedia \
- libutils \
- libbinder \
- libdl \
- libgui \
- libmedia \
- libmediautils \
- libmediametrics \
- libstagefright_foundation \
- libutils
-
-LOCAL_STATIC_LIBRARIES := \
- libregistermsext
-
-LOCAL_C_INCLUDES := \
- $(TOP)/frameworks/av/media/libstagefright/include \
- $(TOP)/frameworks/av/media/libstagefright/rtsp \
- $(TOP)/frameworks/av/media/libstagefright/wifi-display \
- $(TOP)/frameworks/av/media/libstagefright/webm \
- $(TOP)/frameworks/av/include/media \
- $(TOP)/frameworks/av/include/camera \
- $(TOP)/frameworks/native/include/media/openmax \
- $(TOP)/frameworks/native/include/media/hardware \
- $(TOP)/external/tremolo/Tremolo
-
-
-LOCAL_MODULE:= mediametrics
-
-LOCAL_INIT_RC := mediametrics.rc
-
-LOCAL_CFLAGS := -Werror -Wall -Wno-error=deprecated-declarations
-LOCAL_CLANG := true
-
-include $(BUILD_EXECUTABLE)
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index ae832ba..4f3ac1b 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -51,7 +51,6 @@
#include <utils/Timers.h>
#include <utils/Vector.h>
-#include <media/AudioPolicyHelper.h>
#include <media/IMediaHTTPService.h>
#include <media/IRemoteDisplay.h>
#include <media/IRemoteDisplayClient.h>
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 3b6dc80..f78c671 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -69,9 +69,12 @@
include $(CLEAR_VARS)
# seccomp is not required for coverage build.
ifneq ($(NATIVE_COVERAGE),true)
-LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediacodec.policy
-LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediacodec.policy
+LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaswcodec.policy
+LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaswcodec.policy
endif
+
LOCAL_SRC_FILES := \
main_swcodecservice.cpp \
MediaCodecUpdateService.cpp \
@@ -105,10 +108,17 @@
libutils \
libziparchive \
+LOCAL_HEADER_LIBRARIES := \
+ libnativeloader-dummy-headers \
+
LOCAL_MODULE := mediaswcodec
LOCAL_INIT_RC := mediaswcodec.rc
-LOCAL_32_BIT_ONLY := true
LOCAL_SANITIZE := scudo
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86_64 arm64))
+ LOCAL_MULTILIB := both
+ LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)32
+ LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)
+endif
sanitizer_runtime_libraries :=
llndk_libraries :=
@@ -137,4 +147,16 @@
include $(BUILD_PREBUILT)
endif
+####################################################################
+
+# sw service seccomp policy
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86 x86_64 arm arm64))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediaswcodec.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+LOCAL_SRC_FILES := seccomp_policy/mediaswcodec-$(TARGET_ARCH).policy
+include $(BUILD_PREBUILT)
+endif
+
include $(call all-makefiles-under, $(LOCAL_PATH))
diff --git a/services/mediacodec/MediaCodecUpdateService.cpp b/services/mediacodec/MediaCodecUpdateService.cpp
index 0e6892d..50ccbce 100644
--- a/services/mediacodec/MediaCodecUpdateService.cpp
+++ b/services/mediacodec/MediaCodecUpdateService.cpp
@@ -20,28 +20,12 @@
#include <android/dlext.h>
#include <dlfcn.h>
#include <media/CodecServiceRegistrant.h>
+#include <nativeloader/dlext_namespaces.h>
#include <utils/Log.h>
#include <utils/String8.h>
#include "MediaCodecUpdateService.h"
-// Copied from GraphicsEnv.cpp
-// TODO(b/37049319) Get this from a header once one exists
-extern "C" {
- android_namespace_t* android_create_namespace(const char* name,
- const char* ld_library_path,
- const char* default_library_path,
- uint64_t type,
- const char* permitted_when_isolated_path,
- android_namespace_t* parent);
- bool android_link_namespaces(android_namespace_t* from,
- android_namespace_t* to,
- const char* shared_libs_sonames);
- enum {
- ANDROID_NAMESPACE_TYPE_ISOLATED = 1,
- };
-}
-
namespace android {
void loadFromApex(const char *libDirPath) {
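The hunk above swaps the hand-copied extern "C" linker-namespace declarations for the real header, <nativeloader/dlext_namespaces.h>. For orientation only, here is a hedged C++ sketch of how a library can be loaded out of the swcodec APEX through an isolated namespace with those functions plus android_dlopen_ext() from <android/dlext.h>; the namespace name, linked-library list, and helper name are illustrative and are not taken from the patch.

    // Sketch only: not the exact body of loadFromApex().
    #define LOG_TAG "SwCodecNamespaceSketch"
    #include <android/dlext.h>
    #include <dlfcn.h>
    #include <nativeloader/dlext_namespaces.h>
    #include <utils/Log.h>

    static void* loadFromIsolatedNamespace(const char* libDirPath, const char* libName) {
        // Create an isolated namespace rooted at the APEX lib directory.
        android_namespace_t* ns = android_create_namespace(
                "swcodec-sketch",
                /* ld_library_path */ libDirPath,
                /* default_library_path */ libDirPath,
                ANDROID_NAMESPACE_TYPE_ISOLATED,
                /* permitted_when_isolated_path */ libDirPath,
                /* parent */ nullptr);
        // Allow a minimal set of system libraries to be resolved from the
        // default namespace (the actual list used by the service may differ).
        if (ns == nullptr ||
                !android_link_namespaces(ns, nullptr, "libc.so:libm.so:libdl.so:liblog.so")) {
            ALOGE("failed to set up namespace for %s", libDirPath);
            return nullptr;
        }
        android_dlextinfo extinfo = {};
        extinfo.flags = ANDROID_DLEXT_USE_NAMESPACE;
        extinfo.library_namespace = ns;
        return android_dlopen_ext(libName, RTLD_NOW | RTLD_LOCAL, &extinfo);
    }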
diff --git a/services/mediacodec/main_swcodecservice.cpp b/services/mediacodec/main_swcodecservice.cpp
index 1168825..05b5695 100644
--- a/services/mediacodec/main_swcodecservice.cpp
+++ b/services/mediacodec/main_swcodecservice.cpp
@@ -26,12 +26,10 @@
using namespace android;
-// TODO: replace policy with software codec-only policies
-// Must match location in Android.mk.
static const char kSystemSeccompPolicyPath[] =
- "/system/etc/seccomp_policy/mediacodec.policy";
+ "/system/etc/seccomp_policy/mediaswcodec.policy";
static const char kVendorSeccompPolicyPath[] =
- "/vendor/etc/seccomp_policy/mediacodec.policy";
+ "/vendor/etc/seccomp_policy/mediaswcodec.policy";
// Disable Scudo's mismatch allocation check, as it is being triggered
// by some third party code.
@@ -47,8 +45,11 @@
::android::hardware::configureRpcThreadpool(64, false);
- // codec libs are currently 32-bit only
+#ifdef __LP64__
+ loadFromApex("/apex/com.android.media.swcodec/lib64");
+#else
loadFromApex("/apex/com.android.media.swcodec/lib");
+#endif
::android::hardware::joinRpcThreadpool();
}
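Together with the Android.mk change earlier in this patch (LOCAL_MULTILIB := both, with the 32-bit binary renamed mediaswcodec32), this means each instance of the service loads codecs from the library directory matching its own ABI. A minimal sketch of the same compile-time selection, with a hypothetical helper name:

    #include <cstdio>

    // Mirrors the #ifdef __LP64__ switch added above; helper name is illustrative.
    static const char* swcodecApexLibDir() {
    #ifdef __LP64__
        return "/apex/com.android.media.swcodec/lib64";
    #else
        return "/apex/com.android.media.swcodec/lib";
    #endif
    }

    int main() {
        std::printf("would load codecs from %s\n", swcodecApexLibDir());
        return 0;
    }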
diff --git a/services/mediacodec/mediaswcodec.rc b/services/mediacodec/mediaswcodec.rc
index dfe3381..3549666 100644
--- a/services/mediacodec/mediaswcodec.rc
+++ b/services/mediacodec/mediaswcodec.rc
@@ -2,5 +2,6 @@
class main
user mediacodec
group camera drmrpc mediadrm
+ updatable
ioprio rt 4
writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index 8c40ad1..1470de2 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -28,6 +28,7 @@
"libcodec2_soft_amrwbdec",
"libcodec2_soft_amrwbenc",
"libcodec2_soft_hevcdec",
+ "libcodec2_soft_hevcenc",
"libcodec2_soft_g711alawdec",
"libcodec2_soft_g711mlawdec",
"libcodec2_soft_mpeg2dec",
@@ -38,6 +39,7 @@
"libcodec2_soft_mp3dec",
"libcodec2_soft_vorbisdec",
"libcodec2_soft_opusdec",
+ "libcodec2_soft_opusenc",
"libcodec2_soft_vp8dec",
"libcodec2_soft_vp9dec",
"libcodec2_soft_av1dec",
@@ -49,7 +51,5 @@
"libcodec2_soft_gsmdec",
"libcodec2_soft_xaacdec",
],
-
- compile_multilib: "32",
}
diff --git a/services/mediacodec/seccomp_policy/mediacodec-arm.policy b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
index edf4dab..3870a11 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-arm.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-arm.policy
@@ -13,6 +13,9 @@
ppoll: 1
mmap2: 1
getrandom: 1
+memfd_create: 1
+ftruncate: 1
+ftruncate64: 1
# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
# parser support for '<' is in this needs to be modified to also prevent
@@ -55,8 +58,4 @@
getdents64: 1
getrandom: 1
-# Used by UBSan diagnostic messages
-readlink: 1
-open: 1
-
@include /system/etc/seccomp_policy/crash_dump.arm.policy
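The memfd_create/ftruncate/ftruncate64 entries added here (and repeated in the new mediaswcodec policies below) permit the standard memfd-backed shared-memory sequence; that the codec process needs them for ashmem-style buffer sharing is an assumption, not something the patch states. A self-contained sketch of the syscall pattern these lines allow (memfd_create requires bionic or glibc >= 2.27):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        // memfd_create -> ftruncate -> mmap, exactly the calls whitelisted above.
        int fd = memfd_create("codec-buffer-sketch", MFD_CLOEXEC);
        if (fd < 0) { perror("memfd_create"); return 1; }
        if (ftruncate(fd, 4096) != 0) { perror("ftruncate"); return 1; }
        void* p = mmap(nullptr, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }
        std::memset(p, 0, 4096);
        munmap(p, 4096);
        close(fd);
        return 0;
    }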
diff --git a/services/mediacodec/seccomp_policy/mediacodec-x86.policy b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
index 966e214..845f84b 100644
--- a/services/mediacodec/seccomp_policy/mediacodec-x86.policy
+++ b/services/mediacodec/seccomp_policy/mediacodec-x86.policy
@@ -18,15 +18,20 @@
openat: 1
open: 1
getuid32: 1
+getuid: 1
+getrlimit: 1
writev: 1
ioctl: 1
close: 1
mmap2: 1
+mmap: 1
fstat64: 1
+fstat: 1
stat64: 1
statfs64: 1
madvise: 1
fstatat64: 1
+newfstatat: 1
futex: 1
munmap: 1
faccessat: 1
@@ -43,6 +48,7 @@
readlinkat: 1
_llseek: 1
fstatfs64: 1
+fstatfs: 1
pread64: 1
mremap: 1
dup: 1
@@ -51,6 +57,9 @@
nanosleep: 1
sched_setscheduler: 1
uname: 1
+memfd_create: 1
+ftruncate: 1
+ftruncate64: 1
# Required by AddressSanitizer
gettid: 1
@@ -58,8 +67,4 @@
getpid: 1
gettid: 1
-# Used by UBSan diagnostic messages
-readlink: 1
-open: 1
-
@include /system/etc/seccomp_policy/crash_dump.x86.policy
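The new un-suffixed entries (getuid, mmap, fstat, newfstatat, fstatfs) sit alongside the existing getuid32/mmap2/fstat64/fstatat64/fstatfs64 lines because this file is now also consumed by 64-bit processes through the mediaswcodec-x86_64.policy symlink added below, and the 64-bit x86 ABI only exposes the un-suffixed names. A small compile-time check illustrating that assumption, relying only on the kernel uapi headers:

    #include <sys/syscall.h>

    // On x86_64 the 32-bit-only syscall names are simply absent, so a policy
    // shared between x86 and x86_64 must list both spellings.
    #if defined(__x86_64__)
    #  if defined(__NR_getuid32) || defined(__NR_mmap2)
    #    error "x86_64 is not expected to define the 32-bit-only syscall names"
    #  endif
    #endif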
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
new file mode 100644
index 0000000..02cedba
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm.policy
@@ -0,0 +1,63 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+futex: 1
+# ioctl calls are filtered via the selinux policy.
+ioctl: 1
+sched_yield: 1
+close: 1
+dup: 1
+ppoll: 1
+mprotect: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+mmap2: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+memfd_create: 1
+ftruncate: 1
+ftruncate64: 1
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
+# parser support for '<' is in this needs to be modified to also prevent
+# |old_address| and |new_address| from touching the exception vector page, which
+# on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
+munmap: 1
+prctl: 1
+getuid32: 1
+writev: 1
+sigaltstack: 1
+clone: 1
+exit: 1
+lseek: 1
+rt_sigprocmask: 1
+openat: 1
+fstat64: 1
+write: 1
+nanosleep: 1
+setpriority: 1
+set_tid_address: 1
+getdents64: 1
+readlinkat: 1
+read: 1
+pread64: 1
+fstatfs64: 1
+gettimeofday: 1
+faccessat: 1
+_llseek: 1
+fstatat64: 1
+ugetrlimit: 1
+exit_group: 1
+restart_syscall: 1
+rt_sigreturn: 1
+getrandom: 1
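In the policy above, "mremap: arg3 == 3" constrains the zero-indexed fourth argument of mremap(old_address, old_size, new_size, flags, new_address), i.e. the flags word. Assuming the usual Linux values (MREMAP_MAYMOVE = 1, MREMAP_FIXED = 2), the literal 3 is exactly their OR, which the comment in the policy spells out; a one-line check:

    #include <linux/mman.h>

    // The policy literal must track the mremap flag values from the uapi headers.
    static_assert((MREMAP_MAYMOVE | MREMAP_FIXED) == 3,
                  "mremap policy expects flags == MREMAP_MAYMOVE | MREMAP_FIXED");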
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
new file mode 100644
index 0000000..78ecaf5
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-arm64.policy
@@ -0,0 +1,64 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+futex: 1
+# ioctl calls are filtered via the selinux policy.
+ioctl: 1
+sched_yield: 1
+close: 1
+dup: 1
+ppoll: 1
+mprotect: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+mmap: arg2 in ~PROT_EXEC || arg2 in ~PROT_WRITE
+getuid: 1
+getrlimit: 1
+fstat: 1
+newfstatat: 1
+fstatfs: 1
+memfd_create: 1
+ftruncate: 1
+ftruncate64: 1
+
+# mremap: Ensure |flags| are (MREMAP_MAYMOVE | MREMAP_FIXED) TODO: Once minijail
+# parser support for '<' is in this needs to be modified to also prevent
+# |old_address| and |new_address| from touching the exception vector page, which
+# on ARM is statically loaded at 0xffff 0000. See
+# http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0211h/Babfeega.html
+# for more details.
+mremap: arg3 == 3
+munmap: 1
+prctl: 1
+writev: 1
+sigaltstack: 1
+clone: 1
+exit: 1
+lseek: 1
+rt_sigprocmask: 1
+openat: 1
+write: 1
+nanosleep: 1
+setpriority: 1
+set_tid_address: 1
+getdents64: 1
+readlinkat: 1
+read: 1
+pread64: 1
+gettimeofday: 1
+faccessat: 1
+exit_group: 1
+restart_syscall: 1
+rt_sigreturn: 1
+getrandom: 1
+madvise: 1
+
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy b/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy
new file mode 120000
index 0000000..ab2592a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-x86.policy
@@ -0,0 +1 @@
+mediacodec-x86.policy
\ No newline at end of file
diff --git a/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy b/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy
new file mode 120000
index 0000000..ab2592a
--- /dev/null
+++ b/services/mediacodec/seccomp_policy/mediaswcodec-x86_64.policy
@@ -0,0 +1 @@
+mediacodec-x86.policy
\ No newline at end of file
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index dd64881..65fcf40 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -4,29 +4,10 @@
include $(CLEAR_VARS)
LOCAL_CFLAGS := -Wall -Werror
LOCAL_SRC_FILES := \
- MediaExtractorService.cpp \
- MediaExtractorUpdateService.cpp \
+ MediaExtractorService.cpp
LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
LOCAL_MODULE:= libmediaextractorservice
-
-sanitizer_runtime_libraries := $(call normalize-path-list,$(addsuffix .so,\
- $(ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
- $(UBSAN_RUNTIME_LIBRARY) \
- $(TSAN_RUNTIME_LIBRARY)))
-
-# $(info Sanitizer: $(sanitizer_runtime_libraries))
-
-ndk_libraries := $(call normalize-path-list,$(addprefix lib,$(addsuffix .so,\
- $(NDK_PREBUILT_SHARED_LIBRARIES))))
-
-# $(info NDK: $(ndk_libraries))
-
-LOCAL_CFLAGS += -DLINKED_LIBRARIES='"$(sanitizer_runtime_libraries):$(ndk_libraries)"'
-
-sanitizer_runtime_libraries :=
-ndk_libraries :=
-
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index 0665930..de5c3e4 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -30,9 +30,7 @@
namespace android {
MediaExtractorService::MediaExtractorService()
- : BnMediaExtractorService() {
- MediaExtractorFactory::SetLinkedLibraries(std::string(LINKED_LIBRARIES));
-}
+ : BnMediaExtractorService() { }
sp<IMediaExtractor> MediaExtractorService::makeExtractor(
const sp<IDataSource> &remoteSource, const char *mime) {
diff --git a/services/mediaextractor/MediaExtractorUpdateService.cpp b/services/mediaextractor/MediaExtractorUpdateService.cpp
deleted file mode 100644
index 473a698..0000000
--- a/services/mediaextractor/MediaExtractorUpdateService.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaExtractorUpdateService"
-#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <media/stagefright/MediaExtractorFactory.h>
-
-#include "MediaExtractorUpdateService.h"
-
-namespace android {
-namespace media {
-
-binder::Status MediaExtractorUpdateService::loadPlugins(const ::std::string& apkPath) {
- ALOGV("loadPlugins %s", apkPath.c_str());
- MediaExtractorFactory::LoadPlugins(apkPath);
- return binder::Status::ok();
-}
-
-} // namespace media
-} // namespace android
diff --git a/services/mediaextractor/MediaExtractorUpdateService.h b/services/mediaextractor/MediaExtractorUpdateService.h
deleted file mode 100644
index ea34c9d..0000000
--- a/services/mediaextractor/MediaExtractorUpdateService.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
-#define ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
-
-#include <binder/BinderService.h>
-#include <android/media/BnMediaUpdateService.h>
-
-namespace android {
-namespace media {
-
-class MediaExtractorUpdateService
- : public BinderService<MediaExtractorUpdateService>, public BnMediaUpdateService
-{
- friend class BinderService<MediaExtractorUpdateService>;
-public:
- MediaExtractorUpdateService() : BnMediaUpdateService() { }
- virtual ~MediaExtractorUpdateService() { }
- static const char* getServiceName() { return "media.extractor.update"; }
- binder::Status loadPlugins(const ::std::string& apkPath);
-};
-
-} // namespace media
-} // namespace android
-
-#endif // ANDROID_MEDIA_EXTRACTOR_UPDATE_SERVICE_H
diff --git a/services/mediaextractor/main_extractorservice.cpp b/services/mediaextractor/main_extractorservice.cpp
index 5f42711..3c15bfd 100644
--- a/services/mediaextractor/main_extractorservice.cpp
+++ b/services/mediaextractor/main_extractorservice.cpp
@@ -31,7 +31,6 @@
// from LOCAL_C_INCLUDES
#include "IcuUtils.h"
#include "MediaExtractorService.h"
-#include "MediaExtractorUpdateService.h"
#include "MediaUtils.h"
#include "minijail.h"
@@ -72,11 +71,6 @@
sp<IServiceManager> sm = defaultServiceManager();
MediaExtractorService::instantiate();
- std::string value = base::GetProperty("ro.build.type", "unknown");
- if (value == "userdebug" || value == "eng") {
- media::MediaExtractorUpdateService::instantiate();
- }
-
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
diff --git a/services/mediaextractor/mediaextractor.rc b/services/mediaextractor/mediaextractor.rc
index 5fc2941..6b2d0a5 100644
--- a/services/mediaextractor/mediaextractor.rc
+++ b/services/mediaextractor/mediaextractor.rc
@@ -2,5 +2,7 @@
class main
user mediaex
group drmrpc mediadrm
+ # TODO(b/123275379): Remove updatable when http://aosp/878198 has landed
+ updatable
ioprio rt 4
writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
new file mode 100644
index 0000000..f3339a0
--- /dev/null
+++ b/services/mediaresourcemanager/Android.bp
@@ -0,0 +1,26 @@
+
+
+cc_library_shared {
+ name: "libresourcemanagerservice",
+
+ srcs: [
+ "ResourceManagerService.cpp",
+ "ServiceLog.cpp",
+ ],
+
+ shared_libs: [
+ "libmedia",
+ "libmediautils",
+ "libbinder",
+ "libutils",
+ "liblog",
+ ],
+
+ include_dirs: ["frameworks/av/include"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+}
diff --git a/services/mediaresourcemanager/Android.mk b/services/mediaresourcemanager/Android.mk
deleted file mode 100644
index 5823036..0000000
--- a/services/mediaresourcemanager/Android.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := ResourceManagerService.cpp ServiceLog.cpp
-
-LOCAL_SHARED_LIBRARIES := libmedia libmediautils libbinder libutils liblog
-
-LOCAL_MODULE:= libresourcemanagerservice
-
-LOCAL_32_BIT_ONLY := true
-
-LOCAL_C_INCLUDES += \
- frameworks/av/include
-
-LOCAL_CFLAGS += -Werror -Wall
-
-include $(BUILD_SHARED_LIBRARY)
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
new file mode 100644
index 0000000..70e8833
--- /dev/null
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -0,0 +1,41 @@
+// Build the unit tests.
+cc_test {
+ name: "ResourceManagerService_test",
+ srcs: ["ResourceManagerService_test.cpp"],
+ shared_libs: [
+ "libbinder",
+ "liblog",
+ "libmedia",
+ "libresourcemanagerservice",
+ "libutils",
+ ],
+ include_dirs: [
+ "frameworks/av/include",
+ "frameworks/av/services/mediaresourcemanager",
+ ],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+ compile_multilib: "32",
+}
+
+cc_test {
+ name: "ServiceLog_test",
+ srcs: ["ServiceLog_test.cpp"],
+ shared_libs: [
+ "liblog",
+ "libmedia",
+ "libresourcemanagerservice",
+ "libutils",
+ ],
+ include_dirs: [
+ "frameworks/av/include",
+ "frameworks/av/services/mediaresourcemanager",
+ ],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+ compile_multilib: "32",
+}
diff --git a/services/mediaresourcemanager/test/Android.mk b/services/mediaresourcemanager/test/Android.mk
deleted file mode 100644
index 6abcf92..0000000
--- a/services/mediaresourcemanager/test/Android.mk
+++ /dev/null
@@ -1,52 +0,0 @@
-# Build the unit tests.
-LOCAL_PATH:= $(call my-dir)
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := ResourceManagerService_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- ResourceManagerService_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libbinder \
- liblog \
- libmedia \
- libresourcemanagerservice \
- libutils \
-
-LOCAL_C_INCLUDES := \
- frameworks/av/include \
- frameworks/av/services/mediaresourcemanager \
-
-LOCAL_CFLAGS += -Werror -Wall
-
-LOCAL_32_BIT_ONLY := true
-
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := ServiceLog_test
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_SRC_FILES := \
- ServiceLog_test.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- liblog \
- libmedia \
- libresourcemanagerservice \
- libutils \
-
-LOCAL_C_INCLUDES := \
- frameworks/av/include \
- frameworks/av/services/mediaresourcemanager \
-
-LOCAL_CFLAGS += -Werror -Wall
-
-LOCAL_32_BIT_ONLY := true
-
-include $(BUILD_NATIVE_TEST)