Merge "Update linkerconfig configuration format" am: 6f27296e13 am: 91fecb51c2 am: e1311102cf am: c31812c4a3
Original change: https://android-review.googlesource.com/c/platform/frameworks/av/+/1450496
Change-Id: I1a241dc073a45a7660b9ce44bec7f814669c1fdc
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index ae920c0..8fe48c2 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,2 +1,11 @@
[Hook Scripts]
mainline_hook = ${REPO_ROOT}/frameworks/av/tools/mainline_hook_partial.sh ${REPO_ROOT} ${PREUPLOAD_FILES}
+
+[Builtin Hooks]
+clang_format = true
+
+[Builtin Hooks Options]
+# Only turn on clang-format check for the following subfolders.
+clang_format = --commit ${PREUPLOAD_COMMIT} --style file --extensions c,h,cc,cpp
+ media/libmediatranscoding/
+ services/mediatranscoding/
diff --git a/apex/TEST_MAPPING b/apex/TEST_MAPPING
index f036516..09c46d6 100644
--- a/apex/TEST_MAPPING
+++ b/apex/TEST_MAPPING
@@ -14,17 +14,9 @@
},
{
"include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
}
]
}
diff --git a/apex/mediaswcodec.rc b/apex/mediaswcodec.rc
index d17481b..0c9b8c8 100644
--- a/apex/mediaswcodec.rc
+++ b/apex/mediaswcodec.rc
@@ -2,6 +2,5 @@
class main
user mediacodec
group camera drmrpc mediadrm
- override
ioprio rt 4
writepid /dev/cpuset/foreground/tasks
diff --git a/camera/Android.bp b/camera/Android.bp
index fa36bb3..b777d74 100644
--- a/camera/Android.bp
+++ b/camera/Android.bp
@@ -38,7 +38,6 @@
"ICamera.cpp",
"ICameraClient.cpp",
"ICameraRecordingProxy.cpp",
- "ICameraRecordingProxyListener.cpp",
"camera2/CaptureRequest.cpp",
"camera2/ConcurrentCamera.cpp",
"camera2/OutputConfiguration.cpp",
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 84d1d93..f7d194e 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -25,7 +25,6 @@
#include <binder/IMemory.h>
#include <Camera.h>
-#include <ICameraRecordingProxyListener.h>
#include <android/hardware/ICameraService.h>
#include <android/hardware/ICamera.h>
@@ -77,63 +76,6 @@
return CameraBaseT::connect(cameraId, clientPackageName, clientUid, clientPid);
}
-status_t Camera::connectLegacy(int cameraId, int halVersion,
- const String16& clientPackageName,
- int clientUid,
- sp<Camera>& camera)
-{
- ALOGV("%s: connect legacy camera device", __FUNCTION__);
- sp<Camera> c = new Camera(cameraId);
- sp<::android::hardware::ICameraClient> cl = c;
- status_t status = NO_ERROR;
- const sp<::android::hardware::ICameraService>& cs = CameraBaseT::getCameraService();
-
- binder::Status ret;
- if (cs != nullptr) {
- ret = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
- clientUid, /*out*/&(c->mCamera));
- }
- if (ret.isOk() && c->mCamera != nullptr) {
- IInterface::asBinder(c->mCamera)->linkToDeath(c);
- c->mStatus = NO_ERROR;
- camera = c;
- } else {
- switch(ret.serviceSpecificErrorCode()) {
- case hardware::ICameraService::ERROR_DISCONNECTED:
- status = -ENODEV;
- break;
- case hardware::ICameraService::ERROR_CAMERA_IN_USE:
- status = -EBUSY;
- break;
- case hardware::ICameraService::ERROR_INVALID_OPERATION:
- status = -EINVAL;
- break;
- case hardware::ICameraService::ERROR_MAX_CAMERAS_IN_USE:
- status = -EUSERS;
- break;
- case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
- status = BAD_VALUE;
- break;
- case hardware::ICameraService::ERROR_DEPRECATED_HAL:
- status = -EOPNOTSUPP;
- break;
- case hardware::ICameraService::ERROR_DISABLED:
- status = -EACCES;
- break;
- case hardware::ICameraService::ERROR_PERMISSION_DENIED:
- status = PERMISSION_DENIED;
- break;
- default:
- status = -EINVAL;
- ALOGW("An error occurred while connecting to camera %d: %s", cameraId,
- (cs != nullptr) ? "Service not available" : ret.toString8().string());
- break;
- }
- c.clear();
- }
- return status;
-}
-
status_t Camera::reconnect()
{
ALOGV("reconnect");
@@ -214,10 +156,6 @@
void Camera::stopRecording()
{
ALOGV("stopRecording");
- {
- Mutex::Autolock _l(mLock);
- mRecordingProxyListener.clear();
- }
sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return;
c->stopRecording();
@@ -325,12 +263,6 @@
mListener = listener;
}
-void Camera::setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener)
-{
- Mutex::Autolock _l(mLock);
- mRecordingProxyListener = listener;
-}
-
void Camera::setPreviewCallbackFlags(int flag)
{
ALOGV("setPreviewCallbackFlags");
@@ -384,19 +316,6 @@
// callback from camera service when timestamped frame is ready
void Camera::dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr)
{
- // If recording proxy listener is registered, forward the frame and return.
- // The other listener (mListener) is ignored because the receiver needs to
- // call releaseRecordingFrame.
- sp<ICameraRecordingProxyListener> proxylistener;
- {
- Mutex::Autolock _l(mLock);
- proxylistener = mRecordingProxyListener;
- }
- if (proxylistener != NULL) {
- proxylistener->dataCallbackTimestamp(timestamp, msgType, dataPtr);
- return;
- }
-
sp<CameraListener> listener;
{
Mutex::Autolock _l(mLock);
@@ -413,19 +332,6 @@
void Camera::recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle)
{
- // If recording proxy listener is registered, forward the frame and return.
- // The other listener (mListener) is ignored because the receiver needs to
- // call releaseRecordingFrameHandle.
- sp<ICameraRecordingProxyListener> proxylistener;
- {
- Mutex::Autolock _l(mLock);
- proxylistener = mRecordingProxyListener;
- }
- if (proxylistener != NULL) {
- proxylistener->recordingFrameHandleCallbackTimestamp(timestamp, handle);
- return;
- }
-
sp<CameraListener> listener;
{
Mutex::Autolock _l(mLock);
@@ -444,19 +350,6 @@
const std::vector<nsecs_t>& timestamps,
const std::vector<native_handle_t*>& handles)
{
- // If recording proxy listener is registered, forward the frame and return.
- // The other listener (mListener) is ignored because the receiver needs to
- // call releaseRecordingFrameHandle.
- sp<ICameraRecordingProxyListener> proxylistener;
- {
- Mutex::Autolock _l(mLock);
- proxylistener = mRecordingProxyListener;
- }
- if (proxylistener != NULL) {
- proxylistener->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
- return;
- }
-
sp<CameraListener> listener;
{
Mutex::Autolock _l(mLock);
@@ -476,10 +369,9 @@
return new RecordingProxy(this);
}
-status_t Camera::RecordingProxy::startRecording(const sp<ICameraRecordingProxyListener>& listener)
+status_t Camera::RecordingProxy::startRecording()
{
ALOGV("RecordingProxy::startRecording");
- mCamera->setRecordingProxyListener(listener);
mCamera->reconnect();
return mCamera->startRecording();
}
@@ -490,23 +382,6 @@
mCamera->stopRecording();
}
-void Camera::RecordingProxy::releaseRecordingFrame(const sp<IMemory>& mem)
-{
- ALOGV("RecordingProxy::releaseRecordingFrame");
- mCamera->releaseRecordingFrame(mem);
-}
-
-void Camera::RecordingProxy::releaseRecordingFrameHandle(native_handle_t* handle) {
- ALOGV("RecordingProxy::releaseRecordingFrameHandle");
- mCamera->releaseRecordingFrameHandle(handle);
-}
-
-void Camera::RecordingProxy::releaseRecordingFrameHandleBatch(
- const std::vector<native_handle_t*>& handles) {
- ALOGV("RecordingProxy::releaseRecordingFrameHandleBatch");
- mCamera->releaseRecordingFrameHandleBatch(handles);
-}
-
Camera::RecordingProxy::RecordingProxy(const sp<Camera>& camera)
{
mCamera = camera;
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 024311f..96ea5f2 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -22,6 +22,7 @@
#include <binder/Parcel.h>
#include <camera/CameraMetadata.h>
+#include <camera_metadata_hidden.h>
namespace android {
@@ -877,5 +878,8 @@
return OK;
}
+metadata_vendor_id_t CameraMetadata::getVendorId() {
+ return get_camera_metadata_vendor_id(mBuffer);
+}
}; // namespace android
diff --git a/camera/ICameraRecordingProxy.cpp b/camera/ICameraRecordingProxy.cpp
index bd6af75..97523a5 100644
--- a/camera/ICameraRecordingProxy.cpp
+++ b/camera/ICameraRecordingProxy.cpp
@@ -18,7 +18,6 @@
#define LOG_TAG "ICameraRecordingProxy"
#include <camera/CameraUtils.h>
#include <camera/ICameraRecordingProxy.h>
-#include <camera/ICameraRecordingProxyListener.h>
#include <binder/IMemory.h>
#include <binder/Parcel.h>
#include <media/hardware/HardwareAPI.h>
@@ -29,10 +28,7 @@
enum {
START_RECORDING = IBinder::FIRST_CALL_TRANSACTION,
- STOP_RECORDING,
- RELEASE_RECORDING_FRAME,
- RELEASE_RECORDING_FRAME_HANDLE,
- RELEASE_RECORDING_FRAME_HANDLE_BATCH,
+ STOP_RECORDING
};
@@ -44,12 +40,11 @@
{
}
- status_t startRecording(const sp<ICameraRecordingProxyListener>& listener)
+ status_t startRecording()
{
ALOGV("startRecording");
Parcel data, reply;
data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(listener));
remote()->transact(START_RECORDING, data, &reply);
return reply.readInt32();
}
@@ -61,46 +56,6 @@
data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
remote()->transact(STOP_RECORDING, data, &reply);
}
-
- void releaseRecordingFrame(const sp<IMemory>& mem)
- {
- ALOGV("releaseRecordingFrame");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(mem));
- remote()->transact(RELEASE_RECORDING_FRAME, data, &reply);
- }
-
- void releaseRecordingFrameHandle(native_handle_t *handle) {
- ALOGV("releaseRecordingFrameHandle");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
- data.writeNativeHandle(handle);
-
- remote()->transact(RELEASE_RECORDING_FRAME_HANDLE, data, &reply);
-
- // Close the native handle because camera received a dup copy.
- native_handle_close(handle);
- native_handle_delete(handle);
- }
-
- void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
- ALOGV("releaseRecordingFrameHandleBatch");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
- uint32_t n = handles.size();
- data.writeUint32(n);
- for (auto& handle : handles) {
- data.writeNativeHandle(handle);
- }
- remote()->transact(RELEASE_RECORDING_FRAME_HANDLE_BATCH, data, &reply);
-
- // Close the native handle because camera received a dup copy.
- for (auto& handle : handles) {
- native_handle_close(handle);
- native_handle_delete(handle);
- }
- }
};
IMPLEMENT_META_INTERFACE(CameraRecordingProxy, "android.hardware.ICameraRecordingProxy");
@@ -114,9 +69,7 @@
case START_RECORDING: {
ALOGV("START_RECORDING");
CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
- sp<ICameraRecordingProxyListener> listener =
- interface_cast<ICameraRecordingProxyListener>(data.readStrongBinder());
- reply->writeInt32(startRecording(listener));
+ reply->writeInt32(startRecording());
return NO_ERROR;
} break;
case STOP_RECORDING: {
@@ -125,46 +78,6 @@
stopRecording();
return NO_ERROR;
} break;
- case RELEASE_RECORDING_FRAME: {
- ALOGV("RELEASE_RECORDING_FRAME");
- CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
- sp<IMemory> mem = interface_cast<IMemory>(data.readStrongBinder());
- releaseRecordingFrame(mem);
- return NO_ERROR;
- } break;
- case RELEASE_RECORDING_FRAME_HANDLE: {
- ALOGV("RELEASE_RECORDING_FRAME_HANDLE");
- CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
-
- // releaseRecordingFrameHandle will be responsble to close the native handle.
- releaseRecordingFrameHandle(data.readNativeHandle());
- return NO_ERROR;
- } break;
- case RELEASE_RECORDING_FRAME_HANDLE_BATCH: {
- ALOGV("RELEASE_RECORDING_FRAME_HANDLE_BATCH");
- CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
- uint32_t n = 0;
- status_t res = data.readUint32(&n);
- if (res != OK) {
- ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
- return BAD_VALUE;
- }
- std::vector<native_handle_t*> handles;
- handles.reserve(n);
- for (uint32_t i = 0; i < n; i++) {
- native_handle_t* handle = data.readNativeHandle();
- if (handle == nullptr) {
- ALOGE("%s: Received a null native handle at handles[%d]",
- __FUNCTION__, i);
- return BAD_VALUE;
- }
- handles.push_back(handle);
- }
-
- // releaseRecordingFrameHandleBatch will be responsble to close the native handle.
- releaseRecordingFrameHandleBatch(handles);
- return NO_ERROR;
- } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
@@ -173,4 +86,3 @@
// ----------------------------------------------------------------------------
}; // namespace android
-
diff --git a/camera/ICameraRecordingProxyListener.cpp b/camera/ICameraRecordingProxyListener.cpp
deleted file mode 100644
index 66faf8f..0000000
--- a/camera/ICameraRecordingProxyListener.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ICameraRecordingProxyListener"
-#include <camera/CameraUtils.h>
-#include <camera/ICameraRecordingProxyListener.h>
-#include <binder/IMemory.h>
-#include <binder/Parcel.h>
-#include <media/hardware/HardwareAPI.h>
-#include <utils/Log.h>
-
-namespace android {
-
-enum {
- DATA_CALLBACK_TIMESTAMP = IBinder::FIRST_CALL_TRANSACTION,
- RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
- RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH
-};
-
-class BpCameraRecordingProxyListener: public BpInterface<ICameraRecordingProxyListener>
-{
-public:
- explicit BpCameraRecordingProxyListener(const sp<IBinder>& impl)
- : BpInterface<ICameraRecordingProxyListener>(impl)
- {
- }
-
- void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& imageData)
- {
- ALOGV("dataCallback");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
- data.writeInt64(timestamp);
- data.writeInt32(msgType);
- data.writeStrongBinder(IInterface::asBinder(imageData));
- remote()->transact(DATA_CALLBACK_TIMESTAMP, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
- void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle) {
- ALOGV("recordingFrameHandleCallbackTimestamp");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
- data.writeInt64(timestamp);
- data.writeNativeHandle(handle);
- remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP, data, &reply,
- IBinder::FLAG_ONEWAY);
-
- // The native handle is dupped in ICameraClient so we need to free it here.
- native_handle_close(handle);
- native_handle_delete(handle);
- }
-
- void recordingFrameHandleCallbackTimestampBatch(
- const std::vector<nsecs_t>& timestamps,
- const std::vector<native_handle_t*>& handles) {
- ALOGV("recordingFrameHandleCallbackTimestampBatch");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
-
- uint32_t n = timestamps.size();
- if (n != handles.size()) {
- ALOGE("%s: size of timestamps(%zu) and handles(%zu) mismatch!",
- __FUNCTION__, timestamps.size(), handles.size());
- return;
- }
- data.writeUint32(n);
- for (auto ts : timestamps) {
- data.writeInt64(ts);
- }
- for (auto& handle : handles) {
- data.writeNativeHandle(handle);
- }
- remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH, data, &reply,
- IBinder::FLAG_ONEWAY);
-
- // The native handle is dupped in ICameraClient so we need to free it here.
- for (auto& handle : handles) {
- native_handle_close(handle);
- native_handle_delete(handle);
- }
- }
-};
-
-IMPLEMENT_META_INTERFACE(CameraRecordingProxyListener, "android.hardware.ICameraRecordingProxyListener");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraRecordingProxyListener::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch(code) {
- case DATA_CALLBACK_TIMESTAMP: {
- ALOGV("DATA_CALLBACK_TIMESTAMP");
- CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
- nsecs_t timestamp = data.readInt64();
- int32_t msgType = data.readInt32();
- sp<IMemory> imageData = interface_cast<IMemory>(data.readStrongBinder());
- dataCallbackTimestamp(timestamp, msgType, imageData);
- return NO_ERROR;
- } break;
- case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP: {
- ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP");
- CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
- nsecs_t timestamp;
- status_t res = data.readInt64(&timestamp);
- if (res != OK) {
- ALOGE("%s: Failed to read timestamp: %s (%d)", __FUNCTION__, strerror(-res), res);
- return BAD_VALUE;
- }
-
- native_handle_t* handle = data.readNativeHandle();
- if (handle == nullptr) {
- ALOGE("%s: Received a null native handle", __FUNCTION__);
- return BAD_VALUE;
- }
- // The native handle will be freed in
- // BpCameraRecordingProxy::releaseRecordingFrameHandle.
- recordingFrameHandleCallbackTimestamp(timestamp, handle);
- return NO_ERROR;
- } break;
- case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH: {
- ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH");
- CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
- uint32_t n = 0;
- status_t res = data.readUint32(&n);
- if (res != OK) {
- ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
- return BAD_VALUE;
- }
- std::vector<nsecs_t> timestamps;
- std::vector<native_handle_t*> handles;
- timestamps.reserve(n);
- handles.reserve(n);
- for (uint32_t i = 0; i < n; i++) {
- nsecs_t t;
- res = data.readInt64(&t);
- if (res != OK) {
- ALOGE("%s: Failed to read timestamp[%d]: %s (%d)",
- __FUNCTION__, i, strerror(-res), res);
- return BAD_VALUE;
- }
- timestamps.push_back(t);
- }
- for (uint32_t i = 0; i < n; i++) {
- native_handle_t* handle = data.readNativeHandle();
- if (handle == nullptr) {
- ALOGE("%s: Received a null native handle at handles[%d]",
- __FUNCTION__, i);
- return BAD_VALUE;
- }
- handles.push_back(handle);
- }
- // The native handle will be freed in
- // BpCameraRecordingProxy::releaseRecordingFrameHandleBatch.
- recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
diff --git a/camera/TEST_MAPPING b/camera/TEST_MAPPING
new file mode 100644
index 0000000..683e183
--- /dev/null
+++ b/camera/TEST_MAPPING
@@ -0,0 +1,11 @@
+{
+ "postsubmit": [
+ {
+ "name": "CtsCameraTestCases"
+ },
+ {
+ "name": "CtsCameraTestCases",
+ "keywords": ["primary-device"]
+ }
+ ]
+}
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index d713d2d..24fa912 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -660,6 +660,16 @@
return sGlobalVendorTagDescriptorCache;
}
+bool VendorTagDescriptorCache::isVendorCachePresent(metadata_vendor_id_t vendorId) {
+ Mutex::Autolock al(sLock);
+ if ((sGlobalVendorTagDescriptorCache.get() != nullptr) &&
+ (sGlobalVendorTagDescriptorCache->getVendorIdsAndTagDescriptors().find(vendorId) !=
+ sGlobalVendorTagDescriptorCache->getVendorIdsAndTagDescriptors().end())) {
+ return true;
+ }
+ return false;
+}
+
extern "C" {
int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* /*v*/) {
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index ac7a35b..8af704d 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -69,7 +69,7 @@
/**
* Default UID/PID values for non-privileged callers of
- * connect(), connectDevice(), and connectLegacy()
+ * connect() and connectDevice()
*/
const int USE_CALLING_UID = -1;
const int USE_CALLING_PID = -1;
@@ -93,20 +93,6 @@
int clientUid);
/**
- * halVersion constant for connectLegacy
- */
- const int CAMERA_HAL_API_VERSION_UNSPECIFIED = -1;
-
- /**
- * Open a camera device in legacy mode, if supported by the camera module HAL.
- */
- ICamera connectLegacy(ICameraClient client,
- int cameraId,
- int halVersion,
- String opPackageName,
- int clientUid);
-
- /**
* Add listener for changes to camera device and flashlight state.
*
* Also returns the set of currently-known camera IDs and state of each device.
diff --git a/camera/include/camera/Camera.h b/camera/include/camera/Camera.h
index 2cdb617..5579183 100644
--- a/camera/include/camera/Camera.h
+++ b/camera/include/camera/Camera.h
@@ -24,7 +24,6 @@
#include <gui/IGraphicBufferProducer.h>
#include <system/camera.h>
#include <camera/ICameraRecordingProxy.h>
-#include <camera/ICameraRecordingProxyListener.h>
#include <camera/android/hardware/ICamera.h>
#include <camera/android/hardware/ICameraClient.h>
#include <camera/CameraBase.h>
@@ -84,10 +83,6 @@
const String16& clientPackageName,
int clientUid, int clientPid);
- static status_t connectLegacy(int cameraId, int halVersion,
- const String16& clientPackageName,
- int clientUid, sp<Camera>& camera);
-
virtual ~Camera();
status_t reconnect();
@@ -154,7 +149,6 @@
status_t setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
void setListener(const sp<CameraListener>& listener);
- void setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener);
// Configure preview callbacks to app. Only one of the older
// callbacks or the callback surface can be active at the same time;
@@ -187,12 +181,8 @@
explicit RecordingProxy(const sp<Camera>& camera);
// ICameraRecordingProxy interface
- virtual status_t startRecording(const sp<ICameraRecordingProxyListener>& listener);
+ virtual status_t startRecording();
virtual void stopRecording();
- virtual void releaseRecordingFrame(const sp<IMemory>& mem);
- virtual void releaseRecordingFrameHandle(native_handle_t* handle);
- virtual void releaseRecordingFrameHandleBatch(
- const std::vector<native_handle_t*>& handles);
private:
sp<Camera> mCamera;
@@ -203,8 +193,6 @@
Camera(const Camera&);
Camera& operator=(const Camera);
- sp<ICameraRecordingProxyListener> mRecordingProxyListener;
-
friend class CameraBase;
};
diff --git a/camera/include/camera/CameraMetadata.h b/camera/include/camera/CameraMetadata.h
index e883ffa..c56ee6d 100644
--- a/camera/include/camera/CameraMetadata.h
+++ b/camera/include/camera/CameraMetadata.h
@@ -242,6 +242,11 @@
static status_t getTagFromName(const char *name,
const VendorTagDescriptor* vTags, uint32_t *tag);
+ /**
+ * Return the current vendor tag id associated with this metadata.
+ */
+ metadata_vendor_id_t getVendorId();
+
private:
camera_metadata_t *mBuffer;
mutable bool mLocked;
diff --git a/camera/include/camera/ICameraRecordingProxy.h b/camera/include/camera/ICameraRecordingProxy.h
index 02af2f3..4306dc1 100644
--- a/camera/include/camera/ICameraRecordingProxy.h
+++ b/camera/include/camera/ICameraRecordingProxy.h
@@ -24,13 +24,11 @@
namespace android {
-class ICameraRecordingProxyListener;
-class IMemory;
class Parcel;
/*
- * The purpose of ICameraRecordingProxy and ICameraRecordingProxyListener is to
- * allow applications using the camera during recording.
+ * The purpose of ICameraRecordingProxy is to
+ * allow applications to use the camera during recording with the old camera API.
*
* Camera service allows only one client at a time. Since camcorder application
* needs to own the camera to do things like zoom, the media recorder cannot
@@ -42,35 +40,29 @@
* ICameraRecordingProxy
* startRecording()
* stopRecording()
- * releaseRecordingFrame()
*
- * ICameraRecordingProxyListener
- * dataCallbackTimestamp()
-
* The camcorder app opens the camera and starts the preview. The app passes
* ICamera and ICameraRecordingProxy to the media recorder by
* MediaRecorder::setCamera(). The recorder uses ICamera to setup the camera in
* MediaRecorder::start(). After setup, the recorder disconnects from camera
- * service. The recorder calls ICameraRecordingProxy::startRecording() and
- * passes a ICameraRecordingProxyListener to the app. The app connects back to
- * camera service and starts the recording. The app owns the camera and can do
- * things like zoom. The media recorder receives the video frames from the
- * listener and releases them by ICameraRecordingProxy::releaseRecordingFrame.
- * The recorder calls ICameraRecordingProxy::stopRecording() to stop the
- * recording.
+ * service. The recorder calls ICameraRecordingProxy::startRecording() and The
+ * app owns the camera and can do things like zoom. The media recorder receives
+ * the video frames via a buffer queue. The recorder calls
+ * ICameraRecordingProxy::stopRecording() to stop the recording.
*
* The call sequences are as follows:
* 1. The app: Camera.unlock().
* 2. The app: MediaRecorder.setCamera().
* 3. Start recording
* (1) The app: MediaRecorder.start().
- * (2) The recorder: ICamera.unlock() and ICamera.disconnect().
- * (3) The recorder: ICameraRecordingProxy.startRecording().
- * (4) The app: ICamera.reconnect().
- * (5) The app: ICamera.startRecording().
+ * (2) The recorder: ICamera.setVideoTarget(buffer queue).
+ * (3) The recorder: ICamera.unlock() and ICamera.disconnect().
+ * (4) The recorder: ICameraRecordingProxy.startRecording().
+ * (5) The app: ICamera.reconnect().
+ * (6) The app: ICamera.startRecording().
* 4. During recording
- * (1) The recorder: receive frames from ICameraRecordingProxyListener.dataCallbackTimestamp()
- * (2) The recorder: release frames by ICameraRecordingProxy.releaseRecordingFrame().
+ * (1) The recorder: receive frames via a buffer queue
+ * (2) The recorder: release frames via a buffer queue
* 5. Stop recording
* (1) The app: MediaRecorder.stop()
* (2) The recorder: ICameraRecordingProxy.stopRecording().
@@ -82,12 +74,8 @@
public:
DECLARE_META_INTERFACE(CameraRecordingProxy);
- virtual status_t startRecording(const sp<ICameraRecordingProxyListener>& listener) = 0;
+ virtual status_t startRecording() = 0;
virtual void stopRecording() = 0;
- virtual void releaseRecordingFrame(const sp<IMemory>& mem) = 0;
- virtual void releaseRecordingFrameHandle(native_handle_t *handle) = 0;
- virtual void releaseRecordingFrameHandleBatch(
- const std::vector<native_handle_t*>& handles) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/camera/include/camera/ICameraRecordingProxyListener.h b/camera/include/camera/ICameraRecordingProxyListener.h
deleted file mode 100644
index da03c56..0000000
--- a/camera/include/camera/ICameraRecordingProxyListener.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
-#define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
-
-#include <vector>
-#include <binder/IInterface.h>
-#include <cutils/native_handle.h>
-#include <stdint.h>
-#include <utils/RefBase.h>
-#include <utils/Timers.h>
-
-namespace android {
-
-class Parcel;
-class IMemory;
-
-class ICameraRecordingProxyListener: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(CameraRecordingProxyListener);
-
- virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType,
- const sp<IMemory>& data) = 0;
-
- virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
- native_handle_t* handle) = 0;
-
- virtual void recordingFrameHandleCallbackTimestampBatch(
- const std::vector<nsecs_t>& timestamps,
- const std::vector<native_handle_t*>& handles) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraRecordingProxyListener: public BnInterface<ICameraRecordingProxyListener>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/camera/include/camera/VendorTagDescriptor.h b/camera/include/camera/VendorTagDescriptor.h
index b2fbf3a..b3440d5 100644
--- a/camera/include/camera/VendorTagDescriptor.h
+++ b/camera/include/camera/VendorTagDescriptor.h
@@ -249,6 +249,12 @@
*/
static void clearGlobalVendorTagCache();
+ /**
+ * Return true if given vendor id is present in the vendor tag caches, return
+ * false otherwise.
+ */
+ static bool isVendorCachePresent(metadata_vendor_id_t vendorId);
+
};
} /* namespace android */
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index bfa60d9..631f6cd 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -527,6 +527,7 @@
case ACAMERA_LENS_OPTICAL_STABILIZATION_MODE:
case ACAMERA_NOISE_REDUCTION_MODE:
case ACAMERA_SCALER_CROP_REGION:
+ case ACAMERA_SCALER_ROTATE_AND_CROP:
case ACAMERA_SENSOR_EXPOSURE_TIME:
case ACAMERA_SENSOR_FRAME_DURATION:
case ACAMERA_SENSOR_SENSITIVITY:
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 1354fce..a2c2ca7 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3736,6 +3736,108 @@
ACAMERA_SCALER_AVAILABLE_RECOMMENDED_INPUT_OUTPUT_FORMATS_MAP =
// int32
ACAMERA_SCALER_START + 15,
+ /**
+ * <p>List of rotate-and-crop modes for ACAMERA_SCALER_ROTATE_AND_CROP that are supported by this camera device.</p>
+ *
+ * @see ACAMERA_SCALER_ROTATE_AND_CROP
+ *
+ * <p>Type: byte[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This entry lists the valid modes for ACAMERA_SCALER_ROTATE_AND_CROP for this camera device.</p>
+ * <p>Starting with API level 30, all devices will list at least <code>ROTATE_AND_CROP_NONE</code>.
+ * Devices with support for rotate-and-crop will additionally list at least
+ * <code>ROTATE_AND_CROP_AUTO</code> and <code>ROTATE_AND_CROP_90</code>.</p>
+ *
+ * @see ACAMERA_SCALER_ROTATE_AND_CROP
+ */
+ ACAMERA_SCALER_AVAILABLE_ROTATE_AND_CROP_MODES = // byte[n]
+ ACAMERA_SCALER_START + 16,
+ /**
+ * <p>Whether a rotation-and-crop operation is applied to processed
+ * outputs from the camera.</p>
+ *
+ * <p>Type: byte (acamera_metadata_enum_android_scaler_rotate_and_crop_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraCaptureSession_captureCallback_result callbacks</li>
+ * <li>ACaptureRequest</li>
+ * </ul></p>
+ *
+ * <p>This control is primarily intended to help camera applications with no support for
+ * multi-window modes to work correctly on devices where multi-window scenarios are
+ * unavoidable, such as foldables or other devices with variable display geometry or more
+ * free-form window placement (such as laptops, which often place portrait-orientation apps
+ * in landscape with pillarboxing).</p>
+ * <p>If supported, the default value is <code>ROTATE_AND_CROP_AUTO</code>, which allows the camera API
+ * to enable backwards-compatibility support for applications that do not support resizing
+ * / multi-window modes, when the device is in fact in a multi-window mode (such as inset
+ * portrait on laptops, or on a foldable device in some fold states). In addition,
+ * <code>ROTATE_AND_CROP_NONE</code> and <code>ROTATE_AND_CROP_90</code> will always be available if this control
+ * is supported by the device. If not supported, devices API level 30 or higher will always
+ * list only <code>ROTATE_AND_CROP_NONE</code>.</p>
+ * <p>When <code>CROP_AUTO</code> is in use, and the camera API activates backward-compatibility mode,
+ * several metadata fields will also be parsed differently to ensure that coordinates are
+ * correctly handled for features like drawing face detection boxes or passing in
+ * tap-to-focus coordinates. The camera API will convert positions in the active array
+ * coordinate system to/from the cropped-and-rotated coordinate system to make the
+ * operation transparent for applications. The following controls are affected:</p>
+ * <ul>
+ * <li>ACAMERA_CONTROL_AE_REGIONS</li>
+ * <li>ACAMERA_CONTROL_AF_REGIONS</li>
+ * <li>ACAMERA_CONTROL_AWB_REGIONS</li>
+ * <li>android.statistics.faces</li>
+ * </ul>
+ * <p>Capture results will contain the actual value selected by the API;
+ * <code>ROTATE_AND_CROP_AUTO</code> will never be seen in a capture result.</p>
+ * <p>Applications can also select their preferred cropping mode, either to opt out of the
+ * backwards-compatibility treatment, or to use the cropping feature themselves as needed.
+ * In this case, no coordinate translation will be done automatically, and all controls
+ * will continue to use the normal active array coordinates.</p>
+ * <p>Cropping and rotating is done after the application of digital zoom (via either
+ * ACAMERA_SCALER_CROP_REGION or ACAMERA_CONTROL_ZOOM_RATIO), but before each individual
+ * output is further cropped and scaled. It only affects processed outputs such as
+ * YUV, PRIVATE, and JPEG. It has no effect on RAW outputs.</p>
+ * <p>When <code>CROP_90</code> or <code>CROP_270</code> are selected, there is a significant loss to the field of
+ * view. For example, with a 4:3 aspect ratio output of 1600x1200, <code>CROP_90</code> will still
+ * produce 1600x1200 output, but these buffers are cropped from a vertical 3:4 slice at the
+ * center of the 4:3 area, then rotated to be 4:3, and then upscaled to 1600x1200. Only
+ * 56.25% of the original FOV is still visible. In general, for an aspect ratio of <code>w:h</code>,
+ * the crop and rotate operation leaves <code>(h/w)^2</code> of the field of view visible. For 16:9,
+ * this is ~31.6%.</p>
+ * <p>As a visual example, the figure below shows the effect of <code>ROTATE_AND_CROP_90</code> on the
+ * outputs for the following parameters:</p>
+ * <ul>
+ * <li>Sensor active array: <code>2000x1500</code></li>
+ * <li>Crop region: top-left: <code>(500, 375)</code>, size: <code>(1000, 750)</code> (4:3 aspect ratio)</li>
+ * <li>Output streams: YUV <code>640x480</code> and YUV <code>1280x720</code></li>
+ * <li><code>ROTATE_AND_CROP_90</code></li>
+ * </ul>
+ * <p><img alt="Effect of ROTATE_AND_CROP_90" src="../images/camera2/metadata/android.scaler.rotateAndCrop/crop-region-rotate-90-43-ratio.png" /></p>
+ * <p>With these settings, the regions of the active array covered by the output streams are:</p>
+ * <ul>
+ * <li>640x480 stream crop: top-left: <code>(219, 375)</code>, size: <code>(562, 750)</code></li>
+ * <li>1280x720 stream crop: top-left: <code>(289, 375)</code>, size: <code>(422, 750)</code></li>
+ * </ul>
+ * <p>Since the buffers are rotated, the buffers as seen by the application are:</p>
+ * <ul>
+ * <li>640x480 stream: top-left: <code>(781, 375)</code> on active array, size: <code>(640, 480)</code>, downscaled 1.17x from sensor pixels</li>
+ * <li>1280x720 stream: top-left: <code>(711, 375)</code> on active array, size: <code>(1280, 720)</code>, upscaled 1.71x from sensor pixels</li>
+ * </ul>
+ *
+ * @see ACAMERA_CONTROL_AE_REGIONS
+ * @see ACAMERA_CONTROL_AF_REGIONS
+ * @see ACAMERA_CONTROL_AWB_REGIONS
+ * @see ACAMERA_CONTROL_ZOOM_RATIO
+ * @see ACAMERA_SCALER_CROP_REGION
+ */
+ ACAMERA_SCALER_ROTATE_AND_CROP = // byte (acamera_metadata_enum_android_scaler_rotate_and_crop_t)
+ ACAMERA_SCALER_START + 17,
ACAMERA_SCALER_END,
/**
@@ -8298,6 +8400,51 @@
} acamera_metadata_enum_android_scaler_available_recommended_stream_configurations_t;
+// ACAMERA_SCALER_ROTATE_AND_CROP
+typedef enum acamera_metadata_enum_acamera_scaler_rotate_and_crop {
+ /**
+ * <p>No rotate and crop is applied. Processed outputs are in the sensor orientation.</p>
+ */
+ ACAMERA_SCALER_ROTATE_AND_CROP_NONE = 0,
+
+ /**
+ * <p>Processed images are rotated by 90 degrees clockwise, and then cropped
+ * to the original aspect ratio.</p>
+ */
+ ACAMERA_SCALER_ROTATE_AND_CROP_90 = 1,
+
+ /**
+ * <p>Processed images are rotated by 180 degrees. Since the aspect ratio does not
+ * change, no cropping is performed.</p>
+ */
+ ACAMERA_SCALER_ROTATE_AND_CROP_180 = 2,
+
+ /**
+ * <p>Processed images are rotated by 270 degrees clockwise, and then cropped
+ * to the original aspect ratio.</p>
+ */
+ ACAMERA_SCALER_ROTATE_AND_CROP_270 = 3,
+
+ /**
+ * <p>The camera API automatically selects the best concrete value for
+ * rotate-and-crop based on the application's support for resizability and the current
+ * multi-window mode.</p>
+ * <p>If the application does not support resizing but the display mode for its main
+ * Activity is not in a typical orientation, the camera API will set <code>ROTATE_AND_CROP_90</code>
+ * or some other supported rotation value, depending on device configuration,
+ * to ensure preview and captured images are correctly shown to the user. Otherwise,
+ * <code>ROTATE_AND_CROP_NONE</code> will be selected.</p>
+ * <p>When a value other than NONE is selected, several metadata fields will also be parsed
+ * differently to ensure that coordinates are correctly handled for features like drawing
+ * face detection boxes or passing in tap-to-focus coordinates. The camera API will
+ * convert positions in the active array coordinate system to/from the cropped-and-rotated
+ * coordinate system to make the operation transparent for applications.</p>
+ * <p>No coordinate mapping will be done when the application selects a non-AUTO mode.</p>
+ */
+ ACAMERA_SCALER_ROTATE_AND_CROP_AUTO = 4,
+
+} acamera_metadata_enum_android_scaler_rotate_and_crop_t;
+
// ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
typedef enum acamera_metadata_enum_acamera_sensor_reference_illuminant1 {
diff --git a/camera/ndk/include/camera/NdkCameraWindowType.h b/camera/ndk/include/camera/NdkCameraWindowType.h
index 99f67e9..df977da 100644
--- a/camera/ndk/include/camera/NdkCameraWindowType.h
+++ b/camera/ndk/include/camera/NdkCameraWindowType.h
@@ -44,7 +44,7 @@
*/
#ifdef __ANDROID_VNDK__
#include <cutils/native_handle.h>
-typedef native_handle_t ACameraWindowType;
+typedef const native_handle_t ACameraWindowType;
#else
#include <android/native_window.h>
typedef ANativeWindow ACameraWindowType;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h b/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
index e1af8c1..5a1af79 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraCaptureSessionVendor.h
@@ -18,7 +18,7 @@
#include "utils.h"
struct ACaptureSessionOutput {
- explicit ACaptureSessionOutput(native_handle_t* window, bool isShared = false,
+ explicit ACaptureSessionOutput(const native_handle_t* window, bool isShared = false,
const char* physicalCameraId = "") :
mWindow(window), mIsShared(isShared), mPhysicalCameraId(physicalCameraId) {};
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index e511a3f..0fcb700 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -355,7 +355,7 @@
std::vector<int32_t> requestStreamIdxList;
std::vector<int32_t> requestSurfaceIdxList;
for (auto outputTarget : request->targets->mOutputs) {
- native_handle_t* anw = outputTarget.mWindow;
+ const native_handle_t* anw = outputTarget.mWindow;
bool found = false;
req->mSurfaceList.push_back(anw);
// lookup stream/surface ID
@@ -434,7 +434,7 @@
}
pRequest->targets = new ACameraOutputTargets();
for (size_t i = 0; i < req->mSurfaceList.size(); i++) {
- native_handle_t* anw = req->mSurfaceList[i];
+ const native_handle_t* anw = req->mSurfaceList[i];
ACameraOutputTarget outputTarget(anw);
pRequest->targets->mOutputs.insert(outputTarget);
}
@@ -611,7 +611,7 @@
std::set<std::pair<native_handle_ptr_wrapper, OutputConfigurationWrapper>> outputSet;
for (auto outConfig : outputs->mOutputs) {
- native_handle_t* anw = outConfig.mWindow;
+ const native_handle_t* anw = outConfig.mWindow;
OutputConfigurationWrapper outConfigInsertW;
OutputConfiguration &outConfigInsert = outConfigInsertW.mOutputConfiguration;
outConfigInsert.rotation = utils::convertToHidl(outConfig.mRotation);
@@ -846,8 +846,7 @@
for (auto streamAndWindowId : request->mCaptureRequest.streamAndWindowIds) {
int32_t windowId = streamAndWindowId.windowId;
if (utils::isWindowNativeHandleEqual(windowHandles[windowId],outHandle)) {
- native_handle_t* anw =
- const_cast<native_handle_t *>(windowHandles[windowId].getNativeHandle());
+ const native_handle_t* anw = windowHandles[windowId].getNativeHandle();
ALOGV("Camera %s Lost output buffer for ANW %p frame %" PRId64,
getId(), anw, frameNumber);
@@ -1244,7 +1243,7 @@
return;
}
- native_handle_t* anw;
+ const native_handle_t* anw;
found = msg->findPointer(kAnwKey, (void**) &anw);
if (!found) {
ALOGE("%s: Cannot find native_handle_t!", __FUNCTION__);
diff --git a/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h b/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h
index ed67615..5715d77 100644
--- a/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h
+++ b/camera/ndk/ndk_vendor/impl/ACaptureRequestVendor.h
@@ -17,7 +17,7 @@
#include "utils.h"
struct ACameraOutputTarget {
- explicit ACameraOutputTarget(native_handle_t* window) : mWindow(window) {};
+ explicit ACameraOutputTarget(const native_handle_t* window) : mWindow(window) {};
bool operator == (const ACameraOutputTarget& other) const {
return mWindow == other.mWindow;
diff --git a/camera/ndk/ndk_vendor/impl/utils.h b/camera/ndk/ndk_vendor/impl/utils.h
index f389f03..6f5820e 100644
--- a/camera/ndk/ndk_vendor/impl/utils.h
+++ b/camera/ndk/ndk_vendor/impl/utils.h
@@ -42,7 +42,7 @@
// Utility class so that CaptureRequest can be stored by sp<>
struct CaptureRequest : public RefBase {
frameworks::cameraservice::device::V2_0::CaptureRequest mCaptureRequest;
- std::vector<native_handle_t *> mSurfaceList;
+ std::vector<const native_handle_t *> mSurfaceList;
//Physical camera settings metadata is stored here, since the capture request
//might not contain it. That's since, fmq might have consumed it.
hidl_vec<PhysicalCameraSettings> mPhysicalCameraSettings;
@@ -62,13 +62,13 @@
// Utility class so the native_handle_t can be compared with its contents instead
// of just raw pointer comparisons.
struct native_handle_ptr_wrapper {
- native_handle_t *mWindow = nullptr;
+ const native_handle_t *mWindow = nullptr;
- native_handle_ptr_wrapper(native_handle_t *nh) : mWindow(nh) { }
+ native_handle_ptr_wrapper(const native_handle_t *nh) : mWindow(nh) { }
native_handle_ptr_wrapper() = default;
- operator native_handle_t *() const { return mWindow; }
+ operator const native_handle_t *() const { return mWindow; }
bool operator ==(const native_handle_ptr_wrapper other) const {
return isWindowNativeHandleEqual(mWindow, other.mWindow);
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 938b5f5..ba14c5c 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -50,7 +50,7 @@
static constexpr int kTestImageFormat = AIMAGE_FORMAT_YUV_420_888;
using android::hardware::camera::common::V1_0::helper::VendorTagDescriptorCache;
-using ConfiguredWindows = std::set<native_handle_t *>;
+using ConfiguredWindows = std::set<const native_handle_t *>;
class CameraHelper {
public:
@@ -60,11 +60,11 @@
struct PhysicalImgReaderInfo {
const char* physicalCameraId;
- native_handle_t* anw;
+ const native_handle_t* anw;
};
// Retaining the error code in case the caller needs to analyze it.
- std::variant<int, ConfiguredWindows> initCamera(native_handle_t* imgReaderAnw,
+ std::variant<int, ConfiguredWindows> initCamera(const native_handle_t* imgReaderAnw,
const std::vector<PhysicalImgReaderInfo>& physicalImgReaders,
bool usePhysicalSettings) {
ConfiguredWindows configuredWindows;
@@ -257,7 +257,7 @@
ACameraDevice_StateCallbacks mDeviceCb{this, nullptr, nullptr};
ACameraCaptureSession_stateCallbacks mSessionCb{ this, nullptr, nullptr, nullptr};
- native_handle_t* mImgReaderAnw = nullptr; // not owned by us.
+ const native_handle_t* mImgReaderAnw = nullptr; // not owned by us.
// Camera device
ACameraDevice* mDevice = nullptr;
@@ -396,7 +396,7 @@
return 0;
}
- native_handle_t* getNativeWindow() { return mImgReaderAnw; }
+ const native_handle_t* getNativeWindow() { return mImgReaderAnw; }
int getAcquiredImageCount() {
std::lock_guard<std::mutex> lock(mMutex);
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index f4fb626..b31a58b 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -273,14 +273,11 @@
SurfaceComposerClient::Transaction& t,
const sp<IBinder>& dpy,
const ui::DisplayState& displayState) {
- const ui::Size& viewport = displayState.viewport;
-
- // Set the region of the layer stack we're interested in, which in our
- // case is "all of it".
- Rect layerStackRect(viewport);
+ // Set the region of the layer stack we're interested in, which in our case is "all of it".
+ Rect layerStackRect(displayState.layerStackSpaceRect);
// We need to preserve the aspect ratio of the display.
- float displayAspect = viewport.getHeight() / static_cast<float>(viewport.getWidth());
+ float displayAspect = layerStackRect.getHeight() / static_cast<float>(layerStackRect.getWidth());
// Set the way we map the output onto the display surface (which will
@@ -699,20 +696,21 @@
return err;
}
- const ui::Size& viewport = displayState.viewport;
+ const ui::Size& layerStackSpaceRect = displayState.layerStackSpaceRect;
if (gVerbose) {
printf("Display is %dx%d @%.2ffps (orientation=%s), layerStack=%u\n",
- viewport.getWidth(), viewport.getHeight(), displayConfig.refreshRate,
- toCString(displayState.orientation), displayState.layerStack);
+ layerStackSpaceRect.getWidth(), layerStackSpaceRect.getHeight(),
+ displayConfig.refreshRate, toCString(displayState.orientation),
+ displayState.layerStack);
fflush(stdout);
}
// Encoder can't take odd number as config
if (gVideoWidth == 0) {
- gVideoWidth = floorToEven(viewport.getWidth());
+ gVideoWidth = floorToEven(layerStackSpaceRect.getWidth());
}
if (gVideoHeight == 0) {
- gVideoHeight = floorToEven(viewport.getHeight());
+ gVideoHeight = floorToEven(layerStackSpaceRect.getHeight());
}
// Configure and start the encoder.
@@ -1170,14 +1168,14 @@
}
break;
case 'd':
- gPhysicalDisplayId = atoll(optarg);
- if (gPhysicalDisplayId == 0) {
+ gPhysicalDisplayId = PhysicalDisplayId(atoll(optarg));
+ if (gPhysicalDisplayId.value == 0) {
fprintf(stderr, "Please specify a valid physical display id\n");
return 2;
} else if (SurfaceComposerClient::
getPhysicalDisplayToken(gPhysicalDisplayId) == nullptr) {
- fprintf(stderr, "Invalid physical display id: %"
- ANDROID_PHYSICAL_DISPLAY_ID_FORMAT "\n", gPhysicalDisplayId);
+ fprintf(stderr, "Invalid physical display id: %s\n",
+ to_string(gPhysicalDisplayId).c_str());
return 2;
}
break;
diff --git a/cmds/stagefright/AudioPlayer.cpp b/cmds/stagefright/AudioPlayer.cpp
index eb76953..55427ca 100644
--- a/cmds/stagefright/AudioPlayer.cpp
+++ b/cmds/stagefright/AudioPlayer.cpp
@@ -134,15 +134,18 @@
success = format->findInt32(kKeySampleRate, &mSampleRate);
CHECK(success);
- int32_t numChannels, channelMask;
+ int32_t numChannels;
success = format->findInt32(kKeyChannelCount, &numChannels);
CHECK(success);
- if(!format->findInt32(kKeyChannelMask, &channelMask)) {
+ audio_channel_mask_t channelMask;
+ if (int32_t rawChannelMask; !format->findInt32(kKeyChannelMask, &rawChannelMask)) {
// log only when there's a risk of ambiguity of channel mask selection
ALOGI_IF(numChannels > 2,
"source format didn't specify channel mask, using (%d) channel order", numChannels);
channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+ } else {
+ channelMask = static_cast<audio_channel_mask_t>(rawChannelMask);
}
audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 37091c4..098c278 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -259,31 +259,6 @@
printf("$\n");
#endif
-#if 0
- CameraSource *source = CameraSource::Create(
- String16(argv[0], strlen(argv[0])));
- source->start();
-
- printf("source = %p\n", source);
-
- for (int i = 0; i < 100; ++i) {
- MediaBuffer *buffer;
- status_t err = source->read(&buffer);
- CHECK_EQ(err, (status_t)OK);
-
- printf("got a frame, data=%p, size=%d\n",
- buffer->data(), buffer->range_length());
-
- buffer->release();
- buffer = NULL;
- }
-
- err = source->stop();
-
- delete source;
- source = NULL;
-#endif
-
if (err != OK && err != ERROR_END_OF_STREAM) {
fprintf(stderr, "record failed: %d\n", err);
return 1;
diff --git a/drm/TEST_MAPPING b/drm/TEST_MAPPING
index 2595e3e..9f6a532 100644
--- a/drm/TEST_MAPPING
+++ b/drm/TEST_MAPPING
@@ -9,17 +9,9 @@
},
{
"include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
}
]
}
diff --git a/include/drm/TEST_MAPPING b/include/drm/TEST_MAPPING
index 28e432e..512e844 100644
--- a/include/drm/TEST_MAPPING
+++ b/include/drm/TEST_MAPPING
@@ -8,17 +8,9 @@
},
{
"include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
}
]
}
diff --git a/include/media/MmapStreamInterface.h b/include/media/MmapStreamInterface.h
index b3bf16d..61de987 100644
--- a/include/media/MmapStreamInterface.h
+++ b/include/media/MmapStreamInterface.h
@@ -22,6 +22,8 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
+#include <time.h>
+
namespace android {
class MmapStreamCallback;
@@ -103,6 +105,19 @@
virtual status_t getMmapPosition(struct audio_mmap_position *position) = 0;
/**
+ * Get a recent count of the number of audio frames presented/received to/from an
+ * external observer.
+ *
+ * \param[out] position count of presented audio frames
+ * \param[out] timeNanos associated clock time
+ *
+ * \return OK if the external position is set correctly.
+ * NO_INIT in case of initialization error
+ * INVALID_OPERATION if the interface is not implemented
+ */
+ virtual status_t getExternalPosition(uint64_t* position, int64_t* timeNanos) = 0;
+
+ /**
* Start a stream operating in mmap mode.
* createMmapBuffer() must be called before calling start()
*
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index a6dfb21..50facfb 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -1,32 +1,63 @@
+// for frameworks/av/media
{
- "presubmit": [
- {
- "name": "GtsMediaTestCases",
- "options" : [
+ "presubmit": [
+ // runs whenever we change something in this tree
{
- "include-annotation": "android.platform.test.annotations.Presubmit"
+ "name": "CtsMediaTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.cts.EncodeDecodeTest"
+ }
+ ]
},
{
- "include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
- }
- ]
- },
- {
- "name": "GtsExoPlayerTestCases",
- "options" : [
- {
- "include-annotation": "android.platform.test.annotations.SocPresubmit"
+ "name": "CtsMediaTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.cts.DecodeEditEncodeTest"
+ }
+ ]
},
{
- "include-filter": "com.google.android.exoplayer.gts.DashTest#testWidevine23FpsH264Fixed"
+ "name": "GtsMediaTestCases",
+ "options" : [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
+ },
+ {
+ "include-filter": "com.google.android.media.gts.WidevineYouTubePerformanceTests"
+ }
+ ]
}
- ]
- }
- ],
- "imports": [
- {
- "path": "frameworks/av/drm/mediadrm/plugins"
- }
- ]
-}
+ ],
+ "imports": [
+ {
+ "path": "frameworks/av/drm/mediadrm/plugins"
+ }
+ ],
+
+ "platinum-postsubmit": [
+ // runs regularly, independent of changes in this tree.
+ // signals if changes elsewhere break media functionality
+ {
+ "name": "CtsMediaTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.cts.EncodeDecodeTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsMediaTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.cts.DecodeEditEncodeTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/media/audioserver/Android.bp b/media/audioserver/Android.bp
new file mode 100644
index 0000000..ca3c81c
--- /dev/null
+++ b/media/audioserver/Android.bp
@@ -0,0 +1,58 @@
+cc_binary {
+ name: "audioserver",
+
+ srcs: [
+ "main_audioserver.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+
+ header_libs: [
+ "libaudiohal_headers",
+ "libmediametrics_headers",
+ ],
+
+ shared_libs: [
+ "libaaudioservice",
+ "libaudioflinger",
+ "libaudiopolicyservice",
+ "libaudioprocessing",
+ "libbinder",
+ "libcutils",
+ "libhidlbase",
+ "liblog",
+ "libmedia",
+ "libmedialogservice",
+ "libmediautils",
+ "libnbaio",
+ "libnblog",
+ "libpowermanager",
+ "libutils",
+ "libvibrator",
+
+ ],
+
+ // TODO check if we still need all of these include directories
+ include_dirs: [
+ "external/sonic",
+ "frameworks/av/media/libaaudio/include",
+ "frameworks/av/media/libaaudio/src",
+ "frameworks/av/media/libaaudio/src/binding",
+ "frameworks/av/media/libmedia/include",
+ "frameworks/av/services/audioflinger",
+ "frameworks/av/services/audiopolicy",
+ "frameworks/av/services/audiopolicy/common/include",
+ "frameworks/av/services/audiopolicy/common/managerdefinitions/include",
+ "frameworks/av/services/audiopolicy/engine/interface",
+ "frameworks/av/services/audiopolicy/service",
+ "frameworks/av/services/medialog",
+
+ // TODO oboeservice is the old folder name for aaudioservice. It will be changed.
+ "frameworks/av/services/oboeservice",
+ ],
+
+ init_rc: ["audioserver.rc"],
+}
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
deleted file mode 100644
index cf1c14c..0000000
--- a/media/audioserver/Android.mk
+++ /dev/null
@@ -1,51 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- main_audioserver.cpp \
-
-LOCAL_SHARED_LIBRARIES := \
- libaaudioservice \
- libaudioflinger \
- libaudiopolicyservice \
- libaudioprocessing \
- libbinder \
- libcutils \
- liblog \
- libhidlbase \
- libmedia \
- libmedialogservice \
- libmediautils \
- libnbaio \
- libnblog \
- libutils \
- libvibrator
-
-LOCAL_HEADER_LIBRARIES := \
- libaudiohal_headers \
- libmediametrics_headers \
-
-# TODO oboeservice is the old folder name for aaudioservice. It will be changed.
-LOCAL_C_INCLUDES := \
- frameworks/av/services/audioflinger \
- frameworks/av/services/audiopolicy \
- frameworks/av/services/audiopolicy/common/managerdefinitions/include \
- frameworks/av/services/audiopolicy/common/include \
- frameworks/av/services/audiopolicy/engine/interface \
- frameworks/av/services/audiopolicy/service \
- frameworks/av/services/medialog \
- frameworks/av/services/oboeservice \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src \
- frameworks/av/media/libaaudio/src/binding \
- frameworks/av/media/libmedia/include \
- external/sonic \
-
-LOCAL_MODULE := audioserver
-
-LOCAL_INIT_RC := audioserver.rc
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_EXECUTABLE)
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index f75e4c7..c4a6601 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -8,6 +8,7 @@
task_profiles ProcessCapacityHigh HighPerformance
onrestart restart vendor.audio-hal
onrestart restart vendor.audio-hal-4-0-msd
+ onrestart restart audio_proxy_service
# Keep the original service names for backward compatibility
onrestart restart vendor.audio-hal-2-0
onrestart restart audio-hal-2-0
@@ -20,6 +21,7 @@
on property:init.svc.audioserver=stopped
stop vendor.audio-hal
stop vendor.audio-hal-4-0-msd
+ stop audio_proxy_service
# Keep the original service names for backward compatibility
stop vendor.audio-hal-2-0
stop audio-hal-2-0
@@ -28,6 +30,7 @@
# audioserver bringing it back into running state.
start vendor.audio-hal
start vendor.audio-hal-4-0-msd
+ start audio_proxy_service
# Keep the original service names for backward compatibility
start vendor.audio-hal-2-0
start audio-hal-2-0
@@ -35,6 +38,7 @@
on property:init.svc.audioserver=running
start vendor.audio-hal
start vendor.audio-hal-4-0-msd
+ start audio_proxy_service
# Keep the original service names for backward compatibility
start vendor.audio-hal-2-0
start audio-hal-2-0
@@ -44,10 +48,12 @@
# Keep the original service names for backward compatibility
stop vendor.audio-hal
stop vendor.audio-hal-4-0-msd
+ stop audio_proxy_service
stop vendor.audio-hal-2-0
stop audio-hal-2-0
start vendor.audio-hal
start vendor.audio-hal-4-0-msd
+ start audio_proxy_service
start vendor.audio-hal-2-0
start audio-hal-2-0
# reset the property
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 17309dd..8ee1efb 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -29,8 +29,8 @@
#include <mediautils/LimitProcessMemory.h>
#include <utils/Log.h>
-// from LOCAL_C_INCLUDES
-#include "aaudio/AAudioTesting.h"
+// from include_dirs
+#include "aaudio/AAudioTesting.h" // aaudio_policy_t, AAUDIO_PROP_MMAP_POLICY, AAUDIO_POLICY_*
#include "AudioFlinger.h"
#include "AudioPolicyService.h"
#include "AAudioService.h"
diff --git a/media/bufferpool/1.0/TEST_MAPPING b/media/bufferpool/1.0/TEST_MAPPING
new file mode 100644
index 0000000..a1e6a58
--- /dev/null
+++ b/media/bufferpool/1.0/TEST_MAPPING
@@ -0,0 +1,8 @@
+// mappings for frameworks/av/media/bufferpool/1.0
+{
+ "presubmit": [
+
+ { "name": "VtsVndkHidlBufferpoolV1_0TargetSingleTest" },
+ { "name": "VtsVndkHidlBufferpoolV1_0TargetMultiTest"}
+ ]
+}
diff --git a/media/bufferpool/1.0/vts/Android.bp b/media/bufferpool/1.0/vts/Android.bp
index ee5a757..691ed40 100644
--- a/media/bufferpool/1.0/vts/Android.bp
+++ b/media/bufferpool/1.0/vts/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "VtsVndkHidlBufferpoolV1_0TargetSingleTest",
+ test_suites: ["device-tests"],
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"allocator.cpp",
@@ -34,6 +35,7 @@
cc_test {
name: "VtsVndkHidlBufferpoolV1_0TargetMultiTest",
+ test_suites: ["device-tests"],
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"allocator.cpp",
diff --git a/media/bufferpool/2.0/TEST_MAPPING b/media/bufferpool/2.0/TEST_MAPPING
new file mode 100644
index 0000000..65dee2c
--- /dev/null
+++ b/media/bufferpool/2.0/TEST_MAPPING
@@ -0,0 +1,7 @@
+// mappings for frameworks/av/media/bufferpool/2.0
+{
+ "presubmit": [
+ { "name": "VtsVndkHidlBufferpoolV2_0TargetSingleTest"},
+ { "name": "VtsVndkHidlBufferpoolV2_0TargetMultiTest"}
+ ]
+}
diff --git a/media/bufferpool/2.0/tests/Android.bp b/media/bufferpool/2.0/tests/Android.bp
index 8b44f61..8492939 100644
--- a/media/bufferpool/2.0/tests/Android.bp
+++ b/media/bufferpool/2.0/tests/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "VtsVndkHidlBufferpoolV2_0TargetSingleTest",
+ test_suites: ["device-tests"],
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"allocator.cpp",
@@ -34,6 +35,7 @@
cc_test {
name: "VtsVndkHidlBufferpoolV2_0TargetMultiTest",
+ test_suites: ["device-tests"],
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"allocator.cpp",
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 8afa1a8..fca3477 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -1,5 +1,10 @@
{
"presubmit": [
+ // TODO failing 4 of 13
+ // { "name": "codec2_core_param_test"},
+ // TODO(b/155516524)
+ // { "name": "codec2_vndk_interface_test"},
+ { "name": "codec2_vndk_test"},
{
"name": "CtsMediaTestCases",
"options": [
diff --git a/media/codec2/components/aac/DrcPresModeWrap.cpp b/media/codec2/components/aac/DrcPresModeWrap.cpp
index bee969b..7ce5c9d 100644
--- a/media/codec2/components/aac/DrcPresModeWrap.cpp
+++ b/media/codec2/components/aac/DrcPresModeWrap.cpp
@@ -161,7 +161,7 @@
int newHeavy = mDesHeavy;
if (mDataUpdate) {
- // sanity check
+ // Validation check
if ((mDesTarget < MAX_TARGET_LEVEL) && (mDesTarget != -1)){
mDesTarget = MAX_TARGET_LEVEL; // limit target level to -10 dB or below
newTarget = MAX_TARGET_LEVEL;
@@ -217,7 +217,7 @@
}
else { // handle other used encoder target levels
- // Sanity check: DRC presentation mode is only specified for max. 5.1 channels
+ // Validation check: DRC presentation mode is only specified for max. 5.1 channels
if (mStreamNrAACChan > 6) {
drcPresMode = 0;
}
@@ -308,7 +308,7 @@
} // switch()
} // if (mEncoderTarget == GPM_ENCODER_TARGET_LEVEL)
- // sanity again
+ // Validation check again
if (newHeavy == 1) {
newBoostFactor=127; // not really needed as the same would be done by the decoder anyway
newAttFactor = 127;
diff --git a/media/codec2/components/flac/C2SoftFlacEnc.cpp b/media/codec2/components/flac/C2SoftFlacEnc.cpp
index 408db7e..72910c5 100644
--- a/media/codec2/components/flac/C2SoftFlacEnc.cpp
+++ b/media/codec2/components/flac/C2SoftFlacEnc.cpp
@@ -262,9 +262,10 @@
work->result = C2_NO_MEMORY;
return;
}
- C2WriteView wView = mOutputBlock->map().get();
- if (wView.error()) {
- ALOGE("write view map failed %d", wView.error());
+
+ err = mOutputBlock->map().get().error();
+ if (err) {
+ ALOGE("write view map failed %d", err);
work->result = C2_CORRUPTED;
return;
}
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index a8b5377..d3b6e31 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -359,6 +359,10 @@
}
memcpy(&numPageFrames, data + inSize - sizeof(numPageFrames), sizeof(numPageFrames));
inSize -= sizeof(numPageFrames);
+ if (inSize == 0) {
+ // empty buffer, ignore
+ return;
+ }
if (numPageFrames >= 0) {
mNumFramesLeftOnPage = numPageFrames;
}
@@ -409,7 +413,7 @@
mState, reinterpret_cast<int16_t *> (wView.data()),
kMaxNumSamplesPerChannel);
if (numFrames < 0) {
- ALOGD("vorbis_dsp_pcmout returned %d", numFrames);
+ ALOGD("vorbis_dsp_pcmout returned %d frames", numFrames);
numFrames = 0;
}
}
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index ba19565..97145c3 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1721,7 +1721,13 @@
}
}
- if (notifyClient && !buffer && !flags) {
+ bool drop = false;
+ if (worklet->output.flags & C2FrameData::FLAG_DROP_FRAME) {
+ ALOGV("[%s] onWorkDone: drop buffer but keep metadata", mName);
+ drop = true;
+ }
+
+ if (notifyClient && !buffer && !flags && !(drop && outputFormat)) {
ALOGV("[%s] onWorkDone: Not reporting output buffer (%lld)",
mName, work->input.ordinal.frameIndex.peekull());
notifyClient = false;
@@ -1748,7 +1754,7 @@
return false;
}
output->buffers->pushToStash(
- buffer,
+ drop ? nullptr : buffer,
notifyClient,
timestamp.peek(),
flags,
diff --git a/media/codec2/sfplugin/TEST_MAPPING b/media/codec2/sfplugin/TEST_MAPPING
new file mode 100644
index 0000000..045e5b5
--- /dev/null
+++ b/media/codec2/sfplugin/TEST_MAPPING
@@ -0,0 +1,12 @@
+// mappings for frameworks/av/media/codec2/sfplugin
+{
+ "presubmit": [
+ // failing 1 of 11
+ // TODO(b/156167471)
+ // { "name": "ccodec_unit_test" },
+
+ // failing 4 of 17, around max-input-size defaults & overrides
+ // TODO(b/156167471)
+ //{ "name": "mc_sanity_test"}
+ ]
+}
diff --git a/media/codec2/sfplugin/tests/Android.bp b/media/codec2/sfplugin/tests/Android.bp
index 8d1a9c3..5c774a2 100644
--- a/media/codec2/sfplugin/tests/Android.bp
+++ b/media/codec2/sfplugin/tests/Android.bp
@@ -1,5 +1,6 @@
cc_test {
name: "ccodec_unit_test",
+ test_suites: ["device-tests"],
srcs: [
"CCodecBuffers_test.cpp",
@@ -43,6 +44,7 @@
cc_test {
name: "mc_sanity_test",
+ test_suites: ["device-tests"],
srcs: [
"MediaCodec_sanity_test.cpp",
diff --git a/media/codec2/tests/Android.bp b/media/codec2/tests/Android.bp
index fce6e21..c9169a9 100644
--- a/media/codec2/tests/Android.bp
+++ b/media/codec2/tests/Android.bp
@@ -1,5 +1,6 @@
cc_test {
name: "codec2_core_param_test",
+ test_suites: ["device-tests"],
srcs: [
"C2Param_test.cpp",
@@ -28,6 +29,7 @@
cc_test {
name: "codec2_vndk_test",
+ test_suites: ["device-tests"],
srcs: [
"C2_test.cpp",
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 4d7e619..59471a2 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -25,6 +25,7 @@
#include <hardware/gralloc.h>
#include <ui/GraphicBufferAllocator.h>
#include <ui/GraphicBufferMapper.h>
+#include <ui/Rect.h>
#include <C2AllocatorGralloc.h>
#include <C2Buffer.h>
@@ -253,7 +254,7 @@
virtual ~C2AllocationGralloc() override;
virtual c2_status_t map(
- C2Rect rect, C2MemoryUsage usage, C2Fence *fence,
+ C2Rect c2Rect, C2MemoryUsage usage, C2Fence *fence,
C2PlanarLayout *layout /* nonnull */, uint8_t **addr /* nonnull */) override;
virtual c2_status_t unmap(
uint8_t **addr /* nonnull */, C2Rect rect, C2Fence *fence /* nullable */) override;
@@ -336,8 +337,12 @@
}
c2_status_t C2AllocationGralloc::map(
- C2Rect rect, C2MemoryUsage usage, C2Fence *fence,
+ C2Rect c2Rect, C2MemoryUsage usage, C2Fence *fence,
C2PlanarLayout *layout /* nonnull */, uint8_t **addr /* nonnull */) {
+ const Rect rect{(int32_t)c2Rect.left, (int32_t)c2Rect.top,
+ (int32_t)(c2Rect.left + c2Rect.width) /* right */,
+ (int32_t)(c2Rect.top + c2Rect.height) /* bottom */};
+
uint64_t grallocUsage = static_cast<C2AndroidMemoryUsage>(usage).asGrallocUsage();
ALOGV("mapping buffer with usage %#llx => %#llx",
(long long)usage.expected, (long long)grallocUsage);
@@ -386,10 +391,7 @@
void *pointer = nullptr;
// TODO: fence
status_t err = GraphicBufferMapper::get().lock(
- const_cast<native_handle_t *>(mBuffer), grallocUsage,
- { (int32_t)rect.left, (int32_t)rect.top,
- (int32_t)rect.width, (int32_t)rect.height },
- &pointer);
+ const_cast<native_handle_t *>(mBuffer), grallocUsage, rect, &pointer);
if (err) {
ALOGE("failed transaction: lock(RGBA_1010102)");
return C2_CORRUPTED;
@@ -464,10 +466,7 @@
void *pointer = nullptr;
// TODO: fence
status_t err = GraphicBufferMapper::get().lock(
- const_cast<native_handle_t*>(mBuffer), grallocUsage,
- { (int32_t)rect.left, (int32_t)rect.top,
- (int32_t)rect.width, (int32_t)rect.height },
- &pointer);
+ const_cast<native_handle_t*>(mBuffer), grallocUsage, rect, &pointer);
if (err) {
ALOGE("failed transaction: lock(RGBA_8888)");
return C2_CORRUPTED;
@@ -524,10 +523,7 @@
void *pointer = nullptr;
// TODO: fence
status_t err = GraphicBufferMapper::get().lock(
- const_cast<native_handle_t*>(mBuffer), grallocUsage,
- { (int32_t)rect.left, (int32_t)rect.top,
- (int32_t)rect.width, (int32_t)rect.height },
- &pointer);
+ const_cast<native_handle_t*>(mBuffer), grallocUsage, rect, &pointer);
if (err) {
ALOGE("failed transaction: lock(BLOB)");
return C2_CORRUPTED;
@@ -544,10 +540,7 @@
android_ycbcr ycbcrLayout;
status_t err = GraphicBufferMapper::get().lockYCbCr(
- const_cast<native_handle_t*>(mBuffer), grallocUsage,
- { (int32_t)rect.left, (int32_t)rect.top,
- (int32_t)rect.width, (int32_t)rect.height },
- &ycbcrLayout);
+ const_cast<native_handle_t*>(mBuffer), grallocUsage, rect, &ycbcrLayout);
if (err) {
ALOGE("failed transaction: lockYCbCr");
return C2_CORRUPTED;
diff --git a/media/extractors/TEST_MAPPING b/media/extractors/TEST_MAPPING
index abefb0f..4984b8f 100644
--- a/media/extractors/TEST_MAPPING
+++ b/media/extractors/TEST_MAPPING
@@ -1,5 +1,6 @@
{
"presubmit": [
+
// TODO(b/153661591) enable test once the bug is fixed
// This tests the extractor path
// {
@@ -13,5 +14,14 @@
// }
// ]
// }
+ ],
+
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "ExtractorUnitTest" }
]
+
+
}
diff --git a/media/extractors/fuzzers/Android.bp b/media/extractors/fuzzers/Android.bp
index 31d6f83..e900e57 100644
--- a/media/extractors/fuzzers/Android.bp
+++ b/media/extractors/fuzzers/Android.bp
@@ -310,3 +310,27 @@
dictionary: "flac_extractor_fuzzer.dict",
}
+
+cc_fuzz {
+ name: "midi_extractor_fuzzer",
+ defaults: ["extractor-fuzzer-defaults"],
+
+ srcs: [
+ "midi_extractor_fuzzer.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/media/extractors/midi",
+ ],
+
+ static_libs: [
+ "libsonivox",
+ "libmedia_midiiowrapper",
+ "libmidiextractor",
+ "libwatchdog",
+ ],
+
+ dictionary: "midi_extractor_fuzzer.dict",
+
+ host_supported: true,
+}
diff --git a/media/extractors/fuzzers/README.md b/media/extractors/fuzzers/README.md
index 4223b5e..fb1d52f 100644
--- a/media/extractors/fuzzers/README.md
+++ b/media/extractors/fuzzers/README.md
@@ -11,6 +11,7 @@
+ [libmp3extractor](#mp3ExtractorFuzzer)
+ [libaacextractor](#aacExtractorFuzzer)
+ [libflacextractor](#flacExtractor)
++ [libmidiextractor](#midiExtractorFuzzer)
# <a name="ExtractorFuzzerBase"></a> Fuzzer for libextractorfuzzerbase
All the extractors have a common API - creating a data source, extraction
@@ -321,6 +322,41 @@
$ adb shell /data/fuzz/arm64/flac_extractor_fuzzer/flac_extractor_fuzzer CORPUS_DIR
```
+# <a name="midiExtractorFuzzer"></a> Fuzzer for libmidiextractor
+
+## Plugin Design Considerations
+The fuzzer plugin for MIDI extractor uses the `ExtractorFuzzerBase` class and
+implements only the `createExtractor` to create the MIDI extractor class.
+
+##### Maximize code coverage
+Dict file (dictionary file) is created for MIDI to ensure that the required MIDI
+headers are present in every input file that goes to the fuzzer.
+This ensures that larger code gets covered as a range of MIDI headers will be
+present in the input data.
+
+
+## Build
+
+This describes steps to build midi_extractor_fuzzer binary.
+
+### Android
+
+#### Steps to build
+Build the fuzzer
+```
+ $ mm -j$(nproc) midi_extractor_fuzzer
+```
+
+#### Steps to run
+Create a directory CORPUS_DIR and copy some MIDI files to that folder
+Push this directory to device.
+
+To run on device
+```
+ $ adb sync data
+ $ adb shell /data/fuzz/arm64/midi_extractor_fuzzer/midi_extractor_fuzzer CORPUS_DIR
+```
+
## References:
* http://llvm.org/docs/LibFuzzer.html
* https://github.com/google/oss-fuzz
diff --git a/media/extractors/fuzzers/midi_extractor_fuzzer.cpp b/media/extractors/fuzzers/midi_extractor_fuzzer.cpp
new file mode 100644
index 0000000..e02a12b
--- /dev/null
+++ b/media/extractors/fuzzers/midi_extractor_fuzzer.cpp
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *****************************************************************************
+ * Originally developed and contributed by Ittiam Systems Pvt. Ltd, Bangalore
+ */
+
+#include "ExtractorFuzzerBase.h"
+
+#include "MidiExtractor.h"
+
+using namespace android;
+
+class MIDIExtractor : public ExtractorFuzzerBase {
+ public:
+ MIDIExtractor() = default;
+ ~MIDIExtractor() = default;
+
+ bool createExtractor();
+};
+
+bool MIDIExtractor::createExtractor() {
+ mExtractor = new MidiExtractor(mDataSource->wrap());
+ if (!mExtractor) {
+ return false;
+ }
+ mExtractor->name();
+ return true;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ if ((!data) || (size == 0)) {
+ return 0;
+ }
+ MIDIExtractor* extractor = new MIDIExtractor();
+ if (extractor) {
+ extractor->processData(data, size);
+ delete extractor;
+ }
+ return 0;
+}
diff --git a/media/extractors/fuzzers/midi_extractor_fuzzer.dict b/media/extractors/fuzzers/midi_extractor_fuzzer.dict
new file mode 100644
index 0000000..5b6bb8b
--- /dev/null
+++ b/media/extractors/fuzzers/midi_extractor_fuzzer.dict
@@ -0,0 +1,3 @@
+# MIDI Chunks
+kw1="MThd"
+kw2="MTrk"
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index b8255fc..1c69bb8 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -5,7 +5,7 @@
srcs: ["MidiExtractor.cpp"],
header_libs: [
- "libmedia_headers",
+ "libmedia_datasource_headers",
],
static_libs: [
@@ -18,4 +18,12 @@
shared_libs: [
"libbase",
],
+
+ host_supported: true,
+
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
}
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 65ba382..bd36403 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -2880,6 +2880,21 @@
break;
}
+ case FOURCC("pasp"):
+ {
+ *offset += chunk_size;
+ // this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
+ // ignore otherwise
+ if (depth >= 2 && mPath[depth - 2] == FOURCC("stsd")) {
+ status_t err = parsePaspBox(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ break;
+ }
+
case FOURCC("titl"):
case FOURCC("perf"):
case FOURCC("auth"):
@@ -4052,6 +4067,26 @@
return OK;
}
+status_t MPEG4Extractor::parsePaspBox(off64_t offset, size_t size) {
+ if (size < 8 || size == SIZE_MAX || mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint32_t data[2]; // hSpacing, vSpacing
+ if (mDataSource->readAt(offset, data, 8) < 8) {
+ return ERROR_IO;
+ }
+ uint32_t hSpacing = ntohl(data[0]);
+ uint32_t vSpacing = ntohl(data[1]);
+
+ if (hSpacing != 0 && vSpacing != 0) {
+ AMediaFormat_setInt32(mLastTrack->meta, AMEDIAFORMAT_KEY_SAR_WIDTH, hSpacing);
+ AMediaFormat_setInt32(mLastTrack->meta, AMEDIAFORMAT_KEY_SAR_HEIGHT, vSpacing);
+ }
+
+ return OK;
+}
+
status_t MPEG4Extractor::parse3GPPMetaData(off64_t offset, size_t size, int depth) {
if (size < 4 || size == SIZE_MAX) {
return ERROR_MALFORMED;
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index 1e49d50..bafc7f5 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -160,6 +160,7 @@
status_t parseChunk(off64_t *offset, int depth);
status_t parseITunesMetaData(off64_t offset, size_t size);
status_t parseColorInfo(off64_t offset, size_t size);
+ status_t parsePaspBox(off64_t offset, size_t size);
status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
void parseID3v2MetaData(off64_t offset, uint64_t size);
status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index 828bcd6..62f0808 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -43,6 +43,9 @@
long vorbis_packet_blocksize(vorbis_info *vi,ogg_packet *op);
}
+static constexpr int OGG_PAGE_FLAG_CONTINUED_PACKET = 1;
+static constexpr int OGG_PAGE_FLAG_END_OF_STREAM = 4;
+
namespace android {
struct OggSource : public MediaTrackHelper {
@@ -297,7 +300,8 @@
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
*out = packet;
- ALOGV("returning buffer %p", packet);
+ ALOGV("returning buffer %p, size %zu, length %zu",
+ packet, packet->size(), packet->range_length());
return AMEDIA_OK;
}
@@ -358,10 +362,10 @@
if (!memcmp(signature, "OggS", 4)) {
if (*pageOffset > startOffset) {
- ALOGV("skipped %lld bytes of junk to reach next frame",
- (long long)(*pageOffset - startOffset));
+ ALOGV("skipped %lld bytes of junk at %lld to reach next frame",
+ (long long)(*pageOffset - startOffset), (long long)(startOffset));
}
-
+ ALOGV("found frame at %lld", (long long)(*pageOffset));
return OK;
}
@@ -629,7 +633,8 @@
// Calculate timestamps by accumulating durations starting from the first sample of a page;
// We assume that we only seek to page boundaries.
AMediaFormat *meta = (*out)->meta_data();
- if (AMediaFormat_getInt32(meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, &currentPageSamples)) {
+ if (AMediaFormat_getInt32(meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, &currentPageSamples) &&
+ (mCurrentPage.mFlags & OGG_PAGE_FLAG_END_OF_STREAM)) {
// first packet in page
if (mOffset == mFirstDataOffset) {
currentPageSamples -= mStartGranulePosition;
@@ -812,6 +817,7 @@
}
buffer = tmp;
+ ALOGV("reading %zu bytes @ %zu", packetSize, size_t(dataOffset));
ssize_t n = mSource->readAt(
dataOffset,
(uint8_t *)buffer->data() + buffer->range_length(),
@@ -830,8 +836,9 @@
if (gotFullPacket) {
// We've just read the entire packet.
+ ALOGV("got full packet, size %zu", fullSize);
- if (mFirstPacketInPage) {
+ if (mFirstPacketInPage && (mCurrentPage.mFlags & OGG_PAGE_FLAG_END_OF_STREAM)) {
AMediaFormat *meta = buffer->meta_data();
AMediaFormat_setInt32(
meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, mCurrentPageSamples);
@@ -864,6 +871,9 @@
}
// fall through, the buffer now contains the start of the packet.
+ ALOGV("have start of packet, getting rest");
+ } else {
+ ALOGV("moving to next page");
}
CHECK_EQ(mNextLaceIndex, mCurrentPage.mNumSegments);
@@ -899,9 +909,10 @@
mNextLaceIndex = 0;
if (buffer != NULL) {
- if ((mCurrentPage.mFlags & 1) == 0) {
+ if ((mCurrentPage.mFlags & OGG_PAGE_FLAG_CONTINUED_PACKET) == 0) {
// This page does not continue the packet, i.e. the packet
// is already complete.
+ ALOGV("packet was already complete?!");
if (timeUs >= 0) {
AMediaFormat *meta = buffer->meta_data();
@@ -909,8 +920,10 @@
}
AMediaFormat *meta = buffer->meta_data();
- AMediaFormat_setInt32(
- meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, mCurrentPageSamples);
+ if (mCurrentPage.mFlags & OGG_PAGE_FLAG_END_OF_STREAM) {
+ AMediaFormat_setInt32(
+ meta, AMEDIAFORMAT_KEY_VALID_SAMPLES, mCurrentPageSamples);
+ }
mFirstPacketInPage = false;
*out = buffer;
@@ -929,6 +942,7 @@
for (size_t i = 0; i < mNumHeaders; ++i) {
// ignore timestamp for configuration packets
if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != AMEDIA_OK) {
+ ALOGV("readNextPacket failed");
return err;
}
ALOGV("read packet of size %zu\n", packet->range_length());
@@ -1008,6 +1022,10 @@
size_t size = buffer->range_length();
+ if (size == 0) {
+ return 0;
+ }
+
ogg_buffer buf;
buf.data = (uint8_t *)data;
buf.size = size;
@@ -1304,8 +1322,8 @@
|| audioChannelCount <= 0 || audioChannelCount > FCC_8) {
ALOGE("Invalid haptic channel count found in metadata: %d", mHapticChannelCount);
} else {
- const audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(
- audioChannelCount) | hapticChannelMask;
+ const audio_channel_mask_t channelMask = static_cast<audio_channel_mask_t>(
+ audio_channel_out_mask_from_count(audioChannelCount) | hapticChannelMask);
AMediaFormat_setInt32(mMeta, AMEDIAFORMAT_KEY_CHANNEL_MASK, channelMask);
AMediaFormat_setInt32(
mMeta, AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT, mHapticChannelCount);
diff --git a/media/extractors/tests/Android.bp b/media/extractors/tests/Android.bp
index b3afe2f..0bca6f5 100644
--- a/media/extractors/tests/Android.bp
+++ b/media/extractors/tests/Android.bp
@@ -17,6 +17,7 @@
cc_test {
name: "ExtractorUnitTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs: ["ExtractorUnitTest.cpp"],
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index 140052f..e81ab06 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -32,5 +32,6 @@
cc_library_headers {
name: "libaaudio_headers",
export_include_dirs: ["include"],
+ export_header_lib_headers: ["aaudio-aidl-cpp"],
+ header_libs: ["aaudio-aidl-cpp"],
}
-
diff --git a/media/libaaudio/Doxyfile.orig b/media/libaaudio/Doxyfile.orig
deleted file mode 100644
index 137facb..0000000
--- a/media/libaaudio/Doxyfile.orig
+++ /dev/null
@@ -1,2303 +0,0 @@
-# Doxyfile 1.8.6
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all text
-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
-# for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME = "My Project"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
-# the documentation. The maximum height of the logo should not exceed 55 pixels
-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
-# to the output directory.
-
-PROJECT_LOGO =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY =
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator that is
-# used to form the text in various listings. Each string in this list, if found
-# as the leading text of the brief description, will be stripped from the text
-# and the result, after processing the whole list, is used as the annotated
-# text. Otherwise, the brief description is used as-is. If left blank, the
-# following values are used ($name is automatically replaced with the name of
-# the entity):The $name class, The $name widget, The $name file, is, provides,
-# specifies, contains, represents, a, an and the.
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# doxygen will generate a detailed section even if there is only a brief
-# description.
-# The default value is: NO.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-# The default value is: NO.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
-# before files name in the file list and in the header files. If set to NO the
-# shortest path that makes the file name unique will be used
-# The default value is: YES.
-
-FULL_PATH_NAMES = YES
-
-# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
-# Stripping is only done if one of the specified strings matches the left-hand
-# part of the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the path to
-# strip.
-#
-# Note that you can specify absolute paths here, but also relative paths, which
-# will be relative from the directory where doxygen is started.
-# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
-# path mentioned in the documentation of a class, which tells the reader which
-# header file to include in order to use a class. If left blank only the name of
-# the header file containing the class definition is used. Otherwise one should
-# specify the list of include paths that are normally passed to the compiler
-# using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
-# less readable) file names. This can be useful is your file systems doesn't
-# support long names like on DOS, Mac, or CD-ROM.
-# The default value is: NO.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
-# first line (until the first dot) of a Javadoc-style comment as the brief
-# description. If set to NO, the Javadoc-style will behave just like regular Qt-
-# style comments (thus requiring an explicit @brief command for a brief
-# description.)
-# The default value is: NO.
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
-# line (until the first dot) of a Qt-style comment as the brief description. If
-# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
-# requiring an explicit \brief command for a brief description.)
-# The default value is: NO.
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
-# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
-# a brief description. This used to be the default behavior. The new default is
-# to treat a multi-line C++ comment block as a detailed description. Set this
-# tag to YES if you prefer the old behavior instead.
-#
-# Note that setting this tag to YES also means that rational rose comments are
-# not recognized any more.
-# The default value is: NO.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
-# documentation from any documented member that it re-implements.
-# The default value is: YES.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
-# new page for each member. If set to NO, the documentation of a member will be
-# part of the file/class/namespace that contains it.
-# The default value is: NO.
-
-SEPARATE_MEMBER_PAGES = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
-# uses this value to replace tabs by spaces in code fragments.
-# Minimum value: 1, maximum value: 16, default value: 4.
-
-TAB_SIZE = 4
-
-# This tag can be used to specify a number of aliases that act as commands in
-# the documentation. An alias has the form:
-# name=value
-# For example adding
-# "sideeffect=@par Side Effects:\n"
-# will allow you to put the command \sideeffect (or @sideeffect) in the
-# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines.
-
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C. For
-# instance, some of the names that are used will be different. The list of all
-# members will be omitted, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_FOR_C = NO
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
-# Python sources only. Doxygen will then generate output that is more tailored
-# for that language. For instance, namespaces will be presented as packages,
-# qualified scopes will look different, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources. Doxygen will then generate output that is tailored for Fortran.
-# The default value is: NO.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for VHDL.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C.
-#
-# Note For files without extension you can use no_extension as a placeholder.
-#
-# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
-# according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibilities issues.
-# The default value is: YES.
-
-MARKDOWN_SUPPORT = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-AUTOLINK_SUPPORT = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also make the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen to replace the get and set methods by a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
-# The default value is: YES.
-
-SUBGROUPING = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
-# are shown inside the group in which they are included (e.g. using \ingroup)
-# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
-# and RTF).
-#
-# Note that this feature does not work in combination with
-# SEPARATE_MEMBER_PAGES.
-# The default value is: NO.
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
-# with only public data fields or simple typedef fields will be shown inline in
-# the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO, structs, classes, and unions are shown on a separate page (for HTML and
-# Man pages) or section (for LaTeX and RTF).
-# The default value is: NO.
-
-INLINE_SIMPLE_STRUCTS = NO
-
-# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
-# enum is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically be
-# useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-# The default value is: NO.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can be
-# an expensive process and often the same symbol appears multiple times in the
-# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
-# doxygen will become slower. If the cache is too large, memory is wasted. The
-# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
-# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
-# symbols. At the end of a run doxygen will report the cache usage and suggest
-# the optimal cache size from a speed point of view.
-# Minimum value: 0, maximum value: 9, default value: 0.
-
-LOOKUP_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available. Private
-# class members and static file members will be hidden unless the
-# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
-# Note: This will also disable the warnings about undocumented members that are
-# normally produced when WARNINGS is set to YES.
-# The default value is: NO.
-
-EXTRACT_ALL = NO
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
-# be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PRIVATE = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
-# scope will be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PACKAGE = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
-# included in the documentation.
-# The default value is: NO.
-
-EXTRACT_STATIC = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO
-# only classes defined in header files are included. Does not have any effect
-# for Java sources.
-# The default value is: YES.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local methods,
-# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO only methods in the interface are
-# included.
-# The default value is: NO.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base name of
-# the file that contains the anonymous namespace. By default anonymous namespace
-# are hidden.
-# The default value is: NO.
-
-EXTRACT_ANON_NSPACES = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
-# undocumented members inside documented classes or files. If set to NO these
-# members will be included in the various overviews, but no documentation
-# section is generated. This option has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO these classes will be included in the various overviews. This option has
-# no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO these declarations will be
-# included in the documentation.
-# The default value is: NO.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO these
-# blocks will be appended to the function's detailed documentation block.
-# The default value is: NO.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation that is typed after a
-# \internal command is included. If the tag is set to NO then the documentation
-# will be excluded. Set it to YES to include the internal documentation.
-# The default value is: NO.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-# The default value is: system dependent.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES the
-# scope will be hidden.
-# The default value is: NO.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
-# the files that are included by a file in the documentation of that file.
-# The default value is: YES.
-
-SHOW_INCLUDE_FILES = YES
-
-# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
-# grouped member an include statement to the documentation, telling the reader
-# which file to include in order to use the member.
-# The default value is: NO.
-
-SHOW_GROUPED_MEMB_INC = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
-# files with double quotes in the documentation rather than with sharp brackets.
-# The default value is: NO.
-
-FORCE_LOCAL_INCLUDES = NO
-
-# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
-# documentation for inline members.
-# The default value is: YES.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
-# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order. Note that
-# this will also influence the order of the classes in the class list.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
-# accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
-# todo list. This list is created by putting \todo commands in the
-# documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
-# test list. This list is created by putting \test commands in the
-# documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if <section_label> ... \endif and \cond <section_label>
-# ... \endcond blocks.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using \showinitializer or \hideinitializer command in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES the list
-# will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command command input-file, where command is the value of the
-# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
-# by doxygen. Whatever the program writes to standard output is used as the file
-# version. For an example see the documentation.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
-
-LAYOUT_FILE =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
-# the reference definitions. This must be a list of .bib files. The .bib
-# extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
-# For LaTeX the style of the bibliography can be controlled using
-# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. Do not use file names with spaces, bibtex cannot handle them. See
-# also \cite for info how to create references.
-
-CITE_BIB_FILES =
-
-#---------------------------------------------------------------------------
-# Configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated to
-# standard output by doxygen. If QUIET is set to YES this implies that the
-# messages are off.
-# The default value is: NO.
-
-QUIET = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
-# this implies that the warnings are on.
-#
-# Tip: Turn warnings on while writing the documentation.
-# The default value is: YES.
-
-WARNINGS = YES
-
-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
-# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
-# will automatically be disabled.
-# The default value is: YES.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
-# The default value is: YES.
-
-WARN_IF_DOC_ERROR = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
-# are documented, but have no documentation for their parameters or return
-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
-# documentation, but not about the absence of documentation.
-# The default value is: NO.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that doxygen
-# can produce. The string should contain the $file, $line, and $text tags, which
-# will be replaced by the file and line number from which the warning originated
-# and the warning text. Optionally the format may contain $version, which will
-# be replaced by the version of the file (if it could be obtained via
-# FILE_VERSION_FILTER)
-# The default value is: $file:$line: $text.
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning and error
-# messages should be written. If left blank the output is written to standard
-# error (stderr).
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag is used to specify the files and/or directories that contain
-# documented source files. You may enter file names like myfile.cpp or
-# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
-# Note: If this tag is empty the current directory is searched.
-
-INPUT =
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
-# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
-# possible encodings.
-# The default value is: UTF-8.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-FILE_PATTERNS =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories should
-# be searched for input files as well.
-# The default value is: NO.
-
-RECURSIVE = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-#
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-# The default value is: NO.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories for example use the pattern */test/*
-
-EXCLUDE_PATTERNS =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
-
-EXCLUDE_SYMBOLS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or directories
-# that contain example code fragments that are included (see the \include
-# command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank all
-# files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER ) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-SOURCE_TOOLTIPS = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
-# The default value is: YES.
-
-GENERATE_HTML = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# To get valid HTML the header file that includes any scripts and style sheets
-# that doxygen needs, which is dependent on the configuration options used (e.g.
-# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
-# default header using
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
-# defined cascading style sheet that is included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefor more robust against future updates.
-# Doxygen will copy the style sheet file to the output directory. For an example
-# see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_STYLESHEET =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the stylesheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
-# purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries 1 will produce a full collapsed tree by default. 0 is a special value
-# representing an infinite number of entries and will result in a full expanded
-# tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION =
-
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated, together with the HTML files, they form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files needs
-# to be copied into the plugins directory of eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID = org.doxygen.Project
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want to formulas look prettier in the HTML output. When
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_FORMAT = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_CODEFILE =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
-# key> to jump into the search results window, the results can be navigated
-# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
-# the search. The filter options can be selected when the cursor is inside the
-# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
-# to select a filter and <Enter> or <escape> to activate or cancel the filter
-# option.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-SEARCHENGINE = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a web server instead of a web client using Javascript. There
-# are two flavours of web server based searching depending on the
-# EXTERNAL_SEARCH setting. When disabled, doxygen will generate a PHP script for
-# searching and an index file used by the script. When EXTERNAL_SEARCH is
-# enabled the indexing and searching needs to be provided by external tools. See
-# the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SERVER_BASED_SEARCH = NO
-
-# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
-# script for searching. Instead the search results are written to an XML file
-# which needs to be processed by an external indexer. Doxygen will invoke an
-# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
-# search results.
-#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/).
-#
-# See the section "External Indexing and Searching" for details.
-# The default value is: NO.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH = NO
-
-# The SEARCHENGINE_URL should point to a search engine hosted by a web server
-# which will return the search results when EXTERNAL_SEARCH is enabled.
-#
-# Doxygen ships with an example indexer ( doxyindexer) and search engine
-# (doxysearch.cgi) which are based on the open source search engine library
-# Xapian (see: http://xapian.org/). See the section "External Indexing and
-# Searching" for details.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHENGINE_URL =
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
-# search data is written to a file for indexing by an external tool. With the
-# SEARCHDATA_FILE tag the name of this file can be specified.
-# The default file is: searchdata.xml.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-SEARCHDATA_FILE = searchdata.xml
-
-# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
-# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
-# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
-# projects and redirect the results back to the right project.
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTERNAL_SEARCH_ID =
-
-# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
-# projects other than the one defined by this configuration file, but that are
-# all added to the same external search index. Each project needs to have a
-# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
-# to a relative location where the documentation can be found. The format is:
-# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
-# This tag requires that the tag SEARCHENGINE is set to YES.
-
-EXTRA_SEARCH_MAPPINGS =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
-# The default value is: YES.
-
-GENERATE_LATEX = YES
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked.
-#
-# Note that when enabling USE_PDFLATEX this option is only used for generating
-# bitmaps for formulas in the HTML output, but not in the Makefile that is
-# written to the output directory.
-# The default file is: latex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
-# index for LaTeX.
-# The default file is: makeindex.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-COMPACT_LATEX = NO
-
-# The PAPER_TYPE tag can be used to set the paper type that is used by the
-# printer.
-# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
-# 14 inches) and executive (7.25 x 10.5 inches).
-# The default value is: a4.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PAPER_TYPE = a4
-
-# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
-# that should be included in the LaTeX output. To get the times font for
-# instance you can specify
-# EXTRA_PACKAGES=times
-# If left blank no extra packages will be included.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
-# generated LaTeX document. The header should contain everything until the first
-# chapter. If it is left blank doxygen will generate a standard header. See
-# section "Doxygen usage" for information on how to let doxygen write the
-# default header to a separate file.
-#
-# Note: Only use a user-defined header if you know what you are doing! The
-# following commands have a special meaning inside the header: $title,
-# $datetime, $date, $doxygenversion, $projectname, $projectnumber. Doxygen will
-# replace them by respectively the title of the page, the current date and time,
-# only the current date, the version number of doxygen, the project name (see
-# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HEADER =
-
-# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
-# generated LaTeX document. The footer should contain everything after the last
-# chapter. If it is left blank doxygen will generate a standard footer.
-#
-# Note: Only use a user-defined footer if you know what you are doing!
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_FOOTER =
-
-# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the LATEX_OUTPUT output
-# directory. Note that the files will be copied as-is; there are no commands or
-# markers available.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_EXTRA_FILES =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
-# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
-# contain links (just like the HTML output) instead of page references. This
-# makes the output suitable for online browsing using a PDF viewer.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-PDF_HYPERLINKS = YES
-
-# If the LATEX_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
-# the PDF file directly from the LaTeX files. Set this option to YES to get a
-# higher quality PDF documentation.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-USE_PDFLATEX = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep running
-# if errors occur, instead of asking the user for help. This option is also used
-# when generating formulas in HTML.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BATCHMODE = NO
-
-# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
-# index chapters (such as File Index, Compound Index, etc.) in the output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_HIDE_INDICES = NO
-
-# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
-# code with syntax highlighting in the LaTeX output.
-#
-# Note that which sources are shown also depends on other settings such as
-# SOURCE_BROWSER.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_SOURCE_CODE = NO
-
-# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
-# bibliography, e.g. plainnat, or ieeetr. See
-# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
-# The default value is: plain.
-# This tag requires that the tag GENERATE_LATEX is set to YES.
-
-LATEX_BIB_STYLE = plain
-
-#---------------------------------------------------------------------------
-# Configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The
-# RTF output is optimized for Word 97 and may not look too pretty with other RTF
-# readers/editors.
-# The default value is: NO.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: rtf.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF
-# documents. This may be useful for small projects and may help to save some
-# trees in general.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
-# contain hyperlink fields. The RTF file will contain links (just like the HTML
-# output) instead of page references. This makes the output suitable for online
-# browsing using Word or some other Word compatible readers that support those
-# fields.
-#
-# Note: WordPad (write) and others do not support links.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_HYPERLINKS = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's config
-# file, i.e. a series of assignments. You only have to provide replacements,
-# missing definitions are set to their default value.
-#
-# See also section "Doxygen usage" for information on how to generate the
-# default style sheet that doxygen normally uses.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an RTF document. Syntax is
-# similar to doxygen's config file. A template extensions file can be generated
-# using doxygen -e rtf extensionFile.
-# This tag requires that the tag GENERATE_RTF is set to YES.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for
-# classes and files.
-# The default value is: NO.
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it. A directory man3 will be created inside the directory specified by
-# MAN_OUTPUT.
-# The default directory is: man.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to the generated
-# man pages. In case the manual section does not start with a number, the number
-# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
-# optional.
-# The default value is: .3.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
-# will generate one additional man file for each entity documented in the real
-# man page(s). These additional files only source the real man page, but without
-# them the man command would be unable to find the correct page.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_MAN is set to YES.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that
-# captures the structure of the code including all documentation.
-# The default value is: NO.
-
-GENERATE_XML = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: xml.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_OUTPUT = xml
-
-# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
-# validating XML parser to check the syntax of the XML files.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_SCHEMA =
-
-# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
-# validating XML parser to check the syntax of the XML files.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_DTD =
-
-# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
-# listings (including syntax highlighting and cross-referencing information) to
-# the XML output. Note that enabling this will significantly increase the size
-# of the XML output.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_XML is set to YES.
-
-XML_PROGRAMLISTING = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to the DOCBOOK output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files
-# that can be used to generate PDF.
-# The default value is: NO.
-
-GENERATE_DOCBOOK = NO
-
-# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
-# front of it.
-# The default directory is: docbook.
-# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
-
-DOCBOOK_OUTPUT = docbook
-
-#---------------------------------------------------------------------------
-# Configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen
-# Definitions (see http://autogen.sf.net) file that captures the structure of
-# the code including all documentation. Note that this feature is still
-# experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# Configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module
-# file that captures the structure of the code including all documentation.
-#
-# Note that this feature is still experimental and incomplete at the moment.
-# The default value is: NO.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary
-# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
-# output from the Perl module output.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely
-# formatted so it can be parsed by a human reader. This is useful if you want to
-# understand what is going on. On the other hand, if this tag is set to NO the
-# size of the Perl module output will be much smaller and Perl will parse it
-# just the same.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file are
-# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
-# so different doxyrules.make files included by the same Makefile don't
-# overwrite each other's variables.
-# This tag requires that the tag GENERATE_PERLMOD is set to YES.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all
-# C-preprocessor directives found in the sources and include files.
-# The default value is: YES.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names
-# in the source code. If set to NO only conditional compilation will be
-# performed. Macro expansion can be done in a controlled way by setting
-# EXPAND_ONLY_PREDEF to YES.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-MACRO_EXPANSION = NO
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
-# the macro expansion is limited to the macros specified with the PREDEFINED and
-# EXPAND_AS_DEFINED tags.
-# The default value is: NO.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES the includes files in the
-# INCLUDE_PATH will be searched if a #include is found.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by the
-# preprocessor.
-# This tag requires that the tag SEARCH_INCLUDES is set to YES.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will be
-# used.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that are
-# defined before the preprocessor is started (similar to the -D option of e.g.
-# gcc). The argument of the tag is a list of macros of the form: name or
-# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
-# is assumed. To prevent a macro definition from being undefined via #undef or
-# recursively expanded use the := operator instead of the = operator.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-PREDEFINED =
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
-# tag can be used to specify a list of macro names that should be expanded. The
-# macro definition that is found in the sources will be used. Use the PREDEFINED
-# tag if you want to use a different macro definition that overrules the
-# definition found in the source code.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
-# remove all refrences to function-like macros that are alone on a line, have an
-# all uppercase name, and do not end with a semicolon. Such function macros are
-# typically used for boiler-plate code, and will confuse the parser if not
-# removed.
-# The default value is: YES.
-# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration options related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES tag can be used to specify one or more tag files. For each tag
-# file the location of the external documentation should be added. The format of
-# a tag file without this location is as follows:
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where loc1 and loc2 can be relative or absolute paths or URLs. See the
-# section "Linking to external documentation" for more information about the use
-# of tag files.
-# Note: Each tag file must have an unique name (where the name does NOT include
-# the path). If a tag file is not located in the directory in which doxygen is
-# run, you must also specify the path to the tagfile here.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
-# tag file that is based on the input files it reads. See section "Linking to
-# external documentation" for more information about the usage of tag files.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
-# class index. If set to NO only the inherited external classes will be listed.
-# The default value is: NO.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in
-# the modules index. If set to NO, only the current project's groups will be
-# listed.
-# The default value is: YES.
-
-EXTERNAL_GROUPS = YES
-
-# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
-# the related pages index. If set to NO, only the current project's pages will
-# be listed.
-# The default value is: YES.
-
-EXTERNAL_PAGES = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of 'which perl').
-# The default file (with absolute path) is: /usr/bin/perl.
-
-PERL_PATH = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram
-# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
-# NO turns the diagrams off. Note that this option also works with HAVE_DOT
-# disabled, but it is recommended to install and use dot, since it yields more
-# powerful graphs.
-# The default value is: YES.
-
-CLASS_DIAGRAMS = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see:
-# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
-# You can include diagrams made with dia in doxygen documentation. Doxygen will
-# then run dia to produce the diagram and insert it in the documentation. The
-# DIA_PATH tag allows you to specify the directory where the dia binary resides.
-# If left empty dia is assumed to be found in the default search path.
-
-DIA_PATH =
-
-# If set to YES, the inheritance and collaboration graphs will hide inheritance
-# and usage relations if the target is undocumented or is not a class.
-# The default value is: YES.
-
-HIDE_UNDOC_RELATIONS = YES
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz (see:
-# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
-# Bell Labs. The other options in this section have no effect if this option is
-# set to NO
-# The default value is: NO.
-
-HAVE_DOT = NO
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
-# to run in parallel. When set to 0 doxygen will base this on the number of
-# processors available in the system. You can set it explicitly to a value
-# larger than 0 to get control over the balance between CPU load and processing
-# speed.
-# Minimum value: 0, maximum value: 32, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_NUM_THREADS = 0
-
-# When you want a differently looking font n the dot files that doxygen
-# generates you can specify the font name using DOT_FONTNAME. You need to make
-# sure dot is able to find the font, which can be done by putting it in a
-# standard location or by setting the DOTFONTPATH environment variable or by
-# setting DOT_FONTPATH to the directory containing the font.
-# The default value is: Helvetica.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTNAME = Helvetica
-
-# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
-# dot graphs.
-# Minimum value: 4, maximum value: 24, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTSIZE = 10
-
-# By default doxygen will tell dot to use the default font as specified with
-# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
-# the path where dot can find it using this tag.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_FONTPATH =
-
-# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
-# each documented class showing the direct and indirect inheritance relations.
-# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
-# graph for each documented class showing the direct and indirect implementation
-# dependencies (inheritance, containment, and class references variables) of the
-# class with other documented classes.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-COLLABORATION_GRAPH = YES
-
-# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
-# groups, showing the direct groups dependencies.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LOOK = NO
-
-# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
-# class node. If there are many fields or methods and many nodes the graph may
-# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
-# number of items for each type to make the size more manageable. Set this to 0
-# for no limit. Note that the threshold may be exceeded by 50% before the limit
-# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
-# but if the number exceeds 15, the total amount of fields shown is limited to
-# 10.
-# Minimum value: 0, maximum value: 100, default value: 10.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-UML_LIMIT_NUM_FIELDS = 10
-
-# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
-# collaboration graphs will show the relations between templates and their
-# instances.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-TEMPLATE_RELATIONS = NO
-
-# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
-# YES then doxygen will generate a graph for each documented file showing the
-# direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDE_GRAPH = YES
-
-# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
-# set to YES then doxygen will generate a graph for each documented file showing
-# the direct and indirect include dependencies of the file with other documented
-# files.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable call graphs for selected
-# functions only using the \callgraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALL_GRAPH = NO
-
-# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
-# dependency graph for every global function or class method.
-#
-# Note that enabling this option will significantly increase the time of a run.
-# So in most cases it will be better to enable caller graphs for selected
-# functions only using the \callergraph command.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-CALLER_GRAPH = NO
-
-# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
-# hierarchy of all classes instead of a textual one.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
-# dependencies a directory has on other directories in a graphical way. The
-# dependency relations are determined by the #include relations between the
-# files in the directories.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot.
-# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
-# to make the SVG files visible in IE 9+ (other browsers do not have this
-# requirement).
-# Possible values are: png, jpg, gif and svg.
-# The default value is: png.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_IMAGE_FORMAT = png
-
-# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
-# enable generation of interactive SVG images that allow zooming and panning.
-#
-# Note that this requires a modern browser other than Internet Explorer. Tested
-# and working are Firefox, Chrome, Safari, and Opera.
-# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
-# the SVG files visible. Older versions of IE do not have SVG support.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-INTERACTIVE_SVG = NO
-
-# The DOT_PATH tag can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the \dotfile
-# command).
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOTFILE_DIRS =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the \mscfile
-# command).
-
-MSCFILE_DIRS =
-
-# The DIAFILE_DIRS tag can be used to specify one or more directories that
-# contain dia files that are included in the documentation (see the \diafile
-# command).
-
-DIAFILE_DIRS =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
-# that will be shown in the graph. If the number of nodes in a graph becomes
-# larger than this value, doxygen will truncate the graph, which is visualized
-# by representing a node as a red box. Note that doxygen if the number of direct
-# children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
-# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-# Minimum value: 0, maximum value: 10000, default value: 50.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_GRAPH_MAX_NODES = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
-# generated by dot. A depth value of 3 means that only nodes reachable from the
-# root by following a path via at most 3 edges will be shown. Nodes that lay
-# further from the root node will be omitted. Note that setting this option to 1
-# or 2 may greatly reduce the computation time needed for large code bases. Also
-# note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-# Minimum value: 0, maximum value: 1000, default value: 0.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not seem
-# to support this out of the box.
-#
-# Warning: Depending on the platform used, enabling this option may lead to
-# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
-# read).
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10) support
-# this, this feature is disabled by default.
-# The default value is: NO.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_MULTI_TARGETS = YES
-
-# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
-# explaining the meaning of the various boxes and arrows in the dot generated
-# graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot
-# files that are used to generate the various graphs.
-# The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
-
-DOT_CLEANUP = YES
diff --git a/media/libaaudio/examples/loopback/Android.bp b/media/libaaudio/examples/loopback/Android.bp
index 5b7d956..4de632f 100644
--- a/media/libaaudio/examples/loopback/Android.bp
+++ b/media/libaaudio/examples/loopback/Android.bp
@@ -4,9 +4,11 @@
srcs: ["src/loopback.cpp"],
cflags: ["-Wall", "-Werror"],
static_libs: ["libsndfile"],
+ include_dirs: ["external/oboe/apps/OboeTester/app/src/main/cpp"],
shared_libs: [
"libaaudio",
"libaudioutils",
+ "liblog"
],
header_libs: ["libaaudio_example_utils"],
}
diff --git a/media/libaaudio/examples/loopback/src/analyzer/GlitchAnalyzer.h b/media/libaaudio/examples/loopback/src/analyzer/GlitchAnalyzer.h
deleted file mode 100644
index 04435d1..0000000
--- a/media/libaaudio/examples/loopback/src/analyzer/GlitchAnalyzer.h
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANALYZER_GLITCH_ANALYZER_H
-#define ANALYZER_GLITCH_ANALYZER_H
-
-#include <algorithm>
-#include <cctype>
-#include <iomanip>
-#include <iostream>
-
-#include "LatencyAnalyzer.h"
-#include "PseudoRandom.h"
-
-/**
- * Output a steady sine wave and analyze the return signal.
- *
- * Use a cosine transform to measure the predicted magnitude and relative phase of the
- * looped back sine wave. Then generate a predicted signal and compare with the actual signal.
- */
-class GlitchAnalyzer : public LoopbackProcessor {
-public:
-
- int32_t getState() const {
- return mState;
- }
-
- double getPeakAmplitude() const {
- return mPeakFollower.getLevel();
- }
-
- double getTolerance() {
- return mTolerance;
- }
-
- void setTolerance(double tolerance) {
- mTolerance = tolerance;
- mScaledTolerance = mMagnitude * mTolerance;
- }
-
- void setMagnitude(double magnitude) {
- mMagnitude = magnitude;
- mScaledTolerance = mMagnitude * mTolerance;
- }
-
- int32_t getGlitchCount() const {
- return mGlitchCount;
- }
-
- int32_t getStateFrameCount(int state) const {
- return mStateFrameCounters[state];
- }
-
- double getSignalToNoiseDB() {
- static const double threshold = 1.0e-14;
- if (mMeanSquareSignal < threshold || mMeanSquareNoise < threshold) {
- return 0.0;
- } else {
- double signalToNoise = mMeanSquareSignal / mMeanSquareNoise; // power ratio
- double signalToNoiseDB = 10.0 * log(signalToNoise);
- if (signalToNoiseDB < MIN_SNR_DB) {
- ALOGD("ERROR - signal to noise ratio is too low! < %d dB. Adjust volume.",
- MIN_SNR_DB);
- setResult(ERROR_VOLUME_TOO_LOW);
- }
- return signalToNoiseDB;
- }
- }
-
- std::string analyze() override {
- std::stringstream report;
- report << "GlitchAnalyzer ------------------\n";
- report << LOOPBACK_RESULT_TAG "peak.amplitude = " << std::setw(8)
- << getPeakAmplitude() << "\n";
- report << LOOPBACK_RESULT_TAG "sine.magnitude = " << std::setw(8)
- << mMagnitude << "\n";
- report << LOOPBACK_RESULT_TAG "rms.noise = " << std::setw(8)
- << mMeanSquareNoise << "\n";
- report << LOOPBACK_RESULT_TAG "signal.to.noise.db = " << std::setw(8)
- << getSignalToNoiseDB() << "\n";
- report << LOOPBACK_RESULT_TAG "frames.accumulated = " << std::setw(8)
- << mFramesAccumulated << "\n";
- report << LOOPBACK_RESULT_TAG "sine.period = " << std::setw(8)
- << mSinePeriod << "\n";
- report << LOOPBACK_RESULT_TAG "test.state = " << std::setw(8)
- << mState << "\n";
- report << LOOPBACK_RESULT_TAG "frame.count = " << std::setw(8)
- << mFrameCounter << "\n";
- // Did we ever get a lock?
- bool gotLock = (mState == STATE_LOCKED) || (mGlitchCount > 0);
- if (!gotLock) {
- report << "ERROR - failed to lock on reference sine tone.\n";
- setResult(ERROR_NO_LOCK);
- } else {
- // Only print if meaningful.
- report << LOOPBACK_RESULT_TAG "glitch.count = " << std::setw(8)
- << mGlitchCount << "\n";
- report << LOOPBACK_RESULT_TAG "max.glitch = " << std::setw(8)
- << mMaxGlitchDelta << "\n";
- if (mGlitchCount > 0) {
- report << "ERROR - number of glitches > 0\n";
- setResult(ERROR_GLITCHES);
- }
- }
- return report.str();
- }
-
- void printStatus() override {
- ALOGD("st = %d, #gl = %3d,", mState, mGlitchCount);
- }
- /**
- * Calculate the magnitude of the component of the input signal
- * that matches the analysis frequency.
- * Also calculate the phase that we can use to create a
- * signal that matches that component.
- * The phase will be between -PI and +PI.
- */
- double calculateMagnitude(double *phasePtr = nullptr) {
- if (mFramesAccumulated == 0) {
- return 0.0;
- }
- double sinMean = mSinAccumulator / mFramesAccumulated;
- double cosMean = mCosAccumulator / mFramesAccumulated;
- double magnitude = 2.0 * sqrt((sinMean * sinMean) + (cosMean * cosMean));
- if (phasePtr != nullptr) {
- double phase = M_PI_2 - atan2(sinMean, cosMean);
- *phasePtr = phase;
- }
- return magnitude;
- }
-
- /**
- * @param frameData contains microphone data with sine signal feedback
- * @param channelCount
- */
- result_code processInputFrame(float *frameData, int /* channelCount */) override {
- result_code result = RESULT_OK;
-
- float sample = frameData[0];
- float peak = mPeakFollower.process(sample);
-
- // Force a periodic glitch to test the detector!
- if (mForceGlitchDuration > 0) {
- if (mForceGlitchCounter == 0) {
- ALOGE("%s: force a glitch!!", __func__);
- mForceGlitchCounter = getSampleRate();
- } else if (mForceGlitchCounter <= mForceGlitchDuration) {
- // Force an abrupt offset.
- sample += (sample > 0.0) ? -0.5f : 0.5f;
- }
- --mForceGlitchCounter;
- }
-
- mStateFrameCounters[mState]++; // count how many frames we are in each state
-
- switch (mState) {
- case STATE_IDLE:
- mDownCounter--;
- if (mDownCounter <= 0) {
- mState = STATE_IMMUNE;
- mDownCounter = IMMUNE_FRAME_COUNT;
- mInputPhase = 0.0; // prevent spike at start
- mOutputPhase = 0.0;
- }
- break;
-
- case STATE_IMMUNE:
- mDownCounter--;
- if (mDownCounter <= 0) {
- mState = STATE_WAITING_FOR_SIGNAL;
- }
- break;
-
- case STATE_WAITING_FOR_SIGNAL:
- if (peak > mThreshold) {
- mState = STATE_WAITING_FOR_LOCK;
- //ALOGD("%5d: switch to STATE_WAITING_FOR_LOCK", mFrameCounter);
- resetAccumulator();
- }
- break;
-
- case STATE_WAITING_FOR_LOCK:
- mSinAccumulator += sample * sinf(mInputPhase);
- mCosAccumulator += sample * cosf(mInputPhase);
- mFramesAccumulated++;
- // Must be a multiple of the period or the calculation will not be accurate.
- if (mFramesAccumulated == mSinePeriod * PERIODS_NEEDED_FOR_LOCK) {
- double phaseOffset = 0.0;
- setMagnitude(calculateMagnitude(&phaseOffset));
-// ALOGD("%s() mag = %f, offset = %f, prev = %f",
-// __func__, mMagnitude, mPhaseOffset, mPreviousPhaseOffset);
- if (mMagnitude > mThreshold) {
- if (abs(phaseOffset) < kMaxPhaseError) {
- mState = STATE_LOCKED;
-// ALOGD("%5d: switch to STATE_LOCKED", mFrameCounter);
- }
- // Adjust mInputPhase to match measured phase
- mInputPhase += phaseOffset;
- }
- resetAccumulator();
- }
- incrementInputPhase();
- break;
-
- case STATE_LOCKED: {
- // Predict next sine value
- double predicted = sinf(mInputPhase) * mMagnitude;
- double diff = predicted - sample;
- double absDiff = fabs(diff);
- mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
- if (absDiff > mScaledTolerance) {
- result = ERROR_GLITCHES;
- onGlitchStart();
-// LOGI("diff glitch detected, absDiff = %g", absDiff);
- } else {
- mSumSquareSignal += predicted * predicted;
- mSumSquareNoise += diff * diff;
- // Track incoming signal and slowly adjust magnitude to account
- // for drift in the DRC or AGC.
- mSinAccumulator += sample * sinf(mInputPhase);
- mCosAccumulator += sample * cosf(mInputPhase);
- mFramesAccumulated++;
- // Must be a multiple of the period or the calculation will not be accurate.
- if (mFramesAccumulated == mSinePeriod) {
- const double coefficient = 0.1;
- double phaseOffset = 0.0;
- double magnitude = calculateMagnitude(&phaseOffset);
- // One pole averaging filter.
- setMagnitude((mMagnitude * (1.0 - coefficient)) + (magnitude * coefficient));
-
- mMeanSquareNoise = mSumSquareNoise * mInverseSinePeriod;
- mMeanSquareSignal = mSumSquareSignal * mInverseSinePeriod;
- resetAccumulator();
-
- if (abs(phaseOffset) > kMaxPhaseError) {
- result = ERROR_GLITCHES;
- onGlitchStart();
- ALOGD("phase glitch detected, phaseOffset = %g", phaseOffset);
- } else if (mMagnitude < mThreshold) {
- result = ERROR_GLITCHES;
- onGlitchStart();
- ALOGD("magnitude glitch detected, mMagnitude = %g", mMagnitude);
- }
- }
- }
- incrementInputPhase();
- } break;
-
- case STATE_GLITCHING: {
- // Predict next sine value
- mGlitchLength++;
- double predicted = sinf(mInputPhase) * mMagnitude;
- double diff = predicted - sample;
- double absDiff = fabs(diff);
- mMaxGlitchDelta = std::max(mMaxGlitchDelta, absDiff);
- if (absDiff < mScaledTolerance) { // close enough?
- // If we get a full sine period of non-glitch samples in a row then consider the glitch over.
- // We don't want to just consider a zero crossing the end of a glitch.
- if (mNonGlitchCount++ > mSinePeriod) {
- onGlitchEnd();
- }
- } else {
- mNonGlitchCount = 0;
- if (mGlitchLength > (4 * mSinePeriod)) {
- relock();
- }
- }
- incrementInputPhase();
- } break;
-
- case NUM_STATES: // not a real state
- break;
- }
-
- mFrameCounter++;
-
- return result;
- }
-
- // advance and wrap phase
- void incrementInputPhase() {
- mInputPhase += mPhaseIncrement;
- if (mInputPhase > M_PI) {
- mInputPhase -= (2.0 * M_PI);
- }
- }
-
- // advance and wrap phase
- void incrementOutputPhase() {
- mOutputPhase += mPhaseIncrement;
- if (mOutputPhase > M_PI) {
- mOutputPhase -= (2.0 * M_PI);
- }
- }
-
- /**
- * @param frameData upon return, contains the reference sine wave
- * @param channelCount
- */
- result_code processOutputFrame(float *frameData, int channelCount) override {
- float output = 0.0f;
- // Output sine wave so we can measure it.
- if (mState != STATE_IDLE) {
- float sinOut = sinf(mOutputPhase);
- incrementOutputPhase();
- output = (sinOut * mOutputAmplitude)
- + (mWhiteNoise.nextRandomDouble() * kNoiseAmplitude);
- // ALOGD("sin(%f) = %f, %f\n", mOutputPhase, sinOut, mPhaseIncrement);
- }
- frameData[0] = output;
- for (int i = 1; i < channelCount; i++) {
- frameData[i] = 0.0f;
- }
- return RESULT_OK;
- }
-
- void onGlitchStart() {
- mGlitchCount++;
-// ALOGD("%5d: STARTED a glitch # %d", mFrameCounter, mGlitchCount);
- mState = STATE_GLITCHING;
- mGlitchLength = 1;
- mNonGlitchCount = 0;
- }
-
- void onGlitchEnd() {
-// ALOGD("%5d: ENDED a glitch # %d, length = %d", mFrameCounter, mGlitchCount, mGlitchLength);
- mState = STATE_LOCKED;
- resetAccumulator();
- }
-
- // reset the sine wave detector
- void resetAccumulator() {
- mFramesAccumulated = 0;
- mSinAccumulator = 0.0;
- mCosAccumulator = 0.0;
- mSumSquareSignal = 0.0;
- mSumSquareNoise = 0.0;
- }
-
- void relock() {
-// ALOGD("relock: %d because of a very long %d glitch", mFrameCounter, mGlitchLength);
- mState = STATE_WAITING_FOR_LOCK;
- resetAccumulator();
- }
-
- void reset() override {
- LoopbackProcessor::reset();
- mState = STATE_IDLE;
- mDownCounter = IDLE_FRAME_COUNT;
- resetAccumulator();
- }
-
- void prepareToTest() override {
- LoopbackProcessor::prepareToTest();
- mSinePeriod = getSampleRate() / kTargetGlitchFrequency;
- mOutputPhase = 0.0f;
- mInverseSinePeriod = 1.0 / mSinePeriod;
- mPhaseIncrement = 2.0 * M_PI * mInverseSinePeriod;
- mGlitchCount = 0;
- mMaxGlitchDelta = 0.0;
- for (int i = 0; i < NUM_STATES; i++) {
- mStateFrameCounters[i] = 0;
- }
- }
-
-private:
-
- // These must match the values in GlitchActivity.java
- enum sine_state_t {
- STATE_IDLE, // beginning
- STATE_IMMUNE, // ignoring input, waiting fo HW to settle
- STATE_WAITING_FOR_SIGNAL, // looking for a loud signal
- STATE_WAITING_FOR_LOCK, // trying to lock onto the phase of the sine
- STATE_LOCKED, // locked on the sine wave, looking for glitches
- STATE_GLITCHING, // locked on the sine wave but glitching
- NUM_STATES
- };
-
- enum constants {
- // Arbitrary durations, assuming 48000 Hz
- IDLE_FRAME_COUNT = 48 * 100,
- IMMUNE_FRAME_COUNT = 48 * 100,
- PERIODS_NEEDED_FOR_LOCK = 8,
- MIN_SNR_DB = 65
- };
-
- static constexpr float kNoiseAmplitude = 0.00; // Used to experiment with warbling caused by DRC.
- static constexpr int kTargetGlitchFrequency = 607;
- static constexpr double kMaxPhaseError = M_PI * 0.05;
-
- float mTolerance = 0.10; // scaled from 0.0 to 1.0
- double mThreshold = 0.005;
- int mSinePeriod = 1; // this will be set before use
- double mInverseSinePeriod = 1.0;
-
- int32_t mStateFrameCounters[NUM_STATES];
-
- double mPhaseIncrement = 0.0;
- double mInputPhase = 0.0;
- double mOutputPhase = 0.0;
- double mMagnitude = 0.0;
- int32_t mFramesAccumulated = 0;
- double mSinAccumulator = 0.0;
- double mCosAccumulator = 0.0;
- double mMaxGlitchDelta = 0.0;
- int32_t mGlitchCount = 0;
- int32_t mNonGlitchCount = 0;
- int32_t mGlitchLength = 0;
- // This is used for processing every frame so we cache it here.
- double mScaledTolerance = 0.0;
- int mDownCounter = IDLE_FRAME_COUNT;
- int32_t mFrameCounter = 0;
- double mOutputAmplitude = 0.75;
-
- int32_t mForceGlitchDuration = 0; // if > 0 then force a glitch for debugging
- int32_t mForceGlitchCounter = 4 * 48000; // count down and trigger at zero
-
- // measure background noise continuously as a deviation from the expected signal
- double mSumSquareSignal = 0.0;
- double mSumSquareNoise = 0.0;
- double mMeanSquareSignal = 0.0;
- double mMeanSquareNoise = 0.0;
-
- PeakDetector mPeakFollower;
-
- PseudoRandom mWhiteNoise;
-
- sine_state_t mState = STATE_IDLE;
-};
-
-
-#endif //ANALYZER_GLITCH_ANALYZER_H
diff --git a/media/libaaudio/examples/loopback/src/analyzer/LatencyAnalyzer.h b/media/libaaudio/examples/loopback/src/analyzer/LatencyAnalyzer.h
deleted file mode 100644
index e506791..0000000
--- a/media/libaaudio/examples/loopback/src/analyzer/LatencyAnalyzer.h
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tools for measuring latency and for detecting glitches.
- * These classes are pure math and can be used with any audio system.
- */
-
-#ifndef ANALYZER_LATENCY_ANALYZER_H
-#define ANALYZER_LATENCY_ANALYZER_H
-
-#include <algorithm>
-#include <assert.h>
-#include <cctype>
-#include <iomanip>
-#include <iostream>
-#include <math.h>
-#include <memory>
-#include <sstream>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <vector>
-
-#include "PeakDetector.h"
-#include "PseudoRandom.h"
-#include "RandomPulseGenerator.h"
-
-// This is used when the code is in Oboe.
-#ifndef ALOGD
-#define ALOGD printf
-#define ALOGE printf
-#define ALOGW printf
-#endif
-
-#define LOOPBACK_RESULT_TAG "RESULT: "
-
-static constexpr int32_t kDefaultSampleRate = 48000;
-static constexpr int32_t kMillisPerSecond = 1000;
-static constexpr int32_t kMaxLatencyMillis = 700; // arbitrary and generous
-static constexpr double kMinimumConfidence = 0.2;
-
-struct LatencyReport {
- int32_t latencyInFrames = 0.0;
- double confidence = 0.0;
-
- void reset() {
- latencyInFrames = 0;
- confidence = 0.0;
- }
-};
-
-// Calculate a normalized cross correlation.
-static double calculateNormalizedCorrelation(const float *a,
- const float *b,
- int windowSize) {
- double correlation = 0.0;
- double sumProducts = 0.0;
- double sumSquares = 0.0;
-
- // Correlate a against b.
- for (int i = 0; i < windowSize; i++) {
- float s1 = a[i];
- float s2 = b[i];
- // Use a normalized cross-correlation.
- sumProducts += s1 * s2;
- sumSquares += ((s1 * s1) + (s2 * s2));
- }
-
- if (sumSquares >= 1.0e-9) {
- correlation = 2.0 * sumProducts / sumSquares;
- }
- return correlation;
-}
-
-static double calculateRootMeanSquare(float *data, int32_t numSamples) {
- double sum = 0.0;
- for (int32_t i = 0; i < numSamples; i++) {
- float sample = data[i];
- sum += sample * sample;
- }
- return sqrt(sum / numSamples);
-}
-
-/**
- * Monophonic recording with processing.
- */
-class AudioRecording
-{
-public:
-
- void allocate(int maxFrames) {
- mData = std::make_unique<float[]>(maxFrames);
- mMaxFrames = maxFrames;
- }
-
- // Write SHORT data from the first channel.
- int32_t write(int16_t *inputData, int32_t inputChannelCount, int32_t numFrames) {
- // stop at end of buffer
- if ((mFrameCounter + numFrames) > mMaxFrames) {
- numFrames = mMaxFrames - mFrameCounter;
- }
- for (int i = 0; i < numFrames; i++) {
- mData[mFrameCounter++] = inputData[i * inputChannelCount] * (1.0f / 32768);
- }
- return numFrames;
- }
-
- // Write FLOAT data from the first channel.
- int32_t write(float *inputData, int32_t inputChannelCount, int32_t numFrames) {
- // stop at end of buffer
- if ((mFrameCounter + numFrames) > mMaxFrames) {
- numFrames = mMaxFrames - mFrameCounter;
- }
- for (int i = 0; i < numFrames; i++) {
- mData[mFrameCounter++] = inputData[i * inputChannelCount];
- }
- return numFrames;
- }
-
- // Write FLOAT data from the first channel.
- int32_t write(float sample) {
- // stop at end of buffer
- if (mFrameCounter < mMaxFrames) {
- mData[mFrameCounter++] = sample;
- return 1;
- }
- return 0;
- }
-
- void clear() {
- mFrameCounter = 0;
- }
- int32_t size() const {
- return mFrameCounter;
- }
-
- bool isFull() const {
- return mFrameCounter >= mMaxFrames;
- }
-
- float *getData() const {
- return mData.get();
- }
-
- void setSampleRate(int32_t sampleRate) {
- mSampleRate = sampleRate;
- }
-
- int32_t getSampleRate() const {
- return mSampleRate;
- }
-
- /**
- * Square the samples so they are all positive and so the peaks are emphasized.
- */
- void square() {
- float *x = mData.get();
- for (int i = 0; i < mFrameCounter; i++) {
- x[i] *= x[i];
- }
- }
-
- /**
- * Amplify a signal so that the peak matches the specified target.
- *
- * @param target final max value
- * @return gain applied to signal
- */
- float normalize(float target) {
- float maxValue = 1.0e-9f;
- for (int i = 0; i < mFrameCounter; i++) {
- maxValue = std::max(maxValue, abs(mData[i]));
- }
- float gain = target / maxValue;
- for (int i = 0; i < mFrameCounter; i++) {
- mData[i] *= gain;
- }
- return gain;
- }
-
-private:
- std::unique_ptr<float[]> mData;
- int32_t mFrameCounter = 0;
- int32_t mMaxFrames = 0;
- int32_t mSampleRate = kDefaultSampleRate; // common default
-};
-
-static int measureLatencyFromPulse(AudioRecording &recorded,
- AudioRecording &pulse,
- LatencyReport *report) {
-
- report->latencyInFrames = 0;
- report->confidence = 0.0;
-
- int numCorrelations = recorded.size() - pulse.size();
- if (numCorrelations < 10) {
- ALOGE("%s() recording too small = %d frames\n", __func__, recorded.size());
- return -1;
- }
- std::unique_ptr<float[]> correlations= std::make_unique<float[]>(numCorrelations);
-
- // Correlate pulse against the recorded data.
- for (int i = 0; i < numCorrelations; i++) {
- float correlation = (float) calculateNormalizedCorrelation(&recorded.getData()[i],
- &pulse.getData()[0],
- pulse.size());
- correlations[i] = correlation;
- }
-
- // Find highest peak in correlation array.
- float peakCorrelation = 0.0;
- int peakIndex = -1;
- for (int i = 0; i < numCorrelations; i++) {
- float value = abs(correlations[i]);
- if (value > peakCorrelation) {
- peakCorrelation = value;
- peakIndex = i;
- }
- }
- if (peakIndex < 0) {
- ALOGE("%s() no signal for correlation\n", __func__);
- return -2;
- }
-
- report->latencyInFrames = peakIndex;
- report->confidence = peakCorrelation;
-
- return 0;
-}
-
-// ====================================================================================
-class LoopbackProcessor {
-public:
- virtual ~LoopbackProcessor() = default;
-
- enum result_code {
- RESULT_OK = 0,
- ERROR_NOISY = -99,
- ERROR_VOLUME_TOO_LOW,
- ERROR_VOLUME_TOO_HIGH,
- ERROR_CONFIDENCE,
- ERROR_INVALID_STATE,
- ERROR_GLITCHES,
- ERROR_NO_LOCK
- };
-
- virtual void prepareToTest() {
- reset();
- }
-
- virtual void reset() {
- mResult = 0;
- mResetCount++;
- }
-
- virtual result_code processInputFrame(float *frameData, int channelCount) = 0;
- virtual result_code processOutputFrame(float *frameData, int channelCount) = 0;
-
- void process(float *inputData, int inputChannelCount, int numInputFrames,
- float *outputData, int outputChannelCount, int numOutputFrames) {
- int numBoth = std::min(numInputFrames, numOutputFrames);
- // Process one frame at a time.
- for (int i = 0; i < numBoth; i++) {
- processInputFrame(inputData, inputChannelCount);
- inputData += inputChannelCount;
- processOutputFrame(outputData, outputChannelCount);
- outputData += outputChannelCount;
- }
- // If there is more input than output.
- for (int i = numBoth; i < numInputFrames; i++) {
- processInputFrame(inputData, inputChannelCount);
- inputData += inputChannelCount;
- }
- // If there is more output than input.
- for (int i = numBoth; i < numOutputFrames; i++) {
- processOutputFrame(outputData, outputChannelCount);
- outputData += outputChannelCount;
- }
- }
-
- virtual std::string analyze() = 0;
-
- virtual void printStatus() {};
-
- int32_t getResult() {
- return mResult;
- }
-
- void setResult(int32_t result) {
- mResult = result;
- }
-
- virtual bool isDone() {
- return false;
- }
-
- virtual int save(const char *fileName) {
- (void) fileName;
- return -1;
- }
-
- virtual int load(const char *fileName) {
- (void) fileName;
- return -1;
- }
-
- virtual void setSampleRate(int32_t sampleRate) {
- mSampleRate = sampleRate;
- }
-
- int32_t getSampleRate() const {
- return mSampleRate;
- }
-
- int32_t getResetCount() const {
- return mResetCount;
- }
-
- /** Called when not enough input frames could be read after synchronization.
- */
- virtual void onInsufficientRead() {
- reset();
- }
-
-protected:
- int32_t mResetCount = 0;
-
-private:
- int32_t mSampleRate = kDefaultSampleRate;
- int32_t mResult = 0;
-};
-
-class LatencyAnalyzer : public LoopbackProcessor {
-public:
-
- LatencyAnalyzer() : LoopbackProcessor() {}
- virtual ~LatencyAnalyzer() = default;
-
- virtual int32_t getProgress() const = 0;
-
- virtual int getState() = 0;
-
- // @return latency in frames
- virtual int32_t getMeasuredLatency() = 0;
-
- virtual double getMeasuredConfidence() = 0;
-
- virtual double getBackgroundRMS() = 0;
-
- virtual double getSignalRMS() = 0;
-
-};
-
-// ====================================================================================
-/**
- * Measure latency given a loopback stream data.
- * Use an encoded bit train as the sound source because it
- * has an unambiguous correlation value.
- * Uses a state machine to cycle through various stages.
- *
- */
-class PulseLatencyAnalyzer : public LatencyAnalyzer {
-public:
-
- PulseLatencyAnalyzer() : LatencyAnalyzer() {
- int32_t maxLatencyFrames = getSampleRate() * kMaxLatencyMillis / kMillisPerSecond;
- int32_t numPulseBits = getSampleRate() * kPulseLengthMillis
- / (kFramesPerEncodedBit * kMillisPerSecond);
- int32_t pulseLength = numPulseBits * kFramesPerEncodedBit;
- mFramesToRecord = pulseLength + maxLatencyFrames;
- mAudioRecording.allocate(mFramesToRecord);
- mAudioRecording.setSampleRate(getSampleRate());
- generateRandomPulse(pulseLength);
- }
-
- void generateRandomPulse(int32_t pulseLength) {
- mPulse.allocate(pulseLength);
- RandomPulseGenerator pulser(kFramesPerEncodedBit);
- for (int i = 0; i < pulseLength; i++) {
- mPulse.write(pulser.nextFloat());
- }
- }
-
- int getState() override {
- return mState;
- }
-
- void setSampleRate(int32_t sampleRate) override {
- LoopbackProcessor::setSampleRate(sampleRate);
- mAudioRecording.setSampleRate(sampleRate);
- }
-
- void reset() override {
- LoopbackProcessor::reset();
- mDownCounter = getSampleRate() / 2;
- mLoopCounter = 0;
-
- mPulseCursor = 0;
- mBackgroundSumSquare = 0.0f;
- mBackgroundSumCount = 0;
- mBackgroundRMS = 0.0f;
- mSignalRMS = 0.0f;
-
- mState = STATE_MEASURE_BACKGROUND;
- mAudioRecording.clear();
- mLatencyReport.reset();
- }
-
- bool hasEnoughData() {
- return mAudioRecording.isFull();
- }
-
- bool isDone() override {
- return mState == STATE_DONE;
- }
-
- int32_t getProgress() const override {
- return mAudioRecording.size();
- }
-
- std::string analyze() override {
- std::stringstream report;
- report << "PulseLatencyAnalyzer ---------------\n";
- report << LOOPBACK_RESULT_TAG "test.state = "
- << std::setw(8) << mState << "\n";
- report << LOOPBACK_RESULT_TAG "test.state.name = "
- << convertStateToText(mState) << "\n";
- report << LOOPBACK_RESULT_TAG "background.rms = "
- << std::setw(8) << mBackgroundRMS << "\n";
-
- int32_t newResult = RESULT_OK;
- if (mState != STATE_GOT_DATA) {
- report << "WARNING - Bad state. Check volume on device.\n";
- // setResult(ERROR_INVALID_STATE);
- } else {
- float gain = mAudioRecording.normalize(1.0f);
- measureLatencyFromPulse(mAudioRecording,
- mPulse,
- &mLatencyReport);
-
- if (mLatencyReport.confidence < kMinimumConfidence) {
- report << " ERROR - confidence too low!";
- newResult = ERROR_CONFIDENCE;
- } else {
- mSignalRMS = calculateRootMeanSquare(
- &mAudioRecording.getData()[mLatencyReport.latencyInFrames], mPulse.size())
- / gain;
- }
- double latencyMillis = kMillisPerSecond * (double) mLatencyReport.latencyInFrames
- / getSampleRate();
- report << LOOPBACK_RESULT_TAG "latency.frames = " << std::setw(8)
- << mLatencyReport.latencyInFrames << "\n";
- report << LOOPBACK_RESULT_TAG "latency.msec = " << std::setw(8)
- << latencyMillis << "\n";
- report << LOOPBACK_RESULT_TAG "latency.confidence = " << std::setw(8)
- << mLatencyReport.confidence << "\n";
- }
- mState = STATE_DONE;
- if (getResult() == RESULT_OK) {
- setResult(newResult);
- }
-
- return report.str();
- }
-
- int32_t getMeasuredLatency() override {
- return mLatencyReport.latencyInFrames;
- }
-
- double getMeasuredConfidence() override {
- return mLatencyReport.confidence;
- }
-
- double getBackgroundRMS() override {
- return mBackgroundRMS;
- }
-
- double getSignalRMS() override {
- return mSignalRMS;
- }
-
- void printStatus() override {
- ALOGD("st = %d", mState);
- }
-
- result_code processInputFrame(float *frameData, int channelCount) override {
- echo_state nextState = mState;
- mLoopCounter++;
-
- switch (mState) {
- case STATE_MEASURE_BACKGROUND:
- // Measure background RMS on channel 0
- mBackgroundSumSquare += frameData[0] * frameData[0];
- mBackgroundSumCount++;
- mDownCounter--;
- if (mDownCounter <= 0) {
- mBackgroundRMS = sqrtf(mBackgroundSumSquare / mBackgroundSumCount);
- nextState = STATE_IN_PULSE;
- mPulseCursor = 0;
- }
- break;
-
- case STATE_IN_PULSE:
- // Record input until the mAudioRecording is full.
- mAudioRecording.write(frameData, channelCount, 1);
- if (hasEnoughData()) {
- nextState = STATE_GOT_DATA;
- }
- break;
-
- case STATE_GOT_DATA:
- case STATE_DONE:
- default:
- break;
- }
-
- mState = nextState;
- return RESULT_OK;
- }
-
- result_code processOutputFrame(float *frameData, int channelCount) override {
- switch (mState) {
- case STATE_IN_PULSE:
- if (mPulseCursor < mPulse.size()) {
- float pulseSample = mPulse.getData()[mPulseCursor++];
- for (int i = 0; i < channelCount; i++) {
- frameData[i] = pulseSample;
- }
- } else {
- for (int i = 0; i < channelCount; i++) {
- frameData[i] = 0;
- }
- }
- break;
-
- case STATE_MEASURE_BACKGROUND:
- case STATE_GOT_DATA:
- case STATE_DONE:
- default:
- for (int i = 0; i < channelCount; i++) {
- frameData[i] = 0.0f; // silence
- }
- break;
- }
-
- return RESULT_OK;
- }
-
-private:
-
- enum echo_state {
- STATE_MEASURE_BACKGROUND,
- STATE_IN_PULSE,
- STATE_GOT_DATA, // must match RoundTripLatencyActivity.java
- STATE_DONE,
- };
-
- const char *convertStateToText(echo_state state) {
- switch (state) {
- case STATE_MEASURE_BACKGROUND:
- return "INIT";
- case STATE_IN_PULSE:
- return "PULSE";
- case STATE_GOT_DATA:
- return "GOT_DATA";
- case STATE_DONE:
- return "DONE";
- }
- return "UNKNOWN";
- }
-
- int32_t mDownCounter = 500;
- int32_t mLoopCounter = 0;
- echo_state mState = STATE_MEASURE_BACKGROUND;
-
- static constexpr int32_t kFramesPerEncodedBit = 8; // multiple of 2
- static constexpr int32_t kPulseLengthMillis = 500;
-
- AudioRecording mPulse;
- int32_t mPulseCursor = 0;
-
- double mBackgroundSumSquare = 0.0;
- int32_t mBackgroundSumCount = 0;
- double mBackgroundRMS = 0.0;
- double mSignalRMS = 0.0;
- int32_t mFramesToRecord = 0;
-
- AudioRecording mAudioRecording; // contains only the input after starting the pulse
- LatencyReport mLatencyReport;
-};
-
-#endif // ANALYZER_LATENCY_ANALYZER_H
diff --git a/media/libaaudio/examples/loopback/src/analyzer/ManchesterEncoder.h b/media/libaaudio/examples/loopback/src/analyzer/ManchesterEncoder.h
deleted file mode 100644
index 0a4bd5b..0000000
--- a/media/libaaudio/examples/loopback/src/analyzer/ManchesterEncoder.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANALYZER_MANCHESTER_ENCODER_H
-#define ANALYZER_MANCHESTER_ENCODER_H
-
-#include <cstdint>
-
-/**
- * Encode bytes using Manchester Coding scheme.
- *
- * Manchester Code is self clocking.
- * There is a transition in the middle of every bit.
- * Zero is high then low.
- * One is low then high.
- *
- * This avoids having long DC sections that would droop when
- * passed though analog circuits with AC coupling.
- *
- * IEEE 802.3 compatible.
- */
-
-class ManchesterEncoder {
-public:
- ManchesterEncoder(int samplesPerPulse)
- : mSamplesPerPulse(samplesPerPulse)
- , mSamplesPerPulseHalf(samplesPerPulse / 2)
- , mCursor(samplesPerPulse) {
- }
-
- virtual ~ManchesterEncoder() = default;
-
- /**
- * This will be called when the next byte is needed.
- * @return
- */
- virtual uint8_t onNextByte() = 0;
-
- /**
- * Generate the next floating point sample.
- * @return
- */
- virtual float nextFloat() {
- advanceSample();
- if (mCurrentBit) {
- return (mCursor < mSamplesPerPulseHalf) ? -1.0f : 1.0f; // one
- } else {
- return (mCursor < mSamplesPerPulseHalf) ? 1.0f : -1.0f; // zero
- }
- }
-
-protected:
- /**
- * This will be called when a new bit is ready to be encoded.
- * It can be used to prepare the encoded samples.
- * @param current
- */
- virtual void onNextBit(bool /* current */) {};
-
- void advanceSample() {
- // Are we ready for a new bit?
- if (++mCursor >= mSamplesPerPulse) {
- mCursor = 0;
- if (mBitsLeft == 0) {
- mCurrentByte = onNextByte();
- mBitsLeft = 8;
- }
- --mBitsLeft;
- mCurrentBit = (mCurrentByte >> mBitsLeft) & 1;
- onNextBit(mCurrentBit);
- }
- }
-
- bool getCurrentBit() {
- return mCurrentBit;
- }
-
- const int mSamplesPerPulse;
- const int mSamplesPerPulseHalf;
- int mCursor;
- int mBitsLeft = 0;
- uint8_t mCurrentByte = 0;
- bool mCurrentBit = false;
-};
-#endif //ANALYZER_MANCHESTER_ENCODER_H
diff --git a/media/libaaudio/examples/loopback/src/analyzer/PeakDetector.h b/media/libaaudio/examples/loopback/src/analyzer/PeakDetector.h
deleted file mode 100644
index 4b3b4e7..0000000
--- a/media/libaaudio/examples/loopback/src/analyzer/PeakDetector.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANALYZER_PEAK_DETECTOR_H
-#define ANALYZER_PEAK_DETECTOR_H
-
-#include <math.h>
-
-/**
- * Measure a peak envelope by rising with the peaks,
- * and decaying exponentially after each peak.
- * The absolute value of the input signal is used.
- */
-class PeakDetector {
-public:
-
- void reset() {
- mLevel = 0.0;
- }
-
- double process(double input) {
- mLevel *= mDecay; // exponential decay
- input = fabs(input);
- // never fall below the input signal
- if (input > mLevel) {
- mLevel = input;
- }
- return mLevel;
- }
-
- double getLevel() const {
- return mLevel;
- }
-
- double getDecay() const {
- return mDecay;
- }
-
- /**
- * Multiply the level by this amount on every iteration.
- * This provides an exponential decay curve.
- * A value just under 1.0 is best, for example, 0.99;
- * @param decay scale level for each input
- */
- void setDecay(double decay) {
- mDecay = decay;
- }
-
-private:
- static constexpr double kDefaultDecay = 0.99f;
-
- double mLevel = 0.0;
- double mDecay = kDefaultDecay;
-};
-#endif //ANALYZER_PEAK_DETECTOR_H
diff --git a/media/libaaudio/examples/loopback/src/analyzer/PseudoRandom.h b/media/libaaudio/examples/loopback/src/analyzer/PseudoRandom.h
deleted file mode 100644
index 1c4938c..0000000
--- a/media/libaaudio/examples/loopback/src/analyzer/PseudoRandom.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#ifndef ANALYZER_PSEUDORANDOM_H
-#define ANALYZER_PSEUDORANDOM_H
-
-#include <cctype>
-
-class PseudoRandom {
-public:
- PseudoRandom(int64_t seed = 99887766)
- : mSeed(seed)
- {}
-
- /**
- * Returns the next random double from -1.0 to 1.0
- *
- * @return value from -1.0 to 1.0
- */
- double nextRandomDouble() {
- return nextRandomInteger() * (0.5 / (((int32_t)1) << 30));
- }
-
- /** Calculate random 32 bit number using linear-congruential method
- * with known real-time performance.
- */
- int32_t nextRandomInteger() {
-#if __has_builtin(__builtin_mul_overflow) && __has_builtin(__builtin_add_overflow)
- int64_t prod;
- // Use values for 64-bit sequence from MMIX by Donald Knuth.
- __builtin_mul_overflow(mSeed, (int64_t)6364136223846793005, &prod);
- __builtin_add_overflow(prod, (int64_t)1442695040888963407, &mSeed);
-#else
- mSeed = (mSeed * (int64_t)6364136223846793005) + (int64_t)1442695040888963407;
-#endif
- return (int32_t) (mSeed >> 32); // The higher bits have a longer sequence.
- }
-
-private:
- int64_t mSeed;
-};
-
-#endif //ANALYZER_PSEUDORANDOM_H
diff --git a/media/libaaudio/examples/loopback/src/analyzer/RandomPulseGenerator.h b/media/libaaudio/examples/loopback/src/analyzer/RandomPulseGenerator.h
deleted file mode 100644
index 030050b..0000000
--- a/media/libaaudio/examples/loopback/src/analyzer/RandomPulseGenerator.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANALYZER_RANDOM_PULSE_GENERATOR_H
-#define ANALYZER_RANDOM_PULSE_GENERATOR_H
-
-#include <stdlib.h>
-#include "RoundedManchesterEncoder.h"
-
-/**
- * Encode random ones and zeros using Manchester Code per IEEE 802.3.
- */
-class RandomPulseGenerator : public RoundedManchesterEncoder {
-public:
- RandomPulseGenerator(int samplesPerPulse)
- : RoundedManchesterEncoder(samplesPerPulse) {
- }
-
- virtual ~RandomPulseGenerator() = default;
-
- /**
- * This will be called when the next byte is needed.
- * @return random byte
- */
- uint8_t onNextByte() override {
- return static_cast<uint8_t>(rand());
- }
-};
-
-#endif //ANALYZER_RANDOM_PULSE_GENERATOR_H
diff --git a/media/libaaudio/examples/loopback/src/analyzer/RoundedManchesterEncoder.h b/media/libaaudio/examples/loopback/src/analyzer/RoundedManchesterEncoder.h
deleted file mode 100644
index f2eba84..0000000
--- a/media/libaaudio/examples/loopback/src/analyzer/RoundedManchesterEncoder.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Copyright 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
-#define ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
-
-#include <math.h>
-#include <memory.h>
-#include <stdlib.h>
-#include "ManchesterEncoder.h"
-
-/**
- * Encode bytes using Manchester Code.
- * Round the edges using a half cosine to reduce ringing caused by a hard edge.
- */
-
-class RoundedManchesterEncoder : public ManchesterEncoder {
-public:
- RoundedManchesterEncoder(int samplesPerPulse)
- : ManchesterEncoder(samplesPerPulse) {
- int rampSize = samplesPerPulse / 4;
- mZeroAfterZero = std::make_unique<float[]>(samplesPerPulse);
- mZeroAfterOne = std::make_unique<float[]>(samplesPerPulse);
-
- int sampleIndex = 0;
- for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
- float phase = (rampIndex + 1) * M_PI / rampSize;
- float sample = -cosf(phase);
- mZeroAfterZero[sampleIndex] = sample;
- mZeroAfterOne[sampleIndex] = 1.0f;
- sampleIndex++;
- }
- for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
- mZeroAfterZero[sampleIndex] = 1.0f;
- mZeroAfterOne[sampleIndex] = 1.0f;
- sampleIndex++;
- }
- for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
- float phase = (rampIndex + 1) * M_PI / rampSize;
- float sample = cosf(phase);
- mZeroAfterZero[sampleIndex] = sample;
- mZeroAfterOne[sampleIndex] = sample;
- sampleIndex++;
- }
- for (int rampIndex = 0; rampIndex < rampSize; rampIndex++) {
- mZeroAfterZero[sampleIndex] = -1.0f;
- mZeroAfterOne[sampleIndex] = -1.0f;
- sampleIndex++;
- }
- }
-
- void onNextBit(bool current) override {
- // Do we need to use the rounded edge?
- mCurrentSamples = (current ^ mPreviousBit)
- ? mZeroAfterOne.get()
- : mZeroAfterZero.get();
- mPreviousBit = current;
- }
-
- float nextFloat() override {
- advanceSample();
- float output = mCurrentSamples[mCursor];
- if (getCurrentBit()) output = -output;
- return output;
- }
-
-private:
-
- bool mPreviousBit = false;
- float *mCurrentSamples = nullptr;
- std::unique_ptr<float[]> mZeroAfterZero;
- std::unique_ptr<float[]> mZeroAfterOne;
-};
-
-#endif //ANALYZER_ROUNDED_MANCHESTER_ENCODER_H
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 0d2ec70..6fff568 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -36,8 +36,12 @@
#include "AAudioSimpleRecorder.h"
#include "AAudioExampleUtils.h"
+// Get logging macros from OboeTester
+#include "android_debug.h"
+// Get signal analyzers from OboeTester
#include "analyzer/GlitchAnalyzer.h"
#include "analyzer/LatencyAnalyzer.h"
+
#include "../../utils/AAudioExampleUtils.h"
// V0.4.00 = rectify and low-pass filter the echos, auto-correlate entire echo
@@ -45,8 +49,9 @@
// fix -n option to set output buffer for -tm
// plot first glitch
// V0.4.02 = allow -n0 for minimal buffer size
-// V0.5.00 = use latency analyzer from OboeTester, uses random noise for latency
-#define APP_VERSION "0.5.00"
+// V0.5.00 = use latency analyzer copied from OboeTester, uses random noise for latency
+// V0.5.01 = use latency analyzer directly from OboeTester in external/oboe
+#define APP_VERSION "0.5.01"
// Tag for machine readable results as property = value pairs
#define RESULT_TAG "RESULT: "
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index e0ac7e5..6666788 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -689,7 +689,7 @@
aaudio_performance_mode_t mode) __INTRODUCED_IN(26);
/**
- * Set the intended use case for the stream.
+ * Set the intended use case for the output stream.
*
* The AAudio system will use this information to optimize the
* behavior of the stream.
@@ -706,7 +706,7 @@
aaudio_usage_t usage) __INTRODUCED_IN(28);
/**
- * Set the type of audio data that the stream will carry.
+ * Set the type of audio data that the output stream will carry.
*
* The AAudio system will use this information to optimize the
* behavior of the stream.
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 717f31a..328ceda 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -85,6 +85,7 @@
"libcutils",
"libutils",
"libbinder",
+ "aaudio-aidl-cpp",
],
cflags: [
@@ -114,11 +115,10 @@
"client/AudioStreamInternalPlay.cpp",
"client/IsochronousClockModel.cpp",
"binding/AudioEndpointParcelable.cpp",
+ "binding/AAudioBinderAdapter.cpp",
"binding/AAudioBinderClient.cpp",
"binding/AAudioStreamRequest.cpp",
"binding/AAudioStreamConfiguration.cpp",
- "binding/IAAudioClient.cpp",
- "binding/IAAudioService.cpp",
"binding/RingBufferParcelable.cpp",
"binding/SharedMemoryParcelable.cpp",
"binding/SharedRegionParcelable.cpp",
@@ -138,3 +138,33 @@
misc_undefined: ["bounds"],
},
}
+
+aidl_interface {
+ name: "aaudio-aidl",
+ unstable: true,
+ local_include_dir: "binding/aidl",
+ srcs: [
+ "binding/aidl/aaudio/Endpoint.aidl",
+ "binding/aidl/aaudio/RingBuffer.aidl",
+ "binding/aidl/aaudio/SharedRegion.aidl",
+ "binding/aidl/aaudio/StreamParameters.aidl",
+ "binding/aidl/aaudio/StreamRequest.aidl",
+ "binding/aidl/aaudio/IAAudioClient.aidl",
+ "binding/aidl/aaudio/IAAudioService.aidl",
+ ],
+ imports: [
+ "audio_common-aidl",
+ "shared-file-region-aidl",
+ ],
+ backend:
+ {
+ cpp: {
+ enabled: true,
+ },
+ java: {
+ // TODO: need to have audio_common-aidl available in Java to enable
+ // this.
+ enabled: false,
+ },
+ },
+}
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.cpp b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
new file mode 100644
index 0000000..2b2fe6d
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <binding/AAudioBinderAdapter.h>
+#include <utility/AAudioUtilities.h>
+
+namespace aaudio {
+
+using android::binder::Status;
+
+AAudioBinderAdapter::AAudioBinderAdapter(IAAudioService* delegate)
+ : mDelegate(delegate) {}
+
+void AAudioBinderAdapter::registerClient(const android::sp<IAAudioClient>& client) {
+ mDelegate->registerClient(client);
+}
+
+aaudio_handle_t AAudioBinderAdapter::openStream(const AAudioStreamRequest& request,
+ AAudioStreamConfiguration& config) {
+ aaudio_handle_t result;
+ StreamParameters params;
+ Status status = mDelegate->openStream(request.parcelable(),
+ ¶ms,
+ &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ config = params;
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::closeStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->closeStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::getStreamDescription(aaudio_handle_t streamHandle,
+ AudioEndpointParcelable& endpointOut) {
+ aaudio_result_t result;
+ Endpoint endpoint;
+ Status status = mDelegate->getStreamDescription(streamHandle,
+ &endpoint,
+ &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ endpointOut = std::move(endpoint);
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::startStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->startStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::pauseStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->pauseStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::stopStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->stopStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::flushStream(aaudio_handle_t streamHandle) {
+ aaudio_result_t result;
+ Status status = mDelegate->flushStream(streamHandle, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) {
+ aaudio_result_t result;
+ Status status = mDelegate->registerAudioThread(streamHandle, clientThreadId, periodNanoseconds, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+aaudio_result_t AAudioBinderAdapter::unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId) {
+ aaudio_result_t result;
+ Status status = mDelegate->unregisterAudioThread(streamHandle, clientThreadId, &result);
+ if (!status.isOk()) {
+ result = AAudioConvert_androidToAAudioResult(status.transactionError());
+ }
+ return result;
+}
+
+} // namespace aaudio
diff --git a/media/libaaudio/src/binding/AAudioBinderAdapter.h b/media/libaaudio/src/binding/AAudioBinderAdapter.h
new file mode 100644
index 0000000..5e9ab57
--- /dev/null
+++ b/media/libaaudio/src/binding/AAudioBinderAdapter.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aaudio/IAAudioService.h>
+#include <binding/AAudioServiceInterface.h>
+
+namespace aaudio {
+
+/**
+ * An adapter that takes in an underlying IAAudioService and exposes an
+ * AAudioServiceInterface.
+ *
+ * This class is abstract: the client is expected to inherit from this class and implement those
+ * methods from AAudioServiceInterface that don't have counterparts in IAAudioService.
+ */
+class AAudioBinderAdapter : public AAudioServiceInterface {
+public:
+ explicit AAudioBinderAdapter(IAAudioService* delegate);
+
+ void registerClient(const android::sp<IAAudioClient>& client) override;
+
+ aaudio_handle_t openStream(const AAudioStreamRequest& request,
+ AAudioStreamConfiguration& configuration) override;
+
+ aaudio_result_t closeStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
+ AudioEndpointParcelable& endpoint) override;
+
+ aaudio_result_t startStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t stopStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t flushStream(aaudio_handle_t streamHandle) override;
+
+ aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId,
+ int64_t periodNanoseconds) override;
+
+ aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
+ pid_t clientThreadId) override;
+
+private:
+ IAAudioService* const mDelegate;
+};
+
+} // namespace aaudio
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.cpp b/media/libaaudio/src/binding/AAudioBinderClient.cpp
index 7b0d31f..fa5a2da 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.cpp
+++ b/media/libaaudio/src/binding/AAudioBinderClient.cpp
@@ -19,35 +19,30 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
-#include <binder/IInterface.h>
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
#include <utils/Mutex.h>
#include <utils/RefBase.h>
#include <utils/Singleton.h>
-#include <media/AudioSystem.h>
-
#include <aaudio/AAudio.h>
#include "AudioEndpointParcelable.h"
-#include "binding/AAudioBinderClient.h"
-//#include "binding/AAudioStreamRequest.h"
-//#include "binding/AAudioStreamConfiguration.h"
-//#include "binding/IAAudioService.h"
-//#include "binding/AAudioServiceMessage.h"
-//#include "AAudioServiceInterface.h"
+#include "binding/AAudioBinderClient.h"
+
+#define AAUDIO_SERVICE_NAME "media.aaudio"
using android::String16;
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
using android::IInterface;
-using android::IAAudioService;
using android::Mutex;
using android::ProcessState;
using android::sp;
+using android::status_t;
using android::wp;
+using android::binder::Status;
using namespace aaudio;
@@ -67,20 +62,18 @@
AAudioBinderClient::~AAudioBinderClient() {
ALOGV("%s - destroying %p", __func__, this);
Mutex::Autolock _l(mServiceLock);
- if (mAAudioService != 0) {
- IInterface::asBinder(mAAudioService)->unlinkToDeath(mAAudioClient);
- }
}
// TODO Share code with other service clients.
// Helper function to get access to the "AAudioService" service.
// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
-const sp<IAAudioService> AAudioBinderClient::getAAudioService() {
+std::shared_ptr<AAudioServiceInterface> AAudioBinderClient::getAAudioService() {
+ std::shared_ptr<AAudioServiceInterface> result;
sp<IAAudioService> aaudioService;
bool needToRegister = false;
{
Mutex::Autolock _l(mServiceLock);
- if (mAAudioService.get() == nullptr) {
+ if (mAdapter == nullptr) {
sp<IBinder> binder;
sp<IServiceManager> sm = defaultServiceManager();
// Try several times to get the service.
@@ -99,7 +92,8 @@
if (status != NO_ERROR) {
ALOGE("%s() - linkToDeath() returned %d", __func__, status);
}
- mAAudioService = interface_cast<IAAudioService>(binder);
+ aaudioService = interface_cast<IAAudioService>(binder);
+ mAdapter.reset(new Adapter(aaudioService, mAAudioClient));
needToRegister = true;
// Make sure callbacks can be received by mAAudioClient
ProcessState::self()->startThreadPool();
@@ -107,18 +101,18 @@
ALOGE("AAudioBinderClient could not connect to %s", AAUDIO_SERVICE_NAME);
}
}
- aaudioService = mAAudioService;
+ result = mAdapter;
}
// Do this outside the mutex lock.
if (needToRegister && aaudioService.get() != nullptr) { // new client?
aaudioService->registerClient(mAAudioClient);
}
- return aaudioService;
+ return result;
}
void AAudioBinderClient::dropAAudioService() {
Mutex::Autolock _l(mServiceLock);
- mAAudioService.clear(); // force a reconnect
+ mAdapter.reset();
}
/**
@@ -127,13 +121,13 @@
* @return handle to the stream or a negative error
*/
aaudio_handle_t AAudioBinderClient::openStream(const AAudioStreamRequest &request,
- AAudioStreamConfiguration &configurationOutput) {
+ AAudioStreamConfiguration &configuration) {
aaudio_handle_t stream;
for (int i = 0; i < 2; i++) {
- const sp<IAAudioService> &service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- stream = service->openStream(request, configurationOutput);
+ stream = service->openStream(request, configuration);
if (stream == AAUDIO_ERROR_NO_SERVICE) {
ALOGE("openStream lost connection to AAudioService.");
@@ -146,8 +140,9 @@
}
aaudio_result_t AAudioBinderClient::closeStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->closeStream(streamHandle);
}
@@ -155,33 +150,38 @@
* used to communicate with the underlying HAL or Service.
*/
aaudio_result_t AAudioBinderClient::getStreamDescription(aaudio_handle_t streamHandle,
- AudioEndpointParcelable &parcelable) {
- const sp<IAAudioService> service = getAAudioService();
+ AudioEndpointParcelable& endpointOut) {
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- return service->getStreamDescription(streamHandle, parcelable);
+
+ return service->getStreamDescription(streamHandle, endpointOut);
}
aaudio_result_t AAudioBinderClient::startStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->startStream(streamHandle);
}
aaudio_result_t AAudioBinderClient::pauseStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->pauseStream(streamHandle);
}
aaudio_result_t AAudioBinderClient::stopStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->stopStream(streamHandle);
}
aaudio_result_t AAudioBinderClient::flushStream(aaudio_handle_t streamHandle) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
+
return service->flushStream(streamHandle);
}
@@ -191,17 +191,16 @@
aaudio_result_t AAudioBinderClient::registerAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId,
int64_t periodNanoseconds) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- return service->registerAudioThread(streamHandle,
- clientThreadId,
- periodNanoseconds);
+
+ return service->registerAudioThread(streamHandle, clientThreadId, periodNanoseconds);
}
aaudio_result_t AAudioBinderClient::unregisterAudioThread(aaudio_handle_t streamHandle,
pid_t clientThreadId) {
- const sp<IAAudioService> service = getAAudioService();
+ std::shared_ptr<AAudioServiceInterface> service = getAAudioService();
if (service.get() == nullptr) return AAUDIO_ERROR_NO_SERVICE;
- return service->unregisterAudioThread(streamHandle,
- clientThreadId);
+
+ return service->unregisterAudioThread(streamHandle, clientThreadId);
}
diff --git a/media/libaaudio/src/binding/AAudioBinderClient.h b/media/libaaudio/src/binding/AAudioBinderClient.h
index e8c91fc..6a7b639 100644
--- a/media/libaaudio/src/binding/AAudioBinderClient.h
+++ b/media/libaaudio/src/binding/AAudioBinderClient.h
@@ -21,12 +21,15 @@
#include <utils/Singleton.h>
#include <aaudio/AAudio.h>
-#include "AAudioServiceDefinitions.h"
+#include <binder/IInterface.h>
+
+#include "aaudio/BnAAudioClient.h"
+#include "aaudio/IAAudioService.h"
#include "AAudioServiceInterface.h"
+#include "binding/AAudioBinderAdapter.h"
#include "binding/AAudioStreamRequest.h"
-#include "binding/AAudioStreamConfiguration.h"
#include "binding/AudioEndpointParcelable.h"
-#include "binding/IAAudioService.h"
+#include "core/AAudioStreamParameters.h"
/**
* Implements the AAudioServiceInterface by talking to the service through Binder.
@@ -44,11 +47,7 @@
virtual ~AAudioBinderClient();
- const android::sp<android::IAAudioService> getAAudioService();
-
- void dropAAudioService();
-
- void registerClient(const android::sp<android::IAAudioClient>& client __unused) override {}
+ void registerClient(const android::sp<IAAudioClient>& client __unused) override {}
/**
* @param request info needed to create the stream
@@ -64,7 +63,7 @@
* used to communicate with the underlying HAL or Service.
*/
aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
- AudioEndpointParcelable &parcelable) override;
+ AudioEndpointParcelable &endpointOut) override;
/**
* Start the flow of data.
@@ -115,8 +114,7 @@
ALOGW("onStreamChange called!");
}
- class AAudioClient : public android::IBinder::DeathRecipient , public android::BnAAudioClient
- {
+ class AAudioClient : public android::IBinder::DeathRecipient, public BnAAudioClient {
public:
AAudioClient(android::wp<AAudioBinderClient> aaudioBinderClient)
: mBinderClient(aaudioBinderClient) {
@@ -132,21 +130,66 @@
}
// implement BnAAudioClient
- void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) {
+ android::binder::Status onStreamChange(int32_t handle, int32_t opcode, int32_t value) {
+ static_assert(std::is_same_v<aaudio_handle_t, int32_t>);
android::sp<AAudioBinderClient> client = mBinderClient.promote();
if (client.get() != nullptr) {
client->onStreamChange(handle, opcode, value);
}
+ return android::binder::Status::ok();
}
private:
android::wp<AAudioBinderClient> mBinderClient;
};
-private:
+ // This adapter is used to convert the binder interface (delegate) to the AudioServiceInterface
+ // conventions (translating between data types and respective parcelables, translating error
+ // codes and calling conventions).
+ // The adapter also owns the underlying service object and is responsible to unlink its death
+ // listener when destroyed.
+ class Adapter : public AAudioBinderAdapter {
+ public:
+ Adapter(const android::sp<IAAudioService>& delegate,
+ const android::sp<AAudioClient>& aaudioClient)
+ : AAudioBinderAdapter(delegate.get()),
+ mDelegate(delegate),
+ mAAudioClient(aaudioClient) {}
- android::Mutex mServiceLock;
- android::sp<android::IAAudioService> mAAudioService;
- android::sp<AAudioClient> mAAudioClient;
+ virtual ~Adapter() {
+ if (mDelegate != nullptr) {
+ android::IInterface::asBinder(mDelegate)->unlinkToDeath(mAAudioClient);
+ }
+ }
+
+ // This should never be called (call is rejected at the AudioBinderClient level).
+ aaudio_result_t startClient(aaudio_handle_t streamHandle __unused,
+ const android::AudioClient& client __unused,
+ const audio_attributes_t* attr __unused,
+ audio_port_handle_t* clientHandle __unused) override {
+ LOG_ALWAYS_FATAL("Shouldn't get here");
+ return AAUDIO_ERROR_UNAVAILABLE;
+ }
+
+ // This should never be called (call is rejected at the AudioBinderClient level).
+ aaudio_result_t stopClient(aaudio_handle_t streamHandle __unused,
+ audio_port_handle_t clientHandle __unused) override {
+ LOG_ALWAYS_FATAL("Shouldn't get here");
+ return AAUDIO_ERROR_UNAVAILABLE;
+ }
+
+ private:
+ android::sp<IAAudioService> mDelegate;
+ android::sp<AAudioClient> mAAudioClient;
+ };
+
+private:
+ android::Mutex mServiceLock;
+ std::shared_ptr<AAudioServiceInterface> mAdapter;
+ android::sp<AAudioClient> mAAudioClient;
+
+ std::shared_ptr<AAudioServiceInterface> getAAudioService();
+
+ void dropAAudioService();
};
diff --git a/media/libaaudio/src/binding/AAudioServiceInterface.h b/media/libaaudio/src/binding/AAudioServiceInterface.h
index 9c28cc7..5d11512 100644
--- a/media/libaaudio/src/binding/AAudioServiceInterface.h
+++ b/media/libaaudio/src/binding/AAudioServiceInterface.h
@@ -20,11 +20,11 @@
#include <utils/StrongPointer.h>
#include <media/AudioClient.h>
+#include "aaudio/IAAudioClient.h"
#include "binding/AAudioServiceDefinitions.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
#include "binding/AudioEndpointParcelable.h"
-#include "binding/IAAudioClient.h"
/**
* This has the same methods as IAAudioService but without the Binder features.
@@ -40,7 +40,7 @@
AAudioServiceInterface() {};
virtual ~AAudioServiceInterface() = default;
- virtual void registerClient(const android::sp<android::IAAudioClient>& client) = 0;
+ virtual void registerClient(const android::sp<IAAudioClient>& client) = 0;
/**
* @param request info needed to create the stream
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index b785f88..2d501ef 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -23,101 +23,66 @@
#include <sys/mman.h>
#include <aaudio/AAudio.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
-
#include "binding/AAudioStreamConfiguration.h"
-using android::NO_ERROR;
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
using namespace aaudio;
-AAudioStreamConfiguration::AAudioStreamConfiguration() {}
-AAudioStreamConfiguration::~AAudioStreamConfiguration() {}
+using android::media::audio::common::AudioFormat;
-status_t AAudioStreamConfiguration::writeToParcel(Parcel* parcel) const {
- status_t status;
-
- status = parcel->writeInt32(getDeviceId());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getSampleRate());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getSamplesPerFrame());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getSharingMode());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getFormat());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32((int32_t) getDirection());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getBufferCapacity());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getUsage());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getContentType());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getInputPreset());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32((int32_t) getAllowedCapturePolicy());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(getSessionId());
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(isPrivacySensitive() ? 1 : 0);
- if (status != NO_ERROR) goto error;
- return NO_ERROR;
-error:
- ALOGE("%s(): write failed = %d", __func__, status);
- return status;
+AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
+ setSamplesPerFrame(parcelable.samplesPerFrame);
+ setSampleRate(parcelable.sampleRate);
+ setDeviceId(parcelable.deviceId);
+ static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
+ setSharingMode(parcelable.sharingMode);
+ static_assert(sizeof(audio_format_t) == sizeof(parcelable.audioFormat));
+ setFormat(static_cast<audio_format_t>(parcelable.audioFormat));
+ static_assert(sizeof(aaudio_direction_t) == sizeof(parcelable.direction));
+ setDirection(parcelable.direction);
+ static_assert(sizeof(audio_usage_t) == sizeof(parcelable.usage));
+ setUsage(parcelable.usage);
+ static_assert(sizeof(aaudio_content_type_t) == sizeof(parcelable.contentType));
+ setContentType(parcelable.contentType);
+ static_assert(sizeof(aaudio_input_preset_t) == sizeof(parcelable.inputPreset));
+ setInputPreset(parcelable.inputPreset);
+ setBufferCapacity(parcelable.bufferCapacity);
+ static_assert(
+ sizeof(aaudio_allowed_capture_policy_t) == sizeof(parcelable.allowedCapturePolicy));
+ setAllowedCapturePolicy(parcelable.allowedCapturePolicy);
+ static_assert(sizeof(aaudio_session_id_t) == sizeof(parcelable.sessionId));
+ setSessionId(parcelable.sessionId);
+ setPrivacySensitive(parcelable.isPrivacySensitive);
}
-status_t AAudioStreamConfiguration::readFromParcel(const Parcel* parcel) {
- int32_t value;
- status_t status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setDeviceId(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSampleRate(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSamplesPerFrame(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSharingMode((aaudio_sharing_mode_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setFormat((audio_format_t) value);
+AAudioStreamConfiguration&
+AAudioStreamConfiguration::operator=(const StreamParameters& parcelable) {
+ this->~AAudioStreamConfiguration();
+ new (this) AAudioStreamConfiguration(parcelable);
+ return *this;
+}
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setDirection((aaudio_direction_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setBufferCapacity(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setUsage((aaudio_usage_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setContentType((aaudio_content_type_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setInputPreset((aaudio_input_preset_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setAllowedCapturePolicy((aaudio_allowed_capture_policy_t) value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setSessionId(value);
- status = parcel->readInt32(&value);
- if (status != NO_ERROR) goto error;
- setPrivacySensitive(value == 1);
- return NO_ERROR;
-error:
- ALOGE("%s(): read failed = %d", __func__, status);
- return status;
+StreamParameters AAudioStreamConfiguration::parcelable() const {
+ StreamParameters result;
+ result.samplesPerFrame = getSamplesPerFrame();
+ result.sampleRate = getSampleRate();
+ result.deviceId = getDeviceId();
+ static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));
+ result.sharingMode = getSharingMode();
+ static_assert(sizeof(audio_format_t) == sizeof(result.audioFormat));
+ result.audioFormat = static_cast<AudioFormat>(getFormat());
+ static_assert(sizeof(aaudio_direction_t) == sizeof(result.direction));
+ result.direction = getDirection();
+ static_assert(sizeof(audio_usage_t) == sizeof(result.usage));
+ result.usage = getUsage();
+ static_assert(sizeof(aaudio_content_type_t) == sizeof(result.contentType));
+ result.contentType = getContentType();
+ static_assert(sizeof(aaudio_input_preset_t) == sizeof(result.inputPreset));
+ result.inputPreset = getInputPreset();
+ result.bufferCapacity = getBufferCapacity();
+ static_assert(sizeof(aaudio_allowed_capture_policy_t) == sizeof(result.allowedCapturePolicy));
+ result.allowedCapturePolicy = getAllowedCapturePolicy();
+ static_assert(sizeof(aaudio_session_id_t) == sizeof(result.sessionId));
+ result.sessionId = getSessionId();
+ result.isPrivacySensitive = isPrivacySensitive();
+ return result;
}
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.h b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
index b324896..f428eb0 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.h
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.h
@@ -20,24 +20,24 @@
#include <stdint.h>
#include <aaudio/AAudio.h>
+#include <aaudio/StreamParameters.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
#include "core/AAudioStreamParameters.h"
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
namespace aaudio {
-class AAudioStreamConfiguration : public AAudioStreamParameters, public Parcelable {
+// This is a holder for AAudioStreamParameters, which allows conversion to/from it parcelable
+// representation, StreamParameters.
+class AAudioStreamConfiguration : public AAudioStreamParameters {
public:
- AAudioStreamConfiguration();
- virtual ~AAudioStreamConfiguration();
+ AAudioStreamConfiguration() = default;
- virtual status_t writeToParcel(Parcel* parcel) const override;
+ explicit AAudioStreamConfiguration(const StreamParameters& parcelable);
- virtual status_t readFromParcel(const Parcel* parcel) override;
+ AAudioStreamConfiguration& operator=(const StreamParameters& parcelable);
+
+ StreamParameters parcelable() const;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.cpp b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
index c30c5b9..536395a 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.cpp
@@ -21,67 +21,32 @@
#include <stdint.h>
#include <sys/mman.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
#include <aaudio/AAudio.h>
#include "binding/AAudioStreamConfiguration.h"
#include "binding/AAudioStreamRequest.h"
-using android::NO_ERROR;
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
using namespace aaudio;
-AAudioStreamRequest::AAudioStreamRequest()
- : mConfiguration()
- {}
-
-AAudioStreamRequest::~AAudioStreamRequest() {}
-
-status_t AAudioStreamRequest::writeToParcel(Parcel* parcel) const {
- status_t status = parcel->writeInt32((int32_t) mUserId);
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeBool(mSharingModeMatchRequired);
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeBool(mInService);
- if (status != NO_ERROR) goto error;
-
- status = mConfiguration.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return NO_ERROR;
-
-error:
- ALOGE("writeToParcel(): write failed = %d", status);
- return status;
+AAudioStreamRequest::AAudioStreamRequest(const StreamRequest& parcelable) :
+ mConfiguration(std::move(parcelable.params)),
+ mUserId(parcelable.userId),
+ mProcessId(parcelable.processId),
+ mSharingModeMatchRequired(parcelable.sharingModeMatchRequired),
+ mInService(parcelable.inService) {
+ static_assert(sizeof(mUserId) == sizeof(parcelable.userId));
+ static_assert(sizeof(mProcessId) == sizeof(parcelable.processId));
}
-status_t AAudioStreamRequest::readFromParcel(const Parcel* parcel) {
- int32_t temp;
- status_t status = parcel->readInt32(&temp);
- if (status != NO_ERROR) goto error;
- mUserId = (uid_t) temp;
-
- status = parcel->readBool(&mSharingModeMatchRequired);
- if (status != NO_ERROR) goto error;
-
- status = parcel->readBool(&mInService);
- if (status != NO_ERROR) goto error;
-
- status = mConfiguration.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return NO_ERROR;
-
-error:
- ALOGE("readFromParcel(): read failed = %d", status);
- return status;
+StreamRequest AAudioStreamRequest::parcelable() const {
+ StreamRequest result;
+ result.params = std::move(mConfiguration).parcelable();
+ result.userId = mUserId;
+ result.processId = mProcessId;
+ result.sharingModeMatchRequired = mSharingModeMatchRequired;
+ result.inService = mInService;
+ return result;
}
aaudio_result_t AAudioStreamRequest::validate() const {
diff --git a/media/libaaudio/src/binding/AAudioStreamRequest.h b/media/libaaudio/src/binding/AAudioStreamRequest.h
index 492f69d..31d3ea1 100644
--- a/media/libaaudio/src/binding/AAudioStreamRequest.h
+++ b/media/libaaudio/src/binding/AAudioStreamRequest.h
@@ -20,21 +20,18 @@
#include <stdint.h>
#include <aaudio/AAudio.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <aaudio/StreamRequest.h>
#include "binding/AAudioStreamConfiguration.h"
-using android::status_t;
-using android::Parcel;
-using android::Parcelable;
-
namespace aaudio {
-class AAudioStreamRequest : public Parcelable {
+class AAudioStreamRequest {
public:
- AAudioStreamRequest();
- virtual ~AAudioStreamRequest();
+ AAudioStreamRequest() = default;
+
+ // Construct based on a parcelable representation.
+ explicit AAudioStreamRequest(const StreamRequest& parcelable);
uid_t getUserId() const {
return mUserId;
@@ -76,15 +73,14 @@
mInService = inService;
}
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t validate() const;
void dump() const;
-protected:
+ // Extract a parcelable representation of this object.
+ StreamRequest parcelable() const;
+
+private:
AAudioStreamConfiguration mConfiguration;
uid_t mUserId = (uid_t) -1;
pid_t mProcessId = (pid_t) -1;
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
index 61d7d27..aa4ac27 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.cpp
@@ -29,22 +29,43 @@
#include "binding/AudioEndpointParcelable.h"
using android::base::unique_fd;
+using android::media::SharedFileRegion;
using android::NO_ERROR;
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
using namespace aaudio;
-/**
- * Container for information about the message queues plus
- * general stream information needed by AAudio clients.
- * It contains no addresses, just sizes, offsets and file descriptors for
- * shared memory that can be passed through Binder.
- */
-AudioEndpointParcelable::AudioEndpointParcelable() {}
+AudioEndpointParcelable::AudioEndpointParcelable(Endpoint&& parcelable)
+ : mUpMessageQueueParcelable(std::move(parcelable.upMessageQueueParcelable)),
+ mDownMessageQueueParcelable(std::move(parcelable.downMessageQueueParcelable)),
+ mUpDataQueueParcelable(std::move(parcelable.upDataQueueParcelable)),
+ mDownDataQueueParcelable(std::move(parcelable.downDataQueueParcelable)),
+ mNumSharedMemories(parcelable.sharedMemories.size()) {
+ for (size_t i = 0; i < parcelable.sharedMemories.size() && i < MAX_SHARED_MEMORIES; ++i) {
+ // Re-construct.
+ mSharedMemories[i].~SharedMemoryParcelable();
+ new(&mSharedMemories[i]) SharedMemoryParcelable(std::move(parcelable.sharedMemories[i]));
+ }
+}
-AudioEndpointParcelable::~AudioEndpointParcelable() {}
+AudioEndpointParcelable& AudioEndpointParcelable::operator=(Endpoint&& parcelable) {
+ this->~AudioEndpointParcelable();
+ new(this) AudioEndpointParcelable(std::move(parcelable));
+ return *this;
+}
+
+Endpoint AudioEndpointParcelable::parcelable()&& {
+ Endpoint result;
+ result.upMessageQueueParcelable = std::move(mUpMessageQueueParcelable).parcelable();
+ result.downMessageQueueParcelable = std::move(mDownMessageQueueParcelable).parcelable();
+ result.upDataQueueParcelable = std::move(mUpDataQueueParcelable).parcelable();
+ result.downDataQueueParcelable = std::move(mDownDataQueueParcelable).parcelable();
+ result.sharedMemories.reserve(std::min(mNumSharedMemories, MAX_SHARED_MEMORIES));
+ for (size_t i = 0; i < mNumSharedMemories && i < MAX_SHARED_MEMORIES; ++i) {
+ result.sharedMemories.emplace_back(std::move(mSharedMemories[i]).parcelable());
+ }
+ return result;
+}
/**
* Add the file descriptor to the table.
@@ -60,60 +81,6 @@
return index;
}
-/**
- * The read and write must be symmetric.
- */
-status_t AudioEndpointParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32(mNumSharedMemories);
- if (status != NO_ERROR) goto error;
-
- for (int i = 0; i < mNumSharedMemories; i++) {
- status = mSharedMemories[i].writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- status = mUpMessageQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownMessageQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mUpDataQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownDataQueueParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return NO_ERROR;
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
-status_t AudioEndpointParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mNumSharedMemories);
- if (status != NO_ERROR) goto error;
-
- for (int i = 0; i < mNumSharedMemories; i++) {
- mSharedMemories[i].readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- status = mUpMessageQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownMessageQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mUpDataQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDownDataQueueParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
-
- return AAudioConvert_aaudioToAndroidStatus(validate());
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
aaudio_result_t AudioEndpointParcelable::resolve(EndpointDescriptor *descriptor) {
aaudio_result_t result = mUpMessageQueueParcelable.resolve(mSharedMemories,
&descriptor->upMessageQueueDescriptor);
diff --git a/media/libaaudio/src/binding/AudioEndpointParcelable.h b/media/libaaudio/src/binding/AudioEndpointParcelable.h
index e4f8b9e..5237a1a 100644
--- a/media/libaaudio/src/binding/AudioEndpointParcelable.h
+++ b/media/libaaudio/src/binding/AudioEndpointParcelable.h
@@ -20,16 +20,13 @@
#include <stdint.h>
//#include <sys/mman.h>
+#include <aaudio/Endpoint.h>
#include <android-base/unique_fd.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
#include "binding/AAudioServiceDefinitions.h"
#include "binding/RingBufferParcelable.h"
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
namespace aaudio {
@@ -39,10 +36,15 @@
* It contains no addresses, just sizes, offsets and file descriptors for
* shared memory that can be passed through Binder.
*/
-class AudioEndpointParcelable : public Parcelable {
+class AudioEndpointParcelable {
public:
- AudioEndpointParcelable();
- virtual ~AudioEndpointParcelable();
+ AudioEndpointParcelable() = default;
+
+ // Ctor/assignment from a parcelable representation.
+ // Since the parcelable object owns unique FDs (for shared memory blocks), move semantics are
+ // provided to avoid the need to dupe.
+ AudioEndpointParcelable(Endpoint&& parcelable);
+ AudioEndpointParcelable& operator=(Endpoint&& parcelable);
/**
* Add the file descriptor to the table.
@@ -50,16 +52,17 @@
*/
int32_t addFileDescriptor(const android::base::unique_fd& fd, int32_t sizeInBytes);
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t resolve(EndpointDescriptor *descriptor);
aaudio_result_t close();
void dump();
+ // Extract a parcelable representation of this object.
+ // Since our shared memory objects own a unique FD, move semantics are provided to avoid the
+ // need to dupe.
+ Endpoint parcelable()&&;
+
public: // TODO add getters
// Set capacityInFrames to zero if Queue is unused.
RingBufferParcelable mUpMessageQueueParcelable; // server to client
diff --git a/media/libaaudio/src/binding/IAAudioClient.cpp b/media/libaaudio/src/binding/IAAudioClient.cpp
deleted file mode 100644
index c69c4e8..0000000
--- a/media/libaaudio/src/binding/IAAudioClient.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/AAudioBinderClient.h"
-#include "binding/AAudioServiceDefinitions.h"
-#include "binding/IAAudioClient.h"
-#include "utility/AAudioUtilities.h"
-
-namespace android {
-
-using aaudio::aaudio_handle_t;
-
-/**
- * This is used by the AAudio Service to talk to an AAudio Client.
- *
- * The order of parameters in the Parcels must match with code in AAudioClient.cpp.
- */
-class BpAAudioClient : public BpInterface<IAAudioClient>
-{
-public:
- explicit BpAAudioClient(const sp<IBinder>& impl)
- : BpInterface<IAAudioClient>(impl)
- {
- }
-
- void onStreamChange(aaudio_handle_t handle, int32_t opcode, int32_t value) override {
- Parcel data, reply;
- data.writeInterfaceToken(IAAudioClient::getInterfaceDescriptor());
- data.writeInt32(handle);
- data.writeInt32(opcode);
- data.writeInt32(value);
- remote()->transact(ON_STREAM_CHANGE, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
-};
-
-// Implement an interface to the service.
-IMPLEMENT_META_INTERFACE(AAudioClient, "IAAudioClient");
-
-// The order of parameters in the Parcels must match with code in BpAAudioClient
-
-status_t BnAAudioClient::onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags) {
- aaudio_handle_t streamHandle;
- int32_t opcode = 0;
- int32_t value = 0;
- ALOGV("BnAAudioClient::onTransact(%u) %u", code, flags);
-
- switch(code) {
- case ON_STREAM_CHANGE: {
- CHECK_INTERFACE(IAAudioClient, data, reply);
- data.readInt32(&streamHandle);
- data.readInt32(&opcode);
- data.readInt32(&value);
- onStreamChange(streamHandle, opcode, value);
- ALOGD("BnAAudioClient onStreamChange(%x, %d, %d)", streamHandle, opcode, value);
- return NO_ERROR;
- } break;
-
- default:
- // ALOGW("BnAAudioClient::onTransact not handled %u", code);
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} /* namespace android */
diff --git a/media/libaaudio/src/binding/IAAudioClient.h b/media/libaaudio/src/binding/IAAudioClient.h
deleted file mode 100644
index f21fd93..0000000
--- a/media/libaaudio/src/binding/IAAudioClient.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AAUDIO_IAAUDIO_CLIENT_H
-#define ANDROID_AAUDIO_IAAUDIO_CLIENT_H
-
-#include <stdint.h>
-#include <binder/IInterface.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/AAudioCommon.h"
-
-namespace android {
-
-
-// Interface (our AIDL) - client methods called by service
-class IAAudioClient : public IInterface {
-public:
-
- DECLARE_META_INTERFACE(AAudioClient);
-
- virtual void onStreamChange(aaudio::aaudio_handle_t handle, int32_t opcode, int32_t value) = 0;
-
-};
-
-class BnAAudioClient : public BnInterface<IAAudioClient> {
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags = 0);
-};
-
-} /* namespace android */
-
-#endif //ANDROID_AAUDIO_IAAUDIO_SERVICE_H
diff --git a/media/libaaudio/src/binding/IAAudioService.cpp b/media/libaaudio/src/binding/IAAudioService.cpp
deleted file mode 100644
index e017b3a..0000000
--- a/media/libaaudio/src/binding/IAAudioService.cpp
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AAudio"
-//#define LOG_NDEBUG 0
-#include <utils/Log.h>
-
-#include <aaudio/AAudio.h>
-#include <binder/IPCThreadState.h>
-
-#include "binding/AudioEndpointParcelable.h"
-#include "binding/AAudioStreamRequest.h"
-#include "binding/AAudioServiceDefinitions.h"
-#include "binding/AAudioStreamConfiguration.h"
-#include "binding/IAAudioService.h"
-#include "utility/AAudioUtilities.h"
-
-namespace android {
-
-using aaudio::aaudio_handle_t;
-
-/**
- * This is used by the AAudio Client to talk to the AAudio Service.
- *
- * The order of parameters in the Parcels must match with code in AAudioService.cpp.
- */
-class BpAAudioService : public BpInterface<IAAudioService>
-{
-public:
- explicit BpAAudioService(const sp<IBinder>& impl)
- : BpInterface<IAAudioService>(impl)
- {
- }
-
- void registerClient(const sp<IAAudioClient>& client) override
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(client));
- remote()->transact(REGISTER_CLIENT, data, &reply);
- }
-
- aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- // request.dump();
- request.writeToParcel(&data);
- status_t err = remote()->transact(OPEN_STREAM, data, &reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client openStream transact failed %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_handle_t stream;
- err = reply.readInt32(&stream);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(OPEN_STREAM) readInt %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- } else if (stream < 0) {
- return stream;
- }
- err = configurationOutput.readFromParcel(&reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client openStream readFromParcel failed %d", err);
- closeStream(stream);
- return AAudioConvert_androidToAAudioResult(err);
- }
- return stream;
- }
-
- virtual aaudio_result_t closeStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(CLOSE_STREAM, data, &reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client closeStream transact failed %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t getStreamDescription(aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(GET_STREAM_DESCRIPTION, data, &reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) returns %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t result;
- err = reply.readInt32(&result);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) readInt %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- } else if (result != AAUDIO_OK) {
- ALOGE("BpAAudioService::client GET_STREAM_DESCRIPTION passed result %d", result);
- return result;
- }
- err = parcelable.readFromParcel(&reply);
- if (err != NO_ERROR) {
- ALOGE("BpAAudioService::client transact(GET_STREAM_DESCRIPTION) read endpoint %d", err);
- return AAudioConvert_androidToAAudioResult(err);
- }
- return result;
- }
-
- // TODO should we wait for a reply?
- virtual aaudio_result_t startStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(START_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t pauseStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(PAUSE_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t stopStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(STOP_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t flushStream(aaudio_handle_t streamHandle) override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- status_t err = remote()->transact(FLUSH_STREAM, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t registerAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId,
- int64_t periodNanoseconds)
- override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- data.writeInt32((int32_t) clientThreadId);
- data.writeInt64(periodNanoseconds);
- status_t err = remote()->transact(REGISTER_AUDIO_THREAD, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
- virtual aaudio_result_t unregisterAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId)
- override {
- Parcel data, reply;
- // send command
- data.writeInterfaceToken(IAAudioService::getInterfaceDescriptor());
- data.writeInt32(streamHandle);
- data.writeInt32((int32_t) clientThreadId);
- status_t err = remote()->transact(UNREGISTER_AUDIO_THREAD, data, &reply);
- if (err != NO_ERROR) {
- return AAudioConvert_androidToAAudioResult(err);
- }
- // parse reply
- aaudio_result_t res;
- reply.readInt32(&res);
- return res;
- }
-
-};
-
-// Implement an interface to the service.
-// This is here so that you don't have to link with libaaudio static library.
-IMPLEMENT_META_INTERFACE(AAudioService, "IAAudioService");
-
-// The order of parameters in the Parcels must match with code in BpAAudioService
-
-status_t BnAAudioService::onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags) {
- aaudio_handle_t streamHandle = 0;
- aaudio::AAudioStreamRequest request;
- aaudio::AAudioStreamConfiguration configuration;
- pid_t tid = 0;
- int64_t nanoseconds = 0;
- aaudio_result_t result = AAUDIO_OK;
- status_t status = NO_ERROR;
- ALOGV("BnAAudioService::onTransact(%i) %i", code, flags);
-
- switch(code) {
- case REGISTER_CLIENT: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- sp<IAAudioClient> client = interface_cast<IAAudioClient>(
- data.readStrongBinder());
- // readStrongBinder() can return null
- if (client.get() == nullptr) {
- ALOGE("BnAAudioService::%s(REGISTER_CLIENT) client is NULL!", __func__);
- android_errorWriteLog(0x534e4554, "116230453");
- return DEAD_OBJECT;
- } else {
- registerClient(client);
- return NO_ERROR;
- }
- } break;
-
- case OPEN_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- request.readFromParcel(&data);
- result = request.validate();
- if (result != AAUDIO_OK) {
- streamHandle = result;
- } else {
- //ALOGD("BnAAudioService::client openStream request dump --------------------");
- //request.dump();
- // Override the uid and pid from the client in case they are incorrect.
- request.setUserId(IPCThreadState::self()->getCallingUid());
- request.setProcessId(IPCThreadState::self()->getCallingPid());
- streamHandle = openStream(request, configuration);
- //ALOGD("BnAAudioService::onTransact OPEN_STREAM server handle = 0x%08X",
- // streamHandle);
- }
- reply->writeInt32(streamHandle);
- configuration.writeToParcel(reply);
- return NO_ERROR;
- } break;
-
- case CLOSE_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(CLOSE_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = closeStream(streamHandle);
- //ALOGD("BnAAudioService::onTransact CLOSE_STREAM 0x%08X, result = %d",
- // streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case GET_STREAM_DESCRIPTION: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(GET_STREAM_DESCRIPTION) streamHandle failed!", __func__);
- return status;
- }
- aaudio::AudioEndpointParcelable parcelable;
- result = getStreamDescription(streamHandle, parcelable);
- if (result != AAUDIO_OK) {
- return AAudioConvert_aaudioToAndroidStatus(result);
- }
- status = reply->writeInt32(result);
- if (status != NO_ERROR) {
- return status;
- }
- return parcelable.writeToParcel(reply);
- } break;
-
- case START_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(START_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = startStream(streamHandle);
- ALOGV("BnAAudioService::onTransact START_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case PAUSE_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(PAUSE_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = pauseStream(streamHandle);
- ALOGV("BnAAudioService::onTransact PAUSE_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case STOP_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(STOP_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = stopStream(streamHandle);
- ALOGV("BnAAudioService::onTransact STOP_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case FLUSH_STREAM: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(FLUSH_STREAM) streamHandle failed!", __func__);
- return status;
- }
- result = flushStream(streamHandle);
- ALOGV("BnAAudioService::onTransact FLUSH_STREAM 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case REGISTER_AUDIO_THREAD: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(REGISTER_AUDIO_THREAD) streamHandle failed!", __func__);
- return status;
- }
- status = data.readInt32(&tid);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(REGISTER_AUDIO_THREAD) tid failed!", __func__);
- return status;
- }
- status = data.readInt64(&nanoseconds);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(REGISTER_AUDIO_THREAD) nanoseconds failed!", __func__);
- return status;
- }
- result = registerAudioThread(streamHandle, tid, nanoseconds);
- ALOGV("BnAAudioService::%s(REGISTER_AUDIO_THREAD) 0x%08X, result = %d",
- __func__, streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- case UNREGISTER_AUDIO_THREAD: {
- CHECK_INTERFACE(IAAudioService, data, reply);
- status = data.readInt32(&streamHandle);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(UNREGISTER_AUDIO_THREAD) streamHandle failed!", __func__);
- return status;
- }
- status = data.readInt32(&tid);
- if (status != NO_ERROR) {
- ALOGE("BnAAudioService::%s(UNREGISTER_AUDIO_THREAD) tid failed!", __func__);
- return status;
- }
- result = unregisterAudioThread(streamHandle, tid);
- ALOGV("BnAAudioService::onTransact UNREGISTER_AUDIO_THREAD 0x%08X, result = %d",
- streamHandle, result);
- reply->writeInt32(result);
- return NO_ERROR;
- } break;
-
- default:
- // ALOGW("BnAAudioService::onTransact not handled %u", code);
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-} /* namespace android */
diff --git a/media/libaaudio/src/binding/IAAudioService.h b/media/libaaudio/src/binding/IAAudioService.h
deleted file mode 100644
index 6bdb826..0000000
--- a/media/libaaudio/src/binding/IAAudioService.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_AAUDIO_IAAUDIO_SERVICE_H
-#define ANDROID_AAUDIO_IAAUDIO_SERVICE_H
-
-#include <stdint.h>
-#include <utils/RefBase.h>
-#include <binder/TextOutput.h>
-#include <binder/IInterface.h>
-
-#include <aaudio/AAudio.h>
-
-#include "binding/AAudioCommon.h"
-#include "binding/AAudioServiceDefinitions.h"
-#include "binding/AAudioStreamConfiguration.h"
-#include "binding/AAudioStreamRequest.h"
-#include "binding/AudioEndpointParcelable.h"
-#include "binding/IAAudioClient.h"
-
-namespace android {
-
-#define AAUDIO_SERVICE_NAME "media.aaudio"
-
-// Interface (our AIDL) - service methods called by client
-class IAAudioService : public IInterface {
-public:
-
- DECLARE_META_INTERFACE(AAudioService);
-
- // Register an object to receive audio input/output change and track notifications.
- // For a given calling pid, AAudio service disregards any registrations after the first.
- // Thus the IAAudioClient must be a singleton per process.
- virtual void registerClient(const sp<IAAudioClient>& client) = 0;
-
- /**
- * @param request info needed to create the stream
- * @param configuration contains information about the created stream
- * @return handle to the stream or a negative error
- */
- virtual aaudio::aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput) = 0;
-
- virtual aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /* Get an immutable description of the in-memory queues
- * used to communicate with the underlying HAL or Service.
- */
- virtual aaudio_result_t getStreamDescription(aaudio::aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) = 0;
-
- /**
- * Start the flow of data.
- * This is asynchronous. When complete, the service will send a STARTED event.
- */
- virtual aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Stop the flow of data such that start() can resume without loss of data.
- * This is asynchronous. When complete, the service will send a PAUSED event.
- */
- virtual aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Stop the flow of data such that the data currently in the buffer is played.
- * This is asynchronous. When complete, the service will send a STOPPED event.
- */
- virtual aaudio_result_t stopStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Discard any data held by the underlying HAL or Service.
- * This is asynchronous. When complete, the service will send a FLUSHED event.
- */
- virtual aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) = 0;
-
- /**
- * Manage the specified thread as a low latency audio thread.
- */
- virtual aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t clientThreadId,
- int64_t periodNanoseconds) = 0;
-
- virtual aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t clientThreadId) = 0;
-};
-
-class BnAAudioService : public BnInterface<IAAudioService> {
-public:
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags = 0);
-
-};
-
-} /* namespace android */
-
-#endif //ANDROID_AAUDIO_IAAUDIO_SERVICE_H
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.cpp b/media/libaaudio/src/binding/RingBufferParcelable.cpp
index 4996b3f..a4b3cec 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.cpp
+++ b/media/libaaudio/src/binding/RingBufferParcelable.cpp
@@ -29,8 +29,29 @@
using namespace aaudio;
-RingBufferParcelable::RingBufferParcelable() {}
-RingBufferParcelable::~RingBufferParcelable() {}
+RingBufferParcelable::RingBufferParcelable(const RingBuffer& parcelable)
+ : mReadCounterParcelable(std::move(parcelable.readCounterParcelable)),
+ mWriteCounterParcelable(std::move(parcelable.writeCounterParcelable)),
+ mDataParcelable(std::move(parcelable.dataParcelable)),
+ mBytesPerFrame(parcelable.bytesPerFrame),
+ mFramesPerBurst(parcelable.framesPerBurst),
+ mCapacityInFrames(parcelable.capacityInFrames),
+ mFlags(static_cast<RingbufferFlags>(parcelable.flags)) {
+ static_assert(sizeof(mFlags) == sizeof(parcelable.flags));
+}
+
+RingBuffer RingBufferParcelable::parcelable() const {
+ RingBuffer result;
+ result.readCounterParcelable = std::move(mReadCounterParcelable).parcelable();
+ result.writeCounterParcelable = std::move(mWriteCounterParcelable).parcelable();
+ result.dataParcelable = std::move(mDataParcelable).parcelable();
+ result.bytesPerFrame = mBytesPerFrame;
+ result.framesPerBurst = mFramesPerBurst;
+ result.capacityInFrames = mCapacityInFrames;
+ static_assert(sizeof(mFlags) == sizeof(result.flags));
+ result.flags = static_cast<int32_t>(mFlags);
+ return result;
+}
// TODO This assumes that all three use the same SharedMemoryParcelable
void RingBufferParcelable::setupMemory(int32_t sharedMemoryIndex,
@@ -76,58 +97,6 @@
mCapacityInFrames = capacityInFrames;
}
-/**
- * The read and write must be symmetric.
- */
-status_t RingBufferParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32(mCapacityInFrames);
- if (status != NO_ERROR) goto error;
- if (mCapacityInFrames > 0) {
- status = parcel->writeInt32(mBytesPerFrame);
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(mFramesPerBurst);
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(mFlags);
- if (status != NO_ERROR) goto error;
- status = mReadCounterParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mWriteCounterParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDataParcelable.writeToParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- return NO_ERROR;
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
-status_t RingBufferParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mCapacityInFrames);
- if (status != NO_ERROR) goto error;
- if (mCapacityInFrames > 0) {
- status = parcel->readInt32(&mBytesPerFrame);
- if (status != NO_ERROR) goto error;
- status = parcel->readInt32(&mFramesPerBurst);
- if (status != NO_ERROR) goto error;
- status = parcel->readInt32((int32_t *)&mFlags);
- if (status != NO_ERROR) goto error;
- status = mReadCounterParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mWriteCounterParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- status = mDataParcelable.readFromParcel(parcel);
- if (status != NO_ERROR) goto error;
- }
- return AAudioConvert_aaudioToAndroidStatus(validate());
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
aaudio_result_t RingBufferParcelable::resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor) {
aaudio_result_t result;
diff --git a/media/libaaudio/src/binding/RingBufferParcelable.h b/media/libaaudio/src/binding/RingBufferParcelable.h
index 1dbcf07..2508cea 100644
--- a/media/libaaudio/src/binding/RingBufferParcelable.h
+++ b/media/libaaudio/src/binding/RingBufferParcelable.h
@@ -19,6 +19,7 @@
#include <stdint.h>
+#include <aaudio/RingBuffer.h>
#include <binder/Parcelable.h>
#include "binding/AAudioServiceDefinitions.h"
@@ -26,10 +27,12 @@
namespace aaudio {
-class RingBufferParcelable : public Parcelable {
+class RingBufferParcelable {
public:
- RingBufferParcelable();
- virtual ~RingBufferParcelable();
+ RingBufferParcelable() = default;
+
+ // Construct based on a parcelable representation.
+ explicit RingBufferParcelable(const RingBuffer& parcelable);
// TODO This assumes that all three use the same SharedMemoryParcelable
void setupMemory(int32_t sharedMemoryIndex,
@@ -57,21 +60,14 @@
bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
- /**
- * The read and write must be symmetric.
- */
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, RingBufferDescriptor *descriptor);
void dump();
+ // Extract a parcelable representation of this object.
+ RingBuffer parcelable() const;
+
private:
-
- aaudio_result_t validate() const;
-
SharedRegionParcelable mReadCounterParcelable;
SharedRegionParcelable mWriteCounterParcelable;
SharedRegionParcelable mDataParcelable;
@@ -79,6 +75,8 @@
int32_t mFramesPerBurst = 0; // for ISOCHRONOUS queues
int32_t mCapacityInFrames = 0; // zero if unused
RingbufferFlags mFlags = RingbufferFlags::NONE;
+
+ aaudio_result_t validate() const;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
index b6e8472..685b779 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
@@ -33,61 +34,36 @@
using android::base::unique_fd;
using android::NO_ERROR;
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
+using android::media::SharedFileRegion;
using namespace aaudio;
-SharedMemoryParcelable::SharedMemoryParcelable() {}
-SharedMemoryParcelable::~SharedMemoryParcelable() {};
+SharedMemoryParcelable::SharedMemoryParcelable(SharedFileRegion&& parcelable) {
+ mFd = parcelable.fd.release();
+ mSizeInBytes = parcelable.size;
+ mOffsetInBytes = parcelable.offset;
+}
+
+SharedFileRegion SharedMemoryParcelable::parcelable() && {
+ SharedFileRegion result;
+ result.fd.reset(std::move(mFd));
+ result.size = mSizeInBytes;
+ result.offset = mOffsetInBytes;
+ return result;
+}
+
+SharedMemoryParcelable SharedMemoryParcelable::dup() const {
+ SharedMemoryParcelable result;
+ result.setup(mFd, static_cast<int32_t>(mSizeInBytes));
+ return result;
+}
void SharedMemoryParcelable::setup(const unique_fd& fd, int32_t sizeInBytes) {
- mFd.reset(dup(fd.get())); // store a duplicate fd
+ mFd.reset(::dup(fd.get())); // store a duplicate fd
ALOGV("setup(fd = %d -> %d, size = %d) this = %p\n", fd.get(), mFd.get(), sizeInBytes, this);
mSizeInBytes = sizeInBytes;
}
-status_t SharedMemoryParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) return status;
-
- status = parcel->writeInt32(mSizeInBytes);
- if (status != NO_ERROR) return status;
- if (mSizeInBytes > 0) {
- ALOGV("writeToParcel() mFd = %d, this = %p\n", mFd.get(), this);
- status = parcel->writeUniqueFileDescriptor(mFd);
- ALOGE_IF(status != NO_ERROR, "SharedMemoryParcelable writeDupFileDescriptor failed : %d",
- status);
- }
- return status;
-}
-
-status_t SharedMemoryParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mSizeInBytes);
- if (status != NO_ERROR) goto error;
-
- if (mSizeInBytes > 0) {
- // The Parcel owns the file descriptor and will close it later.
- unique_fd mmapFd;
- status = parcel->readUniqueFileDescriptor(&mmapFd);
- if (status != NO_ERROR) {
- ALOGE("readFromParcel() readUniqueFileDescriptor() failed : %d", status);
- goto error;
- }
-
- // Resolve the memory now while we still have the FD from the Parcel.
- // Closing the FD will not affect the shared memory once mmap() has been called.
- aaudio_result_t result = resolveSharedMemory(mmapFd);
- status = AAudioConvert_aaudioToAndroidStatus(result);
- if (status != NO_ERROR) goto error;
- }
-
- return AAudioConvert_aaudioToAndroidStatus(validate());
-
-error:
- return status;
-}
-
aaudio_result_t SharedMemoryParcelable::close() {
if (mResolvedAddress != MMAP_UNRESOLVED_ADDRESS) {
int err = munmap(mResolvedAddress, mSizeInBytes);
@@ -104,7 +80,7 @@
mResolvedAddress = (uint8_t *) mmap(0, mSizeInBytes, PROT_READ | PROT_WRITE,
MAP_SHARED, fd.get(), 0);
if (mResolvedAddress == MMAP_UNRESOLVED_ADDRESS) {
- ALOGE("mmap() failed for fd = %d, nBytes = %d, errno = %s",
+ ALOGE("mmap() failed for fd = %d, nBytes = %" PRId64 ", errno = %s",
fd.get(), mSizeInBytes, strerror(errno));
return AAUDIO_ERROR_INTERNAL;
}
@@ -118,7 +94,7 @@
return AAUDIO_ERROR_OUT_OF_RANGE;
} else if ((offsetInBytes + sizeInBytes) > mSizeInBytes) {
ALOGE("out of range, offsetInBytes = %d, "
- "sizeInBytes = %d, mSizeInBytes = %d",
+ "sizeInBytes = %d, mSizeInBytes = %" PRId64,
offsetInBytes, sizeInBytes, mSizeInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
@@ -148,7 +124,11 @@
aaudio_result_t SharedMemoryParcelable::validate() const {
if (mSizeInBytes < 0 || mSizeInBytes >= MAX_MMAP_SIZE_BYTES) {
- ALOGE("invalid mSizeInBytes = %d", mSizeInBytes);
+ ALOGE("invalid mSizeInBytes = %" PRId64, mSizeInBytes);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+ }
+ if (mOffsetInBytes != 0) {
+ ALOGE("invalid mOffsetInBytes = %" PRId64, mOffsetInBytes);
return AAUDIO_ERROR_OUT_OF_RANGE;
}
return AAUDIO_OK;
@@ -156,5 +136,5 @@
void SharedMemoryParcelable::dump() {
ALOGD("mFd = %d", mFd.get());
- ALOGD("mSizeInBytes = %d", mSizeInBytes);
+ ALOGD("mSizeInBytes = %" PRId64, mSizeInBytes);
}
diff --git a/media/libaaudio/src/binding/SharedMemoryParcelable.h b/media/libaaudio/src/binding/SharedMemoryParcelable.h
index 3927f58..1f2c335 100644
--- a/media/libaaudio/src/binding/SharedMemoryParcelable.h
+++ b/media/libaaudio/src/binding/SharedMemoryParcelable.h
@@ -21,8 +21,7 @@
#include <sys/mman.h>
#include <android-base/unique_fd.h>
-#include <binder/Parcel.h>
-#include <binder/Parcelable.h>
+#include <android/media/SharedFileRegion.h>
namespace aaudio {
@@ -36,10 +35,14 @@
* It may be divided into several regions.
* The memory can be shared using Binder or simply shared between threads.
*/
-class SharedMemoryParcelable : public android::Parcelable {
+class SharedMemoryParcelable {
public:
- SharedMemoryParcelable();
- virtual ~SharedMemoryParcelable();
+ SharedMemoryParcelable() = default;
+
+ // Ctor from a parcelable representation.
+ // Since the parcelable object owns a unique FD, move semantics are provided to avoid the need
+ // to dupe.
+ explicit SharedMemoryParcelable(android::media::SharedFileRegion&& parcelable);
/**
* Make a dup() of the fd and store it for later use.
@@ -49,10 +52,6 @@
*/
void setup(const android::base::unique_fd& fd, int32_t sizeInBytes);
- virtual android::status_t writeToParcel(android::Parcel* parcel) const override;
-
- virtual android::status_t readFromParcel(const android::Parcel* parcel) override;
-
// mmap() shared memory
aaudio_result_t resolve(int32_t offsetInBytes, int32_t sizeInBytes, void **regionAddressPtr);
@@ -63,20 +62,23 @@
void dump();
-protected:
+ // Extract a parcelable representation of this object.
+ // Since we own a unique FD, move semantics are provided to avoid the need to dupe.
+ android::media::SharedFileRegion parcelable() &&;
-#define MMAP_UNRESOLVED_ADDRESS reinterpret_cast<uint8_t*>(MAP_FAILED)
-
- aaudio_result_t resolveSharedMemory(const android::base::unique_fd& fd);
-
- android::base::unique_fd mFd;
- int32_t mSizeInBytes = 0;
- uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+ // Copy this instance. Duplicates the underlying FD.
+ SharedMemoryParcelable dup() const;
private:
+#define MMAP_UNRESOLVED_ADDRESS reinterpret_cast<uint8_t*>(MAP_FAILED)
+ android::base::unique_fd mFd;
+ int64_t mSizeInBytes = 0;
+ int64_t mOffsetInBytes = 0;
+ uint8_t *mResolvedAddress = MMAP_UNRESOLVED_ADDRESS;
+
+ aaudio_result_t resolveSharedMemory(const android::base::unique_fd& fd);
aaudio_result_t validate() const;
-
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.cpp b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
index c776116..56b99c0 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.cpp
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.cpp
@@ -36,8 +36,18 @@
using namespace aaudio;
-SharedRegionParcelable::SharedRegionParcelable() {}
-SharedRegionParcelable::~SharedRegionParcelable() {}
+SharedRegionParcelable::SharedRegionParcelable(const SharedRegion& parcelable)
+ : mSharedMemoryIndex(parcelable.sharedMemoryIndex),
+ mOffsetInBytes(parcelable.offsetInBytes),
+ mSizeInBytes(parcelable.sizeInBytes) {}
+
+SharedRegion SharedRegionParcelable::parcelable() const {
+ SharedRegion result;
+ result.sharedMemoryIndex = mSharedMemoryIndex;
+ result.offsetInBytes = mOffsetInBytes;
+ result.sizeInBytes = mSizeInBytes;
+ return result;
+}
void SharedRegionParcelable::setup(int32_t sharedMemoryIndex,
int32_t offsetInBytes,
@@ -47,41 +57,6 @@
mSizeInBytes = sizeInBytes;
}
-status_t SharedRegionParcelable::writeToParcel(Parcel* parcel) const {
- status_t status = AAudioConvert_aaudioToAndroidStatus(validate());
- if (status != NO_ERROR) goto error;
-
- status = parcel->writeInt32(mSizeInBytes);
- if (status != NO_ERROR) goto error;
- if (mSizeInBytes > 0) {
- status = parcel->writeInt32(mSharedMemoryIndex);
- if (status != NO_ERROR) goto error;
- status = parcel->writeInt32(mOffsetInBytes);
- if (status != NO_ERROR) goto error;
- }
- return NO_ERROR;
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
-status_t SharedRegionParcelable::readFromParcel(const Parcel* parcel) {
- status_t status = parcel->readInt32(&mSizeInBytes);
- if (status != NO_ERROR) goto error;
- if (mSizeInBytes > 0) {
- status = parcel->readInt32(&mSharedMemoryIndex);
- if (status != NO_ERROR) goto error;
- status = parcel->readInt32(&mOffsetInBytes);
- if (status != NO_ERROR) goto error;
- }
- return AAudioConvert_aaudioToAndroidStatus(validate());
-
-error:
- ALOGE("%s returning %d", __func__, status);
- return status;
-}
-
aaudio_result_t SharedRegionParcelable::resolve(SharedMemoryParcelable *memoryParcels,
void **regionAddressPtr) {
if (mSizeInBytes == 0) {
diff --git a/media/libaaudio/src/binding/SharedRegionParcelable.h b/media/libaaudio/src/binding/SharedRegionParcelable.h
index 0cd8c04..c15fc30 100644
--- a/media/libaaudio/src/binding/SharedRegionParcelable.h
+++ b/media/libaaudio/src/binding/SharedRegionParcelable.h
@@ -20,41 +20,39 @@
#include <stdint.h>
#include <sys/mman.h>
-#include <binder/Parcelable.h>
#include <aaudio/AAudio.h>
+#include <aaudio/SharedRegion.h>
#include "binding/SharedMemoryParcelable.h"
using android::status_t;
-using android::Parcel;
-using android::Parcelable;
namespace aaudio {
-class SharedRegionParcelable : public Parcelable {
+class SharedRegionParcelable {
public:
- SharedRegionParcelable();
- virtual ~SharedRegionParcelable();
+ SharedRegionParcelable() = default;
+
+ // Construct based on a parcelable representation.
+ explicit SharedRegionParcelable(const SharedRegion& parcelable);
void setup(int32_t sharedMemoryIndex, int32_t offsetInBytes, int32_t sizeInBytes);
- virtual status_t writeToParcel(Parcel* parcel) const override;
-
- virtual status_t readFromParcel(const Parcel* parcel) override;
-
aaudio_result_t resolve(SharedMemoryParcelable *memoryParcels, void **regionAddressPtr);
bool isFileDescriptorSafe(SharedMemoryParcelable *memoryParcels);
void dump();
-protected:
+ // Extract a parcelable representation of this object.
+ SharedRegion parcelable() const;
+
+private:
int32_t mSharedMemoryIndex = -1;
int32_t mOffsetInBytes = 0;
int32_t mSizeInBytes = 0;
-private:
aaudio_result_t validate() const;
};
diff --git a/media/libaaudio/src/binding/aidl/aaudio/Endpoint.aidl b/media/libaaudio/src/binding/aidl/aaudio/Endpoint.aidl
new file mode 100644
index 0000000..3600b6a
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/Endpoint.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import aaudio.RingBuffer;
+import android.media.SharedFileRegion;
+
+parcelable Endpoint {
+ // Set capacityInFrames to zero if Queue is unused.
+ RingBuffer upMessageQueueParcelable; // server to client
+ RingBuffer downMessageQueueParcelable; // client to server
+ RingBuffer upDataQueueParcelable; // eg. record, could share same queue
+ RingBuffer downDataQueueParcelable; // eg. playback
+ SharedFileRegion[] sharedMemories;
+}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl b/media/libaaudio/src/binding/aidl/aaudio/IAAudioClient.aidl
similarity index 61%
copy from media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
copy to media/libaaudio/src/binding/aidl/aaudio/IAAudioClient.aidl
index 1a5d81a..a010dbc 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/IAAudioClient.aidl
@@ -14,24 +14,8 @@
* limitations under the License.
*/
-package android.media;
+package aaudio;
-/**
- * Priority of a transcoding job.
- *
- * {@hide}
- */
-@Backing(type="int")
-enum TranscodingJobPriority {
- // TODO(hkuang): define what each priority level actually mean.
- kUnspecified = 0,
- kLow = 1,
- /**
- * 2 ~ 20 is reserved for future use.
- */
- kNormal = 21,
- /**
- * 22 ~ 30 is reserved for future use.
- */
- kHigh = 31,
-}
\ No newline at end of file
+interface IAAudioClient {
+ oneway void onStreamChange(int handle, int opcode, int value);
+}
diff --git a/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
new file mode 100644
index 0000000..44d2211
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/IAAudioService.aidl
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import aaudio.Endpoint;
+import aaudio.IAAudioClient;
+import aaudio.StreamParameters;
+import aaudio.StreamRequest;
+
+interface IAAudioService {
+ /**
+ * Register an object to receive audio input/output change and track notifications.
+ * For a given calling pid, AAudio service disregards any registrations after the first.
+ * Thus the IAAudioClient must be a singleton per process.
+ */
+ void registerClient(IAAudioClient client);
+
+ /**
+ * @param request info needed to create the stream
+ * @param paramsOut contains information about the created stream
+ * @return handle to the stream or a negative error
+ */
+ int openStream(in StreamRequest request,
+ out StreamParameters paramsOut);
+
+ int closeStream(int streamHandle);
+
+ /*
+ * Get an immutable description of the in-memory queues
+ * used to communicate with the underlying HAL or Service.
+ */
+ int getStreamDescription(int streamHandle, out Endpoint endpoint);
+
+ /**
+ * Start the flow of data.
+ * This is asynchronous. When complete, the service will send a STARTED event.
+ */
+ int startStream(int streamHandle);
+
+ /**
+ * Stop the flow of data such that start() can resume without loss of data.
+ * This is asynchronous. When complete, the service will send a PAUSED event.
+ */
+ int pauseStream(int streamHandle);
+
+ /**
+ * Stop the flow of data such that the data currently in the buffer is played.
+ * This is asynchronous. When complete, the service will send a STOPPED event.
+ */
+ int stopStream(int streamHandle);
+
+ /**
+ * Discard any data held by the underlying HAL or Service.
+ * This is asynchronous. When complete, the service will send a FLUSHED event.
+ */
+ int flushStream(int streamHandle);
+
+ /**
+ * Manage the specified thread as a low latency audio thread.
+ */
+ int registerAudioThread(int streamHandle,
+ int clientThreadId,
+ long periodNanoseconds);
+
+ int unregisterAudioThread(int streamHandle,
+ int clientThreadId);
+}
diff --git a/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
new file mode 100644
index 0000000..a58b33a
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/RingBuffer.aidl
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import aaudio.SharedRegion;
+
+parcelable RingBuffer {
+ SharedRegion readCounterParcelable;
+ SharedRegion writeCounterParcelable;
+ SharedRegion dataParcelable;
+ int bytesPerFrame; // index is in frames
+ int framesPerBurst; // for ISOCHRONOUS queues
+ int capacityInFrames; // zero if unused
+ int /* RingbufferFlags */ flags; // = RingbufferFlags::NONE;
+}
\ No newline at end of file
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl b/media/libaaudio/src/binding/aidl/aaudio/SharedRegion.aidl
similarity index 61%
copy from media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
copy to media/libaaudio/src/binding/aidl/aaudio/SharedRegion.aidl
index 1a5d81a..26153e8 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/SharedRegion.aidl
@@ -14,24 +14,10 @@
* limitations under the License.
*/
-package android.media;
+package aaudio;
-/**
- * Priority of a transcoding job.
- *
- * {@hide}
- */
-@Backing(type="int")
-enum TranscodingJobPriority {
- // TODO(hkuang): define what each priority level actually mean.
- kUnspecified = 0,
- kLow = 1,
- /**
- * 2 ~ 20 is reserved for future use.
- */
- kNormal = 21,
- /**
- * 22 ~ 30 is reserved for future use.
- */
- kHigh = 31,
-}
\ No newline at end of file
+parcelable SharedRegion {
+ int sharedMemoryIndex;
+ int offsetInBytes;
+ int sizeInBytes;
+}
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
new file mode 100644
index 0000000..b7c4f70
--- /dev/null
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package aaudio;
+
+import android.media.audio.common.AudioFormat;
+
+parcelable StreamParameters {
+ int samplesPerFrame; // = AAUDIO_UNSPECIFIED;
+ int sampleRate; // = AAUDIO_UNSPECIFIED;
+ int deviceId; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_sharing_mode_t */ sharingMode; // = AAUDIO_SHARING_MODE_SHARED;
+ AudioFormat audioFormat; // = AUDIO_FORMAT_DEFAULT;
+ int /* aaudio_direction_t */ direction; // = AAUDIO_DIRECTION_OUTPUT;
+ int /* aaudio_usage_t */ usage; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_content_type_t */ contentType; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_input_preset_t */ inputPreset; // = AAUDIO_UNSPECIFIED;
+ int bufferCapacity; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_allowed_capture_policy_t */ allowedCapturePolicy; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_session_id_t */ sessionId; // = AAUDIO_SESSION_ID_NONE;
+ boolean isPrivacySensitive; // = false;
+}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
similarity index 61%
copy from media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
copy to media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
index 1a5d81a..9bf4077 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamRequest.aidl
@@ -14,24 +14,14 @@
* limitations under the License.
*/
-package android.media;
+package aaudio;
-/**
- * Priority of a transcoding job.
- *
- * {@hide}
- */
-@Backing(type="int")
-enum TranscodingJobPriority {
- // TODO(hkuang): define what each priority level actually mean.
- kUnspecified = 0,
- kLow = 1,
- /**
- * 2 ~ 20 is reserved for future use.
- */
- kNormal = 21,
- /**
- * 22 ~ 30 is reserved for future use.
- */
- kHigh = 31,
+import aaudio.StreamParameters;
+
+parcelable StreamRequest {
+ StreamParameters params;
+ int userId; // = (uid_t) -1;
+ int processId; // = (pid_t) -1;
+ boolean sharingModeMatchRequired; // = false;
+ boolean inService; // = false; // Stream opened by AAudioService
}
\ No newline at end of file
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 06f66d3..0a19d17 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -137,7 +137,7 @@
return AAUDIO_ERROR_INTERNAL;
}
- mUpCommandQueue = std::make_unique<FifoBuffer>(
+ mUpCommandQueue = std::make_unique<FifoBufferIndirect>(
descriptor->bytesPerFrame,
descriptor->capacityInFrames,
descriptor->readCounterAddress,
@@ -166,7 +166,7 @@
? &mDataWriteCounter
: descriptor->writeCounterAddress;
- mDataQueue = std::make_unique<FifoBuffer>(
+ mDataQueue = std::make_unique<FifoBufferIndirect>(
descriptor->bytesPerFrame,
descriptor->capacityInFrames,
readCounterAddress,
diff --git a/media/libaaudio/src/client/AudioEndpoint.h b/media/libaaudio/src/client/AudioEndpoint.h
index 484d917..4c8d60f 100644
--- a/media/libaaudio/src/client/AudioEndpoint.h
+++ b/media/libaaudio/src/client/AudioEndpoint.h
@@ -93,8 +93,8 @@
void dump() const;
private:
- std::unique_ptr<android::FifoBuffer> mUpCommandQueue;
- std::unique_ptr<android::FifoBuffer> mDataQueue;
+ std::unique_ptr<android::FifoBufferIndirect> mUpCommandQueue;
+ std::unique_ptr<android::FifoBufferIndirect> mDataQueue;
bool mFreeRunning;
android::fifo_counter_t mDataReadCounter; // only used if free-running
android::fifo_counter_t mDataWriteCounter; // only used if free-running
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 4520823..94f10e5 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -34,7 +34,6 @@
#include "AudioEndpointParcelable.h"
#include "binding/AAudioStreamRequest.h"
#include "binding/AAudioStreamConfiguration.h"
-#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioGlobal.h"
#include "core/AudioStreamBuilder.h"
@@ -211,10 +210,10 @@
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
}
- mFramesPerBurst = framesPerBurst; // only save good value
+ setFramesPerBurst(framesPerBurst); // only save good value
mBufferCapacityInFrames = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
- if (mBufferCapacityInFrames < mFramesPerBurst
+ if (mBufferCapacityInFrames < getFramesPerBurst()
|| mBufferCapacityInFrames > MAX_BUFFER_CAPACITY_IN_FRAMES) {
ALOGE("%s - bufferCapacity out of range = %d", __func__, mBufferCapacityInFrames);
result = AAUDIO_ERROR_OUT_OF_RANGE;
@@ -239,7 +238,7 @@
}
if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
- mCallbackFrames = mFramesPerBurst;
+ mCallbackFrames = getFramesPerBurst();
}
const int32_t callbackBufferSize = mCallbackFrames * getBytesPerFrame();
@@ -353,6 +352,8 @@
// Clear any stale timestamps from the previous run.
drainTimestampsFromService();
+ prepareBuffersForStart(); // tell subclasses to get ready
+
aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);
if (result == AAUDIO_ERROR_INVALID_HANDLE) {
ALOGD("%s() INVALID_HANDLE, stream was probably stolen", __func__);
@@ -755,9 +756,9 @@
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
int32_t adjustedFrames = requestedFrames;
- const int32_t maximumSize = getBufferCapacity() - mFramesPerBurst;
+ const int32_t maximumSize = getBufferCapacity() - getFramesPerBurst();
// Minimum size should be a multiple number of bursts.
- const int32_t minimumSize = 1 * mFramesPerBurst;
+ const int32_t minimumSize = 1 * getFramesPerBurst();
// Clip to minimum size so that rounding up will work better.
adjustedFrames = std::max(minimumSize, adjustedFrames);
@@ -767,9 +768,9 @@
adjustedFrames = maximumSize;
} else {
// Round to the next highest burst size.
- int32_t numBursts = (adjustedFrames + mFramesPerBurst - 1) / mFramesPerBurst;
- adjustedFrames = numBursts * mFramesPerBurst;
- // Clip just in case maximumSize is not a multiple of mFramesPerBurst.
+ int32_t numBursts = (adjustedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
+ adjustedFrames = numBursts * getFramesPerBurst();
+ // Clip just in case maximumSize is not a multiple of getFramesPerBurst().
adjustedFrames = std::min(maximumSize, adjustedFrames);
}
@@ -804,10 +805,6 @@
return mBufferCapacityInFrames;
}
-int32_t AudioStreamInternal::getFramesPerBurst() const {
- return mFramesPerBurst;
-}
-
// This must be called under mStreamLock.
aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 61591b3..d7024cf 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -20,7 +20,6 @@
#include <stdint.h>
#include <aaudio/AAudio.h>
-#include "binding/IAAudioService.h"
#include "binding/AudioEndpointParcelable.h"
#include "binding/AAudioServiceInterface.h"
#include "client/IsochronousClockModel.h"
@@ -29,7 +28,6 @@
#include "utility/AudioClock.h"
using android::sp;
-using android::IAAudioService;
namespace aaudio {
@@ -66,8 +64,6 @@
int32_t getBufferCapacity() const override;
- int32_t getFramesPerBurst() const override;
-
int32_t getXRunCount() const override {
return mXRunCount;
}
@@ -123,7 +119,9 @@
aaudio_result_t stopCallback();
- virtual void advanceClientToMatchServerPosition() = 0;
+ virtual void prepareBuffersForStart() {}
+
+ virtual void advanceClientToMatchServerPosition(int32_t serverMargin = 0) = 0;
virtual void onFlushFromServer() {}
@@ -159,7 +157,6 @@
aaudio_handle_t mServiceStreamHandle; // opaque handle returned from service
- int32_t mFramesPerBurst = MIN_FRAMES_PER_BURST; // frames per HAL transfer
int32_t mXRunCount = 0; // how many underrun events?
// Offset from underlying frame position.
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index fce322b..5d311fc 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -47,9 +47,9 @@
AudioStreamInternalCapture::~AudioStreamInternalCapture() {}
-void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
+void AudioStreamInternalCapture::advanceClientToMatchServerPosition(int32_t serverMargin) {
int64_t readCounter = mAudioEndpoint->getDataReadCounter();
- int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();
+ int64_t writeCounter = mAudioEndpoint->getDataWriteCounter() + serverMargin;
// Bump offset so caller does not see the retrograde motion in getFramesRead().
int64_t offset = readCounter - writeCounter;
@@ -149,7 +149,7 @@
// Calculate frame position based off of the readCounter because
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
- int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + mFramesPerBurst;
+ int64_t nextPosition = mAudioEndpoint->getDataReadCounter() + getFramesPerBurst();
wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
}
break;
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.h b/media/libaaudio/src/client/AudioStreamInternalCapture.h
index 6436a53..251a7f2 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.h
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.h
@@ -23,7 +23,6 @@
#include "client/AudioStreamInternal.h"
using android::sp;
-using android::IAAudioService;
namespace aaudio {
@@ -46,7 +45,7 @@
}
protected:
- void advanceClientToMatchServerPosition() override;
+ void advanceClientToMatchServerPosition(int32_t serverOffset = 0) override;
/**
* Low level data processing that will not block. It will just read or write as much as it can.
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index d6b73b4..980592c 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -32,6 +32,7 @@
#define LOG_TAG (mInService ? "AudioStreamInternalPlay_Service" \
: "AudioStreamInternalPlay_Client")
+using android::status_t;
using android::WrappingBuffer;
using namespace aaudio;
@@ -92,8 +93,13 @@
return mServiceInterface.flushStream(mServiceStreamHandle);
}
-void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
- int64_t readCounter = mAudioEndpoint->getDataReadCounter();
+void AudioStreamInternalPlay::prepareBuffersForStart() {
+ // Prevent stale data from being played.
+ mAudioEndpoint->eraseDataMemory();
+}
+
+void AudioStreamInternalPlay::advanceClientToMatchServerPosition(int32_t serverMargin) {
+ int64_t readCounter = mAudioEndpoint->getDataReadCounter() + serverMargin;
int64_t writeCounter = mAudioEndpoint->getDataWriteCounter();
// Bump offset so caller does not see the retrograde motion in getFramesRead().
@@ -151,7 +157,9 @@
if (mNeedCatchUp.isRequested()) {
// Catch an MMAP pointer that is already advancing.
// This will avoid initial underruns caused by a slow cold start.
- advanceClientToMatchServerPosition();
+ // We add a one burst margin in case the DSP advances before we can write the data.
+ // This can help prevent the beginning of the stream from being skipped.
+ advanceClientToMatchServerPosition(getFramesPerBurst());
mNeedCatchUp.acknowledge();
}
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.h b/media/libaaudio/src/client/AudioStreamInternalPlay.h
index 2e93157..7b1cddc 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.h
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.h
@@ -25,7 +25,6 @@
#include "client/AudioStreamInternal.h"
using android::sp;
-using android::IAAudioService;
namespace aaudio {
@@ -65,7 +64,9 @@
protected:
- void advanceClientToMatchServerPosition() override;
+ void prepareBuffersForStart() override;
+
+ void advanceClientToMatchServerPosition(int32_t serverMargin = 0) override;
void onFlushFromServer() override;
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 43240ec..310ffbe 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -328,8 +328,11 @@
void AudioStream::setState(aaudio_stream_state_t state) {
ALOGD("%s(s#%d) from %d to %d", __func__, getId(), mState, state);
+ if (state == mState) {
+ return; // no change
+ }
// Track transition to DISCONNECTED state.
- if (state == AAUDIO_STREAM_STATE_DISCONNECTED && mState != state) {
+ if (state == AAUDIO_STREAM_STATE_DISCONNECTED) {
android::mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_DISCONNECT)
.set(AMEDIAMETRICS_PROP_STATE, AudioGlobal_convertStreamStateToText(getState()))
@@ -337,18 +340,18 @@
}
// CLOSED is a final state
if (mState == AAUDIO_STREAM_STATE_CLOSED) {
- ALOGE("%s(%d) tried to set to %d but already CLOSED", __func__, getId(), state);
+ ALOGW("%s(%d) tried to set to %d but already CLOSED", __func__, getId(), state);
// Once CLOSING, we can only move to CLOSED state.
} else if (mState == AAUDIO_STREAM_STATE_CLOSING
&& state != AAUDIO_STREAM_STATE_CLOSED) {
- ALOGE("%s(%d) tried to set to %d but already CLOSING", __func__, getId(), state);
+ ALOGW("%s(%d) tried to set to %d but already CLOSING", __func__, getId(), state);
// Once DISCONNECTED, we can only move to CLOSING or CLOSED state.
} else if (mState == AAUDIO_STREAM_STATE_DISCONNECTED
&& !(state == AAUDIO_STREAM_STATE_CLOSING
|| state == AAUDIO_STREAM_STATE_CLOSED)) {
- ALOGE("%s(%d) tried to set to %d but already DISCONNECTED", __func__, getId(), state);
+ ALOGW("%s(%d) tried to set to %d but already DISCONNECTED", __func__, getId(), state);
} else {
mState = state;
@@ -404,7 +407,9 @@
// It converts the 'C' function call to a C++ method call.
static void* AudioStream_internalThreadProc(void* threadArg) {
AudioStream *audioStream = (AudioStream *) threadArg;
- return audioStream->wrapUserThread();
+ // Use an sp<> to prevent the stream from being deleted while running.
+ android::sp<AudioStream> protectedStream(audioStream);
+ return protectedStream->wrapUserThread();
}
// This is not exposed in the API.
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index e0bd9d8..e438477 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -202,11 +202,11 @@
}
virtual int32_t getBufferCapacity() const {
- return AAUDIO_ERROR_UNIMPLEMENTED;
+ return mBufferCapacity;
}
virtual int32_t getFramesPerBurst() const {
- return AAUDIO_ERROR_UNIMPLEMENTED;
+ return mFramesPerBurst;
}
virtual int32_t getXRunCount() const {
@@ -498,30 +498,32 @@
mSampleRate = sampleRate;
}
- /**
- * This should not be called after the open() call.
- */
+ // This should not be called after the open() call.
void setSamplesPerFrame(int32_t samplesPerFrame) {
mSamplesPerFrame = samplesPerFrame;
}
- /**
- * This should not be called after the open() call.
- */
+ // This should not be called after the open() call.
+ void setFramesPerBurst(int32_t framesPerBurst) {
+ mFramesPerBurst = framesPerBurst;
+ }
+
+ // This should not be called after the open() call.
+ void setBufferCapacity(int32_t bufferCapacity) {
+ mBufferCapacity = bufferCapacity;
+ }
+
+ // This should not be called after the open() call.
void setSharingMode(aaudio_sharing_mode_t sharingMode) {
mSharingMode = sharingMode;
}
- /**
- * This should not be called after the open() call.
- */
+ // This should not be called after the open() call.
void setFormat(audio_format_t format) {
mFormat = format;
}
- /**
- * This should not be called after the open() call.
- */
+ // This should not be called after the open() call.
void setDeviceFormat(audio_format_t format) {
mDeviceFormat = format;
}
@@ -536,6 +538,7 @@
mDeviceId = deviceId;
}
+ // This should not be called after the open() call.
void setSessionId(int32_t sessionId) {
mSessionId = sessionId;
}
@@ -623,6 +626,8 @@
audio_format_t mFormat = AUDIO_FORMAT_DEFAULT;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
+ int32_t mFramesPerBurst = 0;
+ int32_t mBufferCapacity = 0;
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/fifo/FifoBuffer.cpp b/media/libaaudio/src/fifo/FifoBuffer.cpp
index f5113f2..5c11882 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.cpp
+++ b/media/libaaudio/src/fifo/FifoBuffer.cpp
@@ -31,40 +31,37 @@
#include "FifoBuffer.h"
using android::FifoBuffer;
+using android::FifoBufferAllocated;
+using android::FifoBufferIndirect;
using android::fifo_frames_t;
-FifoBuffer::FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
- : mBytesPerFrame(bytesPerFrame)
+FifoBuffer::FifoBuffer(int32_t bytesPerFrame)
+ : mBytesPerFrame(bytesPerFrame) {}
+
+FifoBufferAllocated::FifoBufferAllocated(int32_t bytesPerFrame, fifo_frames_t capacityInFrames)
+ : FifoBuffer(bytesPerFrame)
{
mFifo = std::make_unique<FifoController>(capacityInFrames, capacityInFrames);
// allocate buffer
int32_t bytesPerBuffer = bytesPerFrame * capacityInFrames;
- mStorage = new uint8_t[bytesPerBuffer];
- mStorageOwned = true;
+ mInternalStorage = std::make_unique<uint8_t[]>(bytesPerBuffer);
ALOGV("%s() capacityInFrames = %d, bytesPerFrame = %d",
__func__, capacityInFrames, bytesPerFrame);
}
-FifoBuffer::FifoBuffer( int32_t bytesPerFrame,
+FifoBufferIndirect::FifoBufferIndirect( int32_t bytesPerFrame,
fifo_frames_t capacityInFrames,
- fifo_counter_t * readIndexAddress,
- fifo_counter_t * writeIndexAddress,
+ fifo_counter_t *readIndexAddress,
+ fifo_counter_t *writeIndexAddress,
void * dataStorageAddress
)
- : mBytesPerFrame(bytesPerFrame)
- , mStorage(static_cast<uint8_t *>(dataStorageAddress))
+ : FifoBuffer(bytesPerFrame)
+ , mExternalStorage(static_cast<uint8_t *>(dataStorageAddress))
{
mFifo = std::make_unique<FifoControllerIndirect>(capacityInFrames,
capacityInFrames,
readIndexAddress,
writeIndexAddress);
- mStorageOwned = false;
-}
-
-FifoBuffer::~FifoBuffer() {
- if (mStorageOwned) {
- delete[] mStorage;
- }
}
int32_t FifoBuffer::convertFramesToBytes(fifo_frames_t frames) {
@@ -76,15 +73,16 @@
int32_t startIndex) {
wrappingBuffer->data[1] = nullptr;
wrappingBuffer->numFrames[1] = 0;
+ uint8_t *storage = getStorage();
if (framesAvailable > 0) {
fifo_frames_t capacity = mFifo->getCapacity();
- uint8_t *source = &mStorage[convertFramesToBytes(startIndex)];
+ uint8_t *source = &storage[convertFramesToBytes(startIndex)];
// Does the available data cross the end of the FIFO?
if ((startIndex + framesAvailable) > capacity) {
wrappingBuffer->data[0] = source;
fifo_frames_t firstFrames = capacity - startIndex;
wrappingBuffer->numFrames[0] = firstFrames;
- wrappingBuffer->data[1] = &mStorage[0];
+ wrappingBuffer->data[1] = &storage[0];
wrappingBuffer->numFrames[1] = framesAvailable - firstFrames;
} else {
wrappingBuffer->data[0] = source;
@@ -191,6 +189,6 @@
void FifoBuffer::eraseMemory() {
int32_t numBytes = convertFramesToBytes(getBufferCapacityInFrames());
if (numBytes > 0) {
- memset(mStorage, 0, (size_t) numBytes);
+ memset(getStorage(), 0, (size_t) numBytes);
}
}
diff --git a/media/libaaudio/src/fifo/FifoBuffer.h b/media/libaaudio/src/fifo/FifoBuffer.h
index 0d188c4..37548f0 100644
--- a/media/libaaudio/src/fifo/FifoBuffer.h
+++ b/media/libaaudio/src/fifo/FifoBuffer.h
@@ -38,15 +38,9 @@
class FifoBuffer {
public:
- FifoBuffer(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
+ FifoBuffer(int32_t bytesPerFrame);
- FifoBuffer(int32_t bytesPerFrame,
- fifo_frames_t capacityInFrames,
- fifo_counter_t *readCounterAddress,
- fifo_counter_t *writeCounterAddress,
- void *dataStorageAddress);
-
- ~FifoBuffer();
+ virtual ~FifoBuffer() = default;
int32_t convertFramesToBytes(fifo_frames_t frames);
@@ -121,19 +115,53 @@
*/
void eraseMemory();
-private:
+protected:
+
+ virtual uint8_t *getStorage() const = 0;
void fillWrappingBuffer(WrappingBuffer *wrappingBuffer,
int32_t framesAvailable, int32_t startIndex);
const int32_t mBytesPerFrame;
- // We do not use a std::unique_ptr for mStorage because it is often a pointer to
- // memory shared between processes and cannot be deleted trivially.
- uint8_t *mStorage = nullptr;
- bool mStorageOwned = false; // did this object allocate the storage?
std::unique_ptr<FifoControllerBase> mFifo{};
};
+// Define two subclasses to handle the two ways that storage is allocated.
+
+// Allocate storage internally.
+class FifoBufferAllocated : public FifoBuffer {
+public:
+ FifoBufferAllocated(int32_t bytesPerFrame, fifo_frames_t capacityInFrames);
+
+private:
+
+ uint8_t *getStorage() const override {
+ return mInternalStorage.get();
+ };
+
+ std::unique_ptr<uint8_t[]> mInternalStorage;
+};
+
+// Allocate storage externally and pass it in.
+class FifoBufferIndirect : public FifoBuffer {
+public:
+ // We use raw pointers because the memory may be
+ // in the middle of an allocated block and cannot be deleted directly.
+ FifoBufferIndirect(int32_t bytesPerFrame,
+ fifo_frames_t capacityInFrames,
+ fifo_counter_t* readCounterAddress,
+ fifo_counter_t* writeCounterAddress,
+ void* dataStorageAddress);
+
+private:
+
+ uint8_t *getStorage() const override {
+ return mExternalStorage;
+ };
+
+ uint8_t *mExternalStorage = nullptr;
+};
+
} // android
#endif //FIFO_FIFO_BUFFER_H
diff --git a/media/libaaudio/src/fifo/FifoControllerIndirect.h b/media/libaaudio/src/fifo/FifoControllerIndirect.h
index 5832d9c..ec48e57 100644
--- a/media/libaaudio/src/fifo/FifoControllerIndirect.h
+++ b/media/libaaudio/src/fifo/FifoControllerIndirect.h
@@ -27,7 +27,7 @@
/**
* A FifoControllerBase with counters external to the class.
*
- * The actual copunters may be stored in separate regions of shared memory
+ * The actual counters may be stored in separate regions of shared memory
* with different access rights.
*/
class FifoControllerIndirect : public FifoControllerBase {
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
index fefe6e0..88ef270 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.h
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -112,6 +112,18 @@
return mFramesRead.increment(frames);
}
+ /**
+ * Get the framesPerBurst from the underlying API.
+ * @return framesPerBurst
+ */
+ virtual int32_t getFramesPerBurstFromDevice() const = 0;
+
+ /**
+ * Get the bufferCapacity from the underlying API.
+ * @return bufferCapacity in frames
+ */
+ virtual int32_t getBufferCapacityFromDevice() const = 0;
+
// This is used for exact matching by MediaMetrics. So do not change it.
// MediaMetricsConstants.h: AMEDIAMETRICS_PROP_CALLERNAME_VALUE_AAUDIO
static constexpr char kCallerName[] = "aaudio";
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index d62951e..d46ef56 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -118,6 +118,7 @@
setDeviceFormat(getFormat());
}
+ // To avoid glitching, let AudioFlinger pick the optimal burst size.
uint32_t notificationFrames = 0;
// Setup the callback if there is one.
@@ -128,7 +129,6 @@
streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
callback = getLegacyCallback();
callbackData = this;
- notificationFrames = builder.getFramesPerDataCallback();
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
@@ -210,12 +210,9 @@
// Get the actual values from the AudioRecord.
setSamplesPerFrame(mAudioRecord->channelCount());
-
- int32_t actualSampleRate = mAudioRecord->getSampleRate();
- ALOGW_IF(actualSampleRate != getSampleRate(),
- "open() sampleRate changed from %d to %d",
- getSampleRate(), actualSampleRate);
- setSampleRate(actualSampleRate);
+ setSampleRate(mAudioRecord->getSampleRate());
+ setBufferCapacity(getBufferCapacityFromDevice());
+ setFramesPerBurst(getFramesPerBurstFromDevice());
// We may need to pass the data through a block size adapter to guarantee constant size.
if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
@@ -491,7 +488,7 @@
return getBufferCapacity(); // TODO implement in AudioRecord?
}
-int32_t AudioStreamRecord::getBufferCapacity() const
+int32_t AudioStreamRecord::getBufferCapacityFromDevice() const
{
return static_cast<int32_t>(mAudioRecord->frameCount());
}
@@ -501,8 +498,7 @@
return 0; // TODO implement when AudioRecord supports it
}
-int32_t AudioStreamRecord::getFramesPerBurst() const
-{
+int32_t AudioStreamRecord::getFramesPerBurstFromDevice() const {
return static_cast<int32_t>(mAudioRecord->getNotificationPeriodInFrames());
}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index e4ef1c0..ad8dfe4 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -56,14 +56,10 @@
int32_t getBufferSize() const override;
- int32_t getBufferCapacity() const override;
-
int32_t getXRunCount() const override;
int64_t getFramesWritten() override;
- int32_t getFramesPerBurst() const override;
-
aaudio_result_t updateStateMachine() override;
aaudio_direction_t getDirection() const override {
@@ -79,6 +75,11 @@
const void * maybeConvertDeviceData(const void *audioData, int32_t numFrames) override;
+protected:
+
+ int32_t getFramesPerBurstFromDevice() const override;
+ int32_t getBufferCapacityFromDevice() const override;
+
private:
android::sp<android::AudioRecord> mAudioRecord;
// adapts between variable sized blocks and fixed size blocks
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 3831046..307904e 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -96,6 +96,7 @@
size_t frameCount = (size_t)builder.getBufferCapacity();
+ // To avoid glitching, let AudioFlinger pick the optimal burst size.
int32_t notificationFrames = 0;
const audio_format_t format = (getFormat() == AUDIO_FORMAT_DEFAULT)
@@ -118,8 +119,6 @@
// Take advantage of a special trick that allows us to create a buffer
// that is some multiple of the burst size.
notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
- } else {
- notificationFrames = builder.getFramesPerDataCallback();
}
}
mCallbackBufferSize = builder.getFramesPerDataCallback();
@@ -193,12 +192,9 @@
setSamplesPerFrame(mAudioTrack->channelCount());
setFormat(mAudioTrack->format());
setDeviceFormat(mAudioTrack->format());
-
- int32_t actualSampleRate = mAudioTrack->getSampleRate();
- ALOGW_IF(actualSampleRate != getSampleRate(),
- "open() sampleRate changed from %d to %d",
- getSampleRate(), actualSampleRate);
- setSampleRate(actualSampleRate);
+ setSampleRate(mAudioTrack->getSampleRate());
+ setBufferCapacity(getBufferCapacityFromDevice());
+ setFramesPerBurst(getFramesPerBurstFromDevice());
// We may need to pass the data through a block size adapter to guarantee constant size.
if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
@@ -221,9 +217,6 @@
: (aaudio_session_id_t) mAudioTrack->getSessionId();
setSessionId(actualSessionId);
- mInitialBufferCapacity = getBufferCapacity();
- mInitialFramesPerBurst = getFramesPerBurst();
-
mAudioTrack->addAudioDeviceCallback(this);
// Update performance mode based on the actual stream flags.
@@ -240,11 +233,11 @@
setSharingMode(AAUDIO_SHARING_MODE_SHARED); // EXCLUSIVE mode not supported in legacy
- // Log warning if we did not get what we asked for.
- ALOGW_IF(actualFlags != flags,
+ // Log if we did not get what we asked for.
+ ALOGD_IF(actualFlags != flags,
"open() flags changed from 0x%08X to 0x%08X",
flags, actualFlags);
- ALOGW_IF(actualPerformanceMode != perfMode,
+ ALOGD_IF(actualPerformanceMode != perfMode,
"open() perfMode changed from %d to %d",
perfMode, actualPerformanceMode);
@@ -288,8 +281,8 @@
|| mAudioTrack->format() != getFormat()
|| mAudioTrack->getSampleRate() != getSampleRate()
|| mAudioTrack->getRoutedDeviceId() != getDeviceId()
- || getBufferCapacity() != mInitialBufferCapacity
- || getFramesPerBurst() != mInitialFramesPerBurst) {
+ || getBufferCapacityFromDevice() != getBufferCapacity()
+ || getFramesPerBurstFromDevice() != getFramesPerBurst()) {
processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
}
break;
@@ -478,7 +471,7 @@
return static_cast<int32_t>(mAudioTrack->getBufferSizeInFrames());
}
-int32_t AudioStreamTrack::getBufferCapacity() const
+int32_t AudioStreamTrack::getBufferCapacityFromDevice() const
{
return static_cast<int32_t>(mAudioTrack->frameCount());
}
@@ -488,8 +481,7 @@
return static_cast<int32_t>(mAudioTrack->getUnderrunCount());
}
-int32_t AudioStreamTrack::getFramesPerBurst() const
-{
+int32_t AudioStreamTrack::getFramesPerBurstFromDevice() const {
return static_cast<int32_t>(mAudioTrack->getNotificationPeriodInFrames());
}
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 6334f66..5a8fb39 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -69,8 +69,6 @@
aaudio_result_t setBufferSize(int32_t requestedFrames) override;
int32_t getBufferSize() const override;
- int32_t getBufferCapacity() const override;
- int32_t getFramesPerBurst()const override;
int32_t getXRunCount() const override;
int64_t getFramesRead() override;
@@ -96,6 +94,11 @@
const android::media::VolumeShaper::Operation& operation) override;
#endif
+protected:
+
+ int32_t getFramesPerBurstFromDevice() const override;
+ int32_t getBufferCapacityFromDevice() const override;
+
private:
android::sp<android::AudioTrack> mAudioTrack;
@@ -105,10 +108,6 @@
// TODO add 64-bit position reporting to AudioTrack and use it.
aaudio_wrapping_frames_t mPositionWhenPausing = 0;
-
- // initial AudioTrack frame count and notification period
- int32_t mInitialBufferCapacity = 0;
- int32_t mInitialFramesPerBurst = 0;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index 9007b10..dbb3d2b 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -231,7 +231,8 @@
case AAUDIO_ALLOW_CAPTURE_BY_SYSTEM:
return AUDIO_FLAG_NO_MEDIA_PROJECTION;
case AAUDIO_ALLOW_CAPTURE_BY_NONE:
- return AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NO_SYSTEM_CAPTURE;
+ return static_cast<audio_flags_mask_t>(
+ AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NO_SYSTEM_CAPTURE);
default:
ALOGE("%s() 0x%08X unrecognized", __func__, policy);
return AUDIO_FLAG_NONE; //
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 8935d57..95d6543 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -11,10 +11,12 @@
defaults: ["libaaudio_tests_defaults"],
srcs: ["test_marshalling.cpp"],
shared_libs: [
+ "aaudio-aidl-cpp",
"libaaudio_internal",
"libbinder",
"libcutils",
"libutils",
+ "shared-file-region-aidl-unstable-cpp",
],
}
diff --git a/media/libaaudio/tests/test_aaudio_monkey.cpp b/media/libaaudio/tests/test_aaudio_monkey.cpp
index be54835..cc29678 100644
--- a/media/libaaudio/tests/test_aaudio_monkey.cpp
+++ b/media/libaaudio/tests/test_aaudio_monkey.cpp
@@ -46,11 +46,10 @@
int32_t numFrames);
void AAudioMonkeyErrorCallbackProc(
- AAudioStream *stream __unused,
- void *userData __unused,
- aaudio_result_t error) {
- printf("Error Callback, error: %d\n",(int)error);
-}
+ AAudioStream * /* stream */,
+ void *userData,
+ aaudio_result_t error);
+
// This function is not thread safe. Only use this from a single thread.
double nextRandomDouble() {
@@ -99,6 +98,10 @@
aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
aaudio_result_t result = AAudioStream_waitForStateChange(getStream(),
AAUDIO_STREAM_STATE_UNKNOWN, &state, 0);
+ if (result == AAUDIO_ERROR_DISCONNECTED) {
+ printf("WARNING - AAudioStream_waitForStateChange returned DISCONNECTED\n");
+ return true; // OK
+ }
if (result != AAUDIO_OK) {
printf("ERROR - AAudioStream_waitForStateChange returned %d\n", result);
return false;
@@ -114,7 +117,7 @@
(unsigned long long) framesRead,
xRuns);
- if (framesWritten < framesRead) {
+ if (state != AAUDIO_STREAM_STATE_STARTING && framesWritten < framesRead) {
printf("WARNING - UNDERFLOW - diff = %d !!!!!!!!!!!!\n",
(int) (framesWritten - framesRead));
}
@@ -132,8 +135,23 @@
return -1;
}
+ // update and query stream state
+ aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNKNOWN;
+ state = AAudioStream_getState(getStream());
+ if (state < 0) {
+ printf("ERROR - AAudioStream_getState returned %d\n", state);
+ return state;
+ }
+
+ if (state == AAUDIO_STREAM_STATE_DISCONNECTED) {
+ printf("#%d, Closing disconnected stream.\n", getIndex());
+ result = close();
+ return result;
+ }
+
double dice = nextRandomDouble();
// Select an action based on a weighted probability.
+ printf(" "); // indent action
if (dice < PROB_START) {
printf("start\n");
result = AAudioStream_requestStart(getStream());
@@ -200,6 +218,10 @@
return AAUDIO_CALLBACK_RESULT_CONTINUE;
}
+ int getIndex() const {
+ return mIndex;
+ }
+
private:
const AAudioArgsParser *mArgParser;
const int mIndex;
@@ -223,6 +245,13 @@
return monkey->renderAudio(stream, audioData, numFrames);
}
+void AAudioMonkeyErrorCallbackProc(
+ AAudioStream * /* stream */,
+ void *userData,
+ aaudio_result_t error) {
+ AAudioMonkey *monkey = (AAudioMonkey *) userData;
+ printf("#%d, Error Callback, error: %d\n", monkey->getIndex(), (int)error);
+}
static void usage() {
AAudioArgsParser::usage();
diff --git a/media/libaaudio/tests/test_atomic_fifo.cpp b/media/libaaudio/tests/test_atomic_fifo.cpp
index 130ef43..4dbb219 100644
--- a/media/libaaudio/tests/test_atomic_fifo.cpp
+++ b/media/libaaudio/tests/test_atomic_fifo.cpp
@@ -26,6 +26,7 @@
using android::fifo_counter_t;
using android::FifoController;
using android::FifoBuffer;
+using android::FifoBufferIndirect;
using android::WrappingBuffer;
TEST(test_fifo_controller, fifo_indices) {
@@ -325,7 +326,7 @@
verifyStorageIntegrity();
}
- FifoBuffer mFifoBuffer;
+ FifoBufferIndirect mFifoBuffer;
fifo_frames_t mNextWriteIndex = 0;
fifo_frames_t mNextVerifyIndex = 0;
fifo_frames_t mThreshold;
diff --git a/media/libaaudio/tests/test_marshalling.cpp b/media/libaaudio/tests/test_marshalling.cpp
index c51fbce..49213dc 100644
--- a/media/libaaudio/tests/test_marshalling.cpp
+++ b/media/libaaudio/tests/test_marshalling.cpp
@@ -33,6 +33,29 @@
using namespace android;
using namespace aaudio;
+template<typename T>
+T copy(const T& object) {
+ return T(object);
+}
+
+template<>
+SharedMemoryParcelable copy<SharedMemoryParcelable>(const SharedMemoryParcelable& object) {
+ return object.dup();
+}
+
+template<typename T>
+void writeToParcel(const T& object, Parcel* parcel) {
+ copy(object).parcelable().writeToParcel(parcel);
+}
+
+template<typename T>
+T readFromParcel(const Parcel& parcel) {
+ using ParcelType = std::decay_t<decltype(std::declval<T>().parcelable())>;
+ ParcelType parcelable;
+ parcelable.readFromParcel(&parcel);
+ return T(std::move(parcelable));
+}
+
// Test adding one value.
TEST(test_marshalling, aaudio_one_read_write) {
Parcel parcel;
@@ -48,7 +71,6 @@
// Test SharedMemoryParcel.
TEST(test_marshalling, aaudio_shared_memory) {
SharedMemoryParcelable sharedMemoryA;
- SharedMemoryParcelable sharedMemoryB;
const size_t memSizeBytes = 840;
unique_fd fd(ashmem_create_region("TestMarshalling", memSizeBytes));
ASSERT_LE(0, fd);
@@ -63,10 +85,10 @@
Parcel parcel;
size_t pos = parcel.dataPosition();
- sharedMemoryA.writeToParcel(&parcel);
+ writeToParcel(sharedMemoryA, &parcel);
parcel.setDataPosition(pos);
- sharedMemoryB.readFromParcel(&parcel);
+ SharedMemoryParcelable sharedMemoryB = readFromParcel<SharedMemoryParcelable>(parcel);
EXPECT_EQ(sharedMemoryA.getSizeInBytes(), sharedMemoryB.getSizeInBytes());
// should see same value at two different addresses
@@ -81,7 +103,6 @@
TEST(test_marshalling, aaudio_shared_region) {
SharedMemoryParcelable sharedMemories[2];
SharedRegionParcelable sharedRegionA;
- SharedRegionParcelable sharedRegionB;
const size_t memSizeBytes = 840;
unique_fd fd(ashmem_create_region("TestMarshalling", memSizeBytes));
ASSERT_LE(0, fd);
@@ -97,10 +118,10 @@
Parcel parcel;
size_t pos = parcel.dataPosition();
- sharedRegionA.writeToParcel(&parcel);
+ writeToParcel(sharedRegionA, &parcel);
parcel.setDataPosition(pos);
- sharedRegionB.readFromParcel(&parcel);
+ SharedRegionParcelable sharedRegionB = readFromParcel<SharedRegionParcelable>(parcel);
// should see same value
void *region2;
@@ -113,7 +134,6 @@
TEST(test_marshalling, aaudio_ring_buffer_parcelable) {
SharedMemoryParcelable sharedMemories[2];
RingBufferParcelable ringBufferA;
- RingBufferParcelable ringBufferB;
const size_t bytesPerFrame = 8;
const size_t framesPerBurst = 32;
@@ -147,11 +167,11 @@
// write A to parcel
Parcel parcel;
size_t pos = parcel.dataPosition();
- ringBufferA.writeToParcel(&parcel);
+ writeToParcel(ringBufferA, &parcel);
// read B from parcel
parcel.setDataPosition(pos);
- ringBufferB.readFromParcel(&parcel);
+ RingBufferParcelable ringBufferB = readFromParcel<RingBufferParcelable>(parcel);
RingBufferDescriptor descriptorB;
EXPECT_EQ(AAUDIO_OK, ringBufferB.resolve(sharedMemories, &descriptorB));
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index e8e1a09..d7e9461 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -77,8 +77,6 @@
"IAudioPolicyService.cpp",
"IAudioPolicyServiceClient.cpp",
"IAudioTrack.cpp",
- "IEffect.cpp",
- "IEffectClient.cpp",
"ToneGenerator.cpp",
"PlayerBase.cpp",
"RecordingActivityTracker.cpp",
@@ -99,6 +97,7 @@
"libmediautils",
"libnblog",
"libprocessgroup",
+ "libshmemcompat",
"libutils",
"libvibrator",
],
@@ -108,7 +107,8 @@
"frameworks/av/media/libnbaio/include_mono/",
],
local_include_dirs: [
- "include/media", "aidl"
+ "include/media",
+ "aidl",
],
header_libs: [
"libaudioclient_headers",
@@ -116,10 +116,16 @@
"libmedia_headers",
],
export_header_lib_headers: ["libaudioclient_headers"],
+ export_static_lib_headers: [
+ "effect-aidl-cpp",
+ "shared-file-region-aidl-unstable-cpp",
+ ],
- // for memory heap analysis
static_libs: [
+ "effect-aidl-cpp",
+ // for memory heap analysis
"libc_malloc_debug_backtrace",
+ "shared-file-region-aidl-unstable-cpp",
],
cflags: [
"-Wall",
@@ -127,7 +133,7 @@
"-Wno-error=deprecated-declarations",
],
sanitize: {
- misc_undefined : [
+ misc_undefined: [
"unsigned-integer-overflow",
"signed-integer-overflow",
],
@@ -170,3 +176,16 @@
"aidl/android/media/ICaptureStateListener.aidl",
],
}
+
+aidl_interface {
+ name: "effect-aidl",
+ unstable: true,
+ local_include_dir: "aidl",
+ srcs: [
+ "aidl/android/media/IEffect.aidl",
+ "aidl/android/media/IEffectClient.aidl",
+ ],
+ imports: [
+ "shared-file-region-aidl",
+ ],
+}
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 73b96ab..1282474 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -23,16 +23,28 @@
#include <sys/types.h>
#include <limits.h>
-#include <private/media/AudioEffectShared.h>
-#include <media/AudioEffect.h>
-
-#include <utils/Log.h>
#include <binder/IPCThreadState.h>
-
-
+#include <media/AudioEffect.h>
+#include <media/ShmemCompat.h>
+#include <private/media/AudioEffectShared.h>
+#include <utils/Log.h>
namespace android {
+using binder::Status;
+
+namespace {
+
+// Copy from a raw pointer + size into a vector of bytes.
+void appendToBuffer(const void* data,
+ size_t size,
+ std::vector<uint8_t>* buffer) {
+ const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
+ buffer->insert(buffer->end(), p, p + size);
+}
+
+} // namespace
+
// ---------------------------------------------------------------------------
AudioEffect::AudioEffect(const String16& opPackageName)
@@ -50,7 +62,7 @@
const AudioDeviceTypeAddr& device,
bool probe)
{
- sp<IEffect> iEffect;
+ sp<media::IEffect> iEffect;
sp<IMemory> cblk;
int enabled;
@@ -112,8 +124,10 @@
mEnabled = (volatile int32_t)enabled;
- cblk = iEffect->getCblk();
- if (cblk == 0) {
+ if (media::SharedFileRegion shmem;
+ !iEffect->getCblk(&shmem).isOk()
+ || !convertSharedFileRegionToIMemory(shmem, &cblk)
+ || cblk == 0) {
mStatus = NO_INIT;
ALOGE("Could not get control block");
return mStatus;
@@ -216,15 +230,19 @@
}
status_t status = NO_ERROR;
-
AutoMutex lock(mLock);
if (enabled != mEnabled) {
+ Status bs;
+
if (enabled) {
ALOGV("enable %p", this);
- status = mIEffect->enable();
+ bs = mIEffect->enable(&status);
} else {
ALOGV("disable %p", this);
- status = mIEffect->disable();
+ bs = mIEffect->disable(&status);
+ }
+ if (!bs.isOk()) {
+ status = bs.transactionError();
}
if (status == NO_ERROR) {
mEnabled = enabled;
@@ -257,7 +275,20 @@
mLock.lock();
}
- status_t status = mIEffect->command(cmdCode, cmdSize, cmdData, replySize, replyData);
+ std::vector<uint8_t> data;
+ appendToBuffer(cmdData, cmdSize, &data);
+
+ status_t status;
+ std::vector<uint8_t> response;
+
+ Status bs = mIEffect->command(cmdCode, data, *replySize, &response, &status);
+ if (!bs.isOk()) {
+ status = bs.transactionError();
+ }
+ if (status == NO_ERROR) {
+ memcpy(replyData, response.data(), response.size());
+ *replySize = response.size();
+ }
if (cmdCode == EFFECT_CMD_ENABLE || cmdCode == EFFECT_CMD_DISABLE) {
if (status == NO_ERROR) {
@@ -272,7 +303,6 @@
return status;
}
-
status_t AudioEffect::setParameter(effect_param_t *param)
{
if (mProbe) {
@@ -286,14 +316,27 @@
return BAD_VALUE;
}
- uint32_t size = sizeof(int);
uint32_t psize = ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) + param->vsize;
ALOGV("setParameter: param: %d, param2: %d", *(int *)param->data,
(param->psize == 8) ? *((int *)param->data + 1): -1);
- return mIEffect->command(EFFECT_CMD_SET_PARAM, sizeof (effect_param_t) + psize, param, &size,
- ¶m->status);
+ std::vector<uint8_t> cmd;
+ appendToBuffer(param, sizeof(effect_param_t) + psize, &cmd);
+ std::vector<uint8_t> response;
+ status_t status;
+ Status bs = mIEffect->command(EFFECT_CMD_SET_PARAM,
+ cmd,
+ sizeof(int),
+ &response,
+ &status);
+ if (!bs.isOk()) {
+ status = bs.transactionError();
+ return status;
+ }
+ assert(response.size() == sizeof(int));
+ memcpy(¶m->status, response.data(), response.size());
+ return status;
}
status_t AudioEffect::setParameterDeferred(effect_param_t *param)
@@ -338,8 +381,18 @@
if (mCblk->clientIndex == 0) {
return INVALID_OPERATION;
}
- uint32_t size = 0;
- return mIEffect->command(EFFECT_CMD_SET_PARAM_COMMIT, 0, NULL, &size, NULL);
+ std::vector<uint8_t> cmd;
+ std::vector<uint8_t> response;
+ status_t status;
+ Status bs = mIEffect->command(EFFECT_CMD_SET_PARAM_COMMIT,
+ cmd,
+ 0,
+ &response,
+ &status);
+ if (!bs.isOk()) {
+ status = bs.transactionError();
+ }
+ return status;
}
status_t AudioEffect::getParameter(effect_param_t *param)
@@ -361,8 +414,18 @@
uint32_t psize = sizeof(effect_param_t) + ((param->psize - 1) / sizeof(int) + 1) * sizeof(int) +
param->vsize;
- return mIEffect->command(EFFECT_CMD_GET_PARAM, sizeof(effect_param_t) + param->psize, param,
- &psize, param);
+ status_t status;
+ std::vector<uint8_t> cmd;
+ std::vector<uint8_t> response;
+ appendToBuffer(param, sizeof(effect_param_t) + param->psize, &cmd);
+
+ Status bs = mIEffect->command(EFFECT_CMD_GET_PARAM, cmd, psize, &response, &status);
+ if (!bs.isOk()) {
+ status = bs.transactionError();
+ return status;
+ }
+ memcpy(param, response.data(), response.size());
+ return status;
}
@@ -410,19 +473,18 @@
}
}
-void AudioEffect::commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize __unused,
- void *cmdData,
- uint32_t replySize __unused,
- void *replyData)
+void AudioEffect::commandExecuted(int32_t cmdCode,
+ const std::vector<uint8_t>& cmdData,
+ const std::vector<uint8_t>& replyData)
{
- if (cmdData == NULL || replyData == NULL) {
+ if (cmdData.empty() || replyData.empty()) {
return;
}
if (mCbf != NULL && cmdCode == EFFECT_CMD_SET_PARAM) {
- effect_param_t *cmd = (effect_param_t *)cmdData;
- cmd->status = *(int32_t *)replyData;
+ std::vector<uint8_t> cmdDataCopy(cmdData);
+ effect_param_t* cmd = reinterpret_cast<effect_param_t *>(cmdDataCopy.data());
+ cmd->status = *reinterpret_cast<const int32_t *>(replyData.data());
mCbf(EVENT_PARAMETER_CHANGED, mUserData, cmd);
}
}
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index d6671e3..55b836f 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -279,7 +279,8 @@
mAttributes.source = inputSource;
if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION
|| inputSource == AUDIO_SOURCE_CAMCORDER) {
- mAttributes.flags |= AUDIO_FLAG_CAPTURE_PRIVATE;
+ mAttributes.flags = static_cast<audio_flags_mask_t>(
+ mAttributes.flags | AUDIO_FLAG_CAPTURE_PRIVATE);
}
} else {
// stream type shouldn't be looked at, this track has audio attributes
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 3aa60da..edb0889 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -1363,7 +1363,7 @@
return aps->registerPolicyMixes(mixes, registration);
}
-status_t AudioSystem::setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+status_t AudioSystem::setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
@@ -1377,7 +1377,7 @@
}
status_t AudioSystem::setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices)
+ const AudioDeviceTypeAddrVector& devices)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
@@ -1604,33 +1604,88 @@
return aps->isCallScreenModeSupported();
}
-status_t AudioSystem::setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device)
+status_t AudioSystem::setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) {
return PERMISSION_DENIED;
}
- return aps->setPreferredDeviceForStrategy(strategy, device);
+ return aps->setDevicesRoleForStrategy(strategy, role, devices);
}
-status_t AudioSystem::removePreferredDeviceForStrategy(product_strategy_t strategy)
+status_t AudioSystem::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) {
return PERMISSION_DENIED;
}
- return aps->removePreferredDeviceForStrategy(strategy);
+ return aps->removeDevicesRoleForStrategy(strategy, role);
}
-status_t AudioSystem::getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device)
+status_t AudioSystem::getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) {
return PERMISSION_DENIED;
}
- return aps->getPreferredDeviceForStrategy(strategy, device);
+ return aps->getDevicesForRoleAndStrategy(strategy, role, devices);
+}
+
+status_t AudioSystem::setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->setDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioSystem::addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->addDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioSystem::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->removeDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioSystem::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->clearDevicesRoleForCapturePreset(audioSource, role);
+}
+
+status_t AudioSystem::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ return aps->getDevicesForRoleAndCapturePreset(audioSource, role, devices);
}
class CaptureStateListenerImpl : public media::BnCaptureStateListener,
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 41af78c..2c40fbb 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -227,7 +227,7 @@
{
mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
mAttributes.usage = AUDIO_USAGE_UNKNOWN;
- mAttributes.flags = 0x0;
+ mAttributes.flags = AUDIO_FLAG_NONE;
strcpy(mAttributes.tags, "");
}
@@ -467,7 +467,7 @@
if (format == AUDIO_FORMAT_DEFAULT) {
format = AUDIO_FORMAT_PCM_16_BIT;
} else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
- mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
+ flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
}
// validate parameters
@@ -644,6 +644,36 @@
return status;
}
+
+status_t AudioTrack::set(
+ audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ uint32_t channelMask,
+ size_t frameCount,
+ audio_output_flags_t flags,
+ callback_t cbf,
+ void* user,
+ int32_t notificationFrames,
+ const sp<IMemory>& sharedBuffer,
+ bool threadCanCallJava,
+ audio_session_t sessionId,
+ transfer_type transferType,
+ const audio_offload_info_t *offloadInfo,
+ uid_t uid,
+ pid_t pid,
+ const audio_attributes_t* pAttributes,
+ bool doNotReconnect,
+ float maxRequiredSpeed,
+ audio_port_handle_t selectedDeviceId)
+{
+ return set(streamType, sampleRate, format,
+ static_cast<audio_channel_mask_t>(channelMask),
+ frameCount, flags, cbf, user, notificationFrames, sharedBuffer,
+ threadCanCallJava, sessionId, transferType, offloadInfo, uid, pid,
+ pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
+}
+
// -------------------------------------------------------------------------
status_t AudioTrack::start()
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index f1f8f9c..e2c9698 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -900,11 +900,8 @@
}
audio_track_cblk_t* cblk = mCblk;
- int32_t flush = cblk->u.mStreaming.mFlush;
- if (flush != mFlush) {
- // FIXME should return an accurate value, but over-estimate is better than under-estimate
- return mFrameCount;
- }
+ flushBufferIfNeeded();
+
const int32_t rear = getRear();
ssize_t filled = audio_utils::safe_sub_overflow(rear, cblk->u.mStreaming.mFront);
// pipe should not already be overfull
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 6d79aba..7c304a1 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,6 +24,8 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
+#include <media/AudioSanitizer.h>
+#include <media/IAudioPolicyService.h>
#include <mediautils/ServiceUtilities.h>
#include <mediautils/TimeCheck.h>
#include "IAudioFlinger.h"
@@ -653,9 +655,9 @@
return NO_ERROR;
}
- virtual sp<IEffect> createEffect(
+ virtual sp<media::IEffect> createEffect(
effect_descriptor_t *pDesc,
- const sp<IEffectClient>& client,
+ const sp<media::IEffectClient>& client,
int32_t priority,
audio_io_handle_t output,
audio_session_t sessionId,
@@ -668,7 +670,7 @@
int *enabled)
{
Parcel data, reply;
- sp<IEffect> effect;
+ sp<media::IEffect> effect;
if (pDesc == NULL) {
if (status != NULL) {
*status = BAD_VALUE;
@@ -705,7 +707,7 @@
if (enabled != NULL) {
*enabled = tmp;
}
- effect = interface_cast<IEffect>(reply.readStrongBinder());
+ effect = interface_cast<media::IEffect>(reply.readStrongBinder());
reply.read(pDesc, sizeof(effect_descriptor_t));
}
if (status != NULL) {
@@ -1024,6 +1026,16 @@
std::string tag("IAudioFlinger command " + std::to_string(code));
TimeCheck check(tag.c_str());
+ // Make sure we connect to Audio Policy Service before calling into AudioFlinger:
+ // - AudioFlinger can call into Audio Policy Service with its global mutex held
+ // - If this is the first time Audio Policy Service is queried from inside audioserver process
+ // this will trigger Audio Policy Manager initialization.
+ // - Audio Policy Manager initialization calls into AudioFlinger which will try to lock
+ // its global mutex and a deadlock will occur.
+ if (IPCThreadState::self()->getCallingPid() != getpid()) {
+ AudioSystem::get_audio_policy_service();
+ }
+
switch (code) {
case CREATE_TRACK: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
@@ -1210,7 +1222,7 @@
CHECK_INTERFACE(IAudioFlinger, data, reply);
uint32_t sampleRate = data.readInt32();
audio_format_t format = (audio_format_t) data.readInt32();
- audio_channel_mask_t channelMask = data.readInt32();
+ audio_channel_mask_t channelMask = (audio_channel_mask_t) data.readInt32();
reply->writeInt64( getInputBufferSize(sampleRate, format, channelMask) );
return NO_ERROR;
} break;
@@ -1386,7 +1398,8 @@
if (data.read(&desc, sizeof(effect_descriptor_t)) != NO_ERROR) {
ALOGE("b/23905951");
}
- sp<IEffectClient> client = interface_cast<IEffectClient>(data.readStrongBinder());
+ sp<media::IEffectClient> client =
+ interface_cast<media::IEffectClient>(data.readStrongBinder());
int32_t priority = data.readInt32();
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
audio_session_t sessionId = (audio_session_t) data.readInt32();
@@ -1402,8 +1415,8 @@
int id = 0;
int enabled = 0;
- sp<IEffect> effect = createEffect(&desc, client, priority, output, sessionId, device,
- opPackageName, pid, probe, &status, &id, &enabled);
+ sp<media::IEffect> effect = createEffect(&desc, client, priority, output, sessionId,
+ device, opPackageName, pid, probe, &status, &id, &enabled);
reply->writeInt32(status);
reply->writeInt32(id);
reply->writeInt32(enabled);
@@ -1483,10 +1496,15 @@
case GET_AUDIO_PORT: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
struct audio_port port = {};
- if (data.read(&port, sizeof(struct audio_port)) != NO_ERROR) {
+ status_t status = data.read(&port, sizeof(struct audio_port));
+ if (status != NO_ERROR) {
ALOGE("b/23905951");
+ return status;
}
- status_t status = getAudioPort(&port);
+ status = AudioSanitizer::sanitizeAudioPort(&port);
+ if (status == NO_ERROR) {
+ status = getAudioPort(&port);
+ }
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->write(&port, sizeof(struct audio_port));
@@ -1496,12 +1514,20 @@
case CREATE_AUDIO_PATCH: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
struct audio_patch patch;
- data.read(&patch, sizeof(struct audio_patch));
- audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
- if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
- ALOGE("b/23905951");
+ status_t status = data.read(&patch, sizeof(struct audio_patch));
+ if (status != NO_ERROR) {
+ return status;
}
- status_t status = createAudioPatch(&patch, &handle);
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
+ status = data.read(&handle, sizeof(audio_patch_handle_t));
+ if (status != NO_ERROR) {
+ ALOGE("b/23905951");
+ return status;
+ }
+ status = AudioSanitizer::sanitizeAudioPatch(&patch);
+ if (status == NO_ERROR) {
+ status = createAudioPatch(&patch, &handle);
+ }
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->write(&handle, sizeof(audio_patch_handle_t));
@@ -1546,8 +1572,14 @@
case SET_AUDIO_PORT_CONFIG: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
struct audio_port_config config;
- data.read(&config, sizeof(struct audio_port_config));
- status_t status = setAudioPortConfig(&config);
+ status_t status = data.read(&config, sizeof(struct audio_port_config));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = AudioSanitizer::sanitizeAudioPortConfig(&config);
+ if (status == NO_ERROR) {
+ status = setAudioPortConfig(&config);
+ }
reply->writeInt32(status);
return NO_ERROR;
} break;
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 60af84b..81f9dff 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -26,6 +26,7 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
#include <media/AudioEffect.h>
+#include <media/AudioSanitizer.h>
#include <media/IAudioPolicyService.h>
#include <mediautils/ServiceUtilities.h>
#include <mediautils/TimeCheck.h>
@@ -112,13 +113,18 @@
MOVE_EFFECTS_TO_IO,
SET_RTT_ENABLED,
IS_CALL_SCREEN_MODE_SUPPORTED,
- SET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY,
- REMOVE_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY,
- GET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY,
+ SET_DEVICES_ROLE_FOR_PRODUCT_STRATEGY,
+ REMOVE_DEVICES_ROLE_FOR_PRODUCT_STRATEGY,
+ GET_DEVICES_FOR_ROLE_AND_PRODUCT_STRATEGY,
GET_DEVICES_FOR_ATTRIBUTES,
AUDIO_MODULES_UPDATED, // oneway
SET_CURRENT_IME_UID,
REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER,
+ SET_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET,
};
#define MAX_ITEMS_PER_LIST 1024
@@ -1173,31 +1179,18 @@
return reply.readBool();
}
- virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+ virtual status_t setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32((int32_t) uid);
- size_t size = devices.size();
- size_t sizePosition = data.dataPosition();
- data.writeInt32((int32_t) size);
- size_t finalSize = size;
- for (size_t i = 0; i < size; i++) {
- size_t position = data.dataPosition();
- if (devices[i].writeToParcel(&data) != NO_ERROR) {
- data.setDataPosition(position);
- finalSize--;
- }
- }
- if (size != finalSize) {
- size_t position = data.dataPosition();
- data.setDataPosition(sizePosition);
- data.writeInt32(finalSize);
- data.setDataPosition(position);
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
+ return status;
}
- status_t status = remote()->transact(SET_UID_DEVICE_AFFINITY, data, &reply);
+ status = remote()->transact(SET_UID_DEVICE_AFFINITY, data, &reply);
if (status == NO_ERROR) {
status = (status_t)reply.readInt32();
}
@@ -1218,51 +1211,37 @@
return status;
}
- virtual status_t setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ virtual status_t setUserIdDeviceAffinities(int userId, const AudioDeviceTypeAddrVector& devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeInt32((int32_t) userId);
- size_t size = devices.size();
- size_t sizePosition = data.dataPosition();
- data.writeInt32((int32_t) size);
- size_t finalSize = size;
- for (size_t i = 0; i < size; i++) {
- size_t position = data.dataPosition();
- if (devices[i].writeToParcel(&data) != NO_ERROR) {
- data.setDataPosition(position);
- finalSize--;
- }
- }
- if (size != finalSize) {
- size_t position = data.dataPosition();
- data.setDataPosition(sizePosition);
- data.writeInt32(finalSize);
- data.setDataPosition(position);
- }
-
- status_t status = remote()->transact(SET_USERID_DEVICE_AFFINITY, data, &reply);
- if (status == NO_ERROR) {
- status = (status_t)reply.readInt32();
- }
+ data.writeInt32((int32_t) userId);
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
return status;
}
- virtual status_t removeUserIdDeviceAffinities(int userId) {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
-
- data.writeInt32((int32_t) userId);
-
- status_t status =
- remote()->transact(REMOVE_USERID_DEVICE_AFFINITY, data, &reply);
- if (status == NO_ERROR) {
- status = (status_t) reply.readInt32();
- }
- return status;
+ status = remote()->transact(SET_USERID_DEVICE_AFFINITY, data, &reply);
+ if (status == NO_ERROR) {
+ status = (status_t)reply.readInt32();
}
+ return status;
+ }
+
+ virtual status_t removeUserIdDeviceAffinities(int userId) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+ data.writeInt32((int32_t) userId);
+
+ status_t status =
+ remote()->transact(REMOVE_USERID_DEVICE_AFFINITY, data, &reply);
+ if (status == NO_ERROR) {
+ status = (status_t) reply.readInt32();
+ }
+ return status;
+ }
virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies)
{
@@ -1384,17 +1363,31 @@
return reply.readBool();
}
- virtual status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device)
+ virtual status_t setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeUint32(static_cast<uint32_t>(strategy));
- status_t status = device.writeToParcel(&data);
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = data.writeParcelableVector(devices);
if (status != NO_ERROR) {
return BAD_VALUE;
}
- status = remote()->transact(SET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY,
+ status = remote()->transact(SET_DEVICES_ROLE_FOR_PRODUCT_STRATEGY, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(strategy));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = remote()->transact(REMOVE_DEVICES_ROLE_FOR_PRODUCT_STRATEGY,
data, &reply);
if (status != NO_ERROR) {
return status;
@@ -1402,31 +1395,108 @@
return static_cast<status_t>(reply.readInt32());
}
- virtual status_t removePreferredDeviceForStrategy(product_strategy_t strategy)
+ virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role, AudioDeviceTypeAddrVector &devices)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeUint32(static_cast<uint32_t>(strategy));
- status_t status = remote()->transact(REMOVE_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY,
- data, &reply);
- if (status != NO_ERROR) {
- return status;
- }
- return static_cast<status_t>(reply.readInt32());
- }
-
- virtual status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device)
- {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
- data.writeUint32(static_cast<uint32_t>(strategy));
- status_t status = remote()->transact(GET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY,
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = remote()->transact(GET_DEVICES_FOR_ROLE_AND_PRODUCT_STRATEGY,
data, &reply);
if (status != NO_ERROR) {
return status;
}
- status = device.readFromParcel(&reply);
+ status = reply.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(SET_DEVICES_ROLE_FOR_CAPTURE_PRESET, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = data.writeParcelableVector(devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t clearDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = remote()->transact(CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET,
+ data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast<status_t>(reply.readInt32());
+ }
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeUint32(static_cast<uint32_t>(audioSource));
+ data.writeUint32(static_cast<uint32_t>(role));
+ status_t status = remote()->transact(GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET,
+ data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = reply.readParcelableVector(&devices);
if (status != NO_ERROR) {
return status;
}
@@ -1561,15 +1631,20 @@
case RELEASE_SOUNDTRIGGER_SESSION:
case SET_RTT_ENABLED:
case IS_CALL_SCREEN_MODE_SUPPORTED:
- case SET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY:
+ case SET_DEVICES_ROLE_FOR_PRODUCT_STRATEGY:
case SET_SUPPORTED_SYSTEM_USAGES:
- case REMOVE_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY:
- case GET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY:
+ case REMOVE_DEVICES_ROLE_FOR_PRODUCT_STRATEGY:
+ case GET_DEVICES_FOR_ROLE_AND_PRODUCT_STRATEGY:
case GET_DEVICES_FOR_ATTRIBUTES:
case SET_ALLOWED_CAPTURE_POLICY:
case AUDIO_MODULES_UPDATED:
case SET_CURRENT_IME_UID:
- case REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER: {
+ case REGISTER_SOUNDTRIGGER_CAPTURE_STATE_LISTENER:
+ case SET_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET:
+ case GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1685,7 +1760,6 @@
if (status != NO_ERROR) {
return status;
}
- sanetizeAudioAttributes(&attr);
audio_session_t session = (audio_session_t)data.readInt32();
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
bool hasStream = data.readInt32() != 0;
@@ -1703,10 +1777,14 @@
audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
audio_io_handle_t output = 0;
std::vector<audio_io_handle_t> secondaryOutputs;
- status = getOutputForAttr(&attr,
- &output, session, &stream, pid, uid,
- &config,
- flags, &selectedDeviceId, &portId, &secondaryOutputs);
+
+ status = AudioSanitizer::sanitizeAudioAttributes(&attr, "68953950");
+ if (status == NO_ERROR) {
+ status = getOutputForAttr(&attr,
+ &output, session, &stream, pid, uid,
+ &config,
+ flags, &selectedDeviceId, &portId, &secondaryOutputs);
+ }
reply->writeInt32(status);
status = reply->write(&attr, sizeof(audio_attributes_t));
if (status != NO_ERROR) {
@@ -1745,8 +1823,11 @@
case GET_INPUT_FOR_ATTR: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_attributes_t attr = {};
- data.read(&attr, sizeof(audio_attributes_t));
- sanetizeAudioAttributes(&attr);
+ status_t status = data.read(&attr, sizeof(audio_attributes_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+
audio_io_handle_t input = (audio_io_handle_t)data.readInt32();
audio_unique_id_t riid = (audio_unique_id_t)data.readInt32();
audio_session_t session = (audio_session_t)data.readInt32();
@@ -1759,9 +1840,13 @@
audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
audio_port_handle_t portId = (audio_port_handle_t)data.readInt32();
- status_t status = getInputForAttr(&attr, &input, riid, session, pid, uid,
- opPackageName, &config,
- flags, &selectedDeviceId, &portId);
+
+ status = AudioSanitizer::sanitizeAudioAttributes(&attr, "68953950");
+ if (status == NO_ERROR) {
+ status = getInputForAttr(&attr, &input, riid, session, pid, uid,
+ opPackageName, &config,
+ flags, &selectedDeviceId, &portId);
+ }
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->writeInt32(input);
@@ -1842,11 +1927,15 @@
if (status != NO_ERROR) {
return status;
}
+
int index = data.readInt32();
audio_devices_t device = static_cast <audio_devices_t>(data.readInt32());
- reply->writeInt32(static_cast <uint32_t>(setVolumeIndexForAttributes(attributes,
- index, device)));
+ status = AudioSanitizer::sanitizeAudioAttributes(&attributes, "169572641");
+ if (status == NO_ERROR) {
+ status = setVolumeIndexForAttributes(attributes, index, device);
+ }
+ reply->writeInt32(static_cast <int32_t>(status));
return NO_ERROR;
} break;
@@ -1860,8 +1949,11 @@
audio_devices_t device = static_cast <audio_devices_t>(data.readInt32());
int index = 0;
- status = getVolumeIndexForAttributes(attributes, index, device);
- reply->writeInt32(static_cast <uint32_t>(status));
+ status = AudioSanitizer::sanitizeAudioAttributes(&attributes, "169572641");
+ if (status == NO_ERROR) {
+ status = getVolumeIndexForAttributes(attributes, index, device);
+ }
+ reply->writeInt32(static_cast <int32_t>(status));
if (status == NO_ERROR) {
reply->writeInt32(index);
}
@@ -1877,8 +1969,11 @@
}
int index = 0;
- status = getMinVolumeIndexForAttributes(attributes, index);
- reply->writeInt32(static_cast <uint32_t>(status));
+ status = AudioSanitizer::sanitizeAudioAttributes(&attributes, "169572641");
+ if (status == NO_ERROR) {
+ status = getMinVolumeIndexForAttributes(attributes, index);
+ }
+ reply->writeInt32(static_cast <int32_t>(status));
if (status == NO_ERROR) {
reply->writeInt32(index);
}
@@ -1894,8 +1989,11 @@
}
int index = 0;
- status = getMaxVolumeIndexForAttributes(attributes, index);
- reply->writeInt32(static_cast <uint32_t>(status));
+ status = AudioSanitizer::sanitizeAudioAttributes(&attributes, "169572641");
+ if (status == NO_ERROR) {
+ status = getMaxVolumeIndexForAttributes(attributes, index);
+ }
+ reply->writeInt32(static_cast <int32_t>(status));
if (status == NO_ERROR) {
reply->writeInt32(index);
}
@@ -1913,31 +2011,37 @@
case GET_OUTPUT_FOR_EFFECT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
effect_descriptor_t desc = {};
- if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+ status_t status = data.read(&desc, sizeof(desc));
+ if (status != NO_ERROR) {
android_errorWriteLog(0x534e4554, "73126106");
+ return status;
}
- (void)sanitizeEffectDescriptor(&desc);
- audio_io_handle_t output = getOutputForEffect(&desc);
- reply->writeInt32(static_cast <int>(output));
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+ status = AudioSanitizer::sanitizeEffectDescriptor(&desc, "73126106");
+ if (status == NO_ERROR) {
+ output = getOutputForEffect(&desc);
+ }
+ reply->writeInt32(static_cast <int32_t>(output));
return NO_ERROR;
} break;
case REGISTER_EFFECT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
effect_descriptor_t desc = {};
- if (data.read(&desc, sizeof(desc)) != NO_ERROR) {
+ status_t status = data.read(&desc, sizeof(desc));
+ if (status != NO_ERROR) {
android_errorWriteLog(0x534e4554, "73126106");
+ return status;
}
- (void)sanitizeEffectDescriptor(&desc);
audio_io_handle_t io = data.readInt32();
uint32_t strategy = data.readInt32();
audio_session_t session = (audio_session_t) data.readInt32();
int id = data.readInt32();
- reply->writeInt32(static_cast <int32_t>(registerEffect(&desc,
- io,
- strategy,
- session,
- id)));
+ status = AudioSanitizer::sanitizeEffectDescriptor(&desc, "73126106");
+ if (status == NO_ERROR) {
+ status = registerEffect(&desc, io, strategy, session, id);
+ }
+ reply->writeInt32(static_cast <int32_t>(status));
return NO_ERROR;
} break;
@@ -2046,7 +2150,11 @@
if (status != NO_ERROR) return status;
status = data.read(&attributes, sizeof(audio_attributes_t));
if (status != NO_ERROR) return status;
- reply->writeInt32(isDirectOutputSupported(config, attributes));
+ status = AudioSanitizer::sanitizeAudioAttributes(&attributes, "169572641");
+ if (status == NO_ERROR) {
+ status = isDirectOutputSupported(config, attributes);
+ }
+ reply->writeInt32(static_cast <int32_t>(status));
return NO_ERROR;
}
@@ -2085,10 +2193,15 @@
case GET_AUDIO_PORT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
struct audio_port port = {};
- if (data.read(&port, sizeof(struct audio_port)) != NO_ERROR) {
+ status_t status = data.read(&port, sizeof(struct audio_port));
+ if (status != NO_ERROR) {
ALOGE("b/23912202");
+ return status;
}
- status_t status = getAudioPort(&port);
+ status = AudioSanitizer::sanitizeAudioPort(&port);
+ if (status == NO_ERROR) {
+ status = getAudioPort(&port);
+ }
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->write(&port, sizeof(struct audio_port));
@@ -2099,12 +2212,20 @@
case CREATE_AUDIO_PATCH: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
struct audio_patch patch = {};
- data.read(&patch, sizeof(struct audio_patch));
- audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
- if (data.read(&handle, sizeof(audio_patch_handle_t)) != NO_ERROR) {
- ALOGE("b/23912202");
+ status_t status = data.read(&patch, sizeof(struct audio_patch));
+ if (status != NO_ERROR) {
+ return status;
}
- status_t status = createAudioPatch(&patch, &handle);
+ audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
+ status = data.read(&handle, sizeof(audio_patch_handle_t));
+ if (status != NO_ERROR) {
+ ALOGE("b/23912202");
+ return status;
+ }
+ status = AudioSanitizer::sanitizeAudioPatch(&patch);
+ if (status == NO_ERROR) {
+ status = createAudioPatch(&patch, &handle);
+ }
reply->writeInt32(status);
if (status == NO_ERROR) {
reply->write(&handle, sizeof(audio_patch_handle_t));
@@ -2154,9 +2275,12 @@
case SET_AUDIO_PORT_CONFIG: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
struct audio_port_config config = {};
- data.read(&config, sizeof(struct audio_port_config));
- (void)sanitizeAudioPortConfig(&config);
- status_t status = setAudioPortConfig(&config);
+ status_t status = data.read(&config, sizeof(struct audio_port_config));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ (void)AudioSanitizer::sanitizeAudioPortConfig(&config);
+ status = setAudioPortConfig(&config);
reply->writeInt32(status);
return NO_ERROR;
}
@@ -2232,13 +2356,25 @@
case START_AUDIO_SOURCE: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
struct audio_port_config source = {};
- data.read(&source, sizeof(struct audio_port_config));
- (void)sanitizeAudioPortConfig(&source);
+ status_t status = data.read(&source, sizeof(struct audio_port_config));
+ if (status != NO_ERROR) {
+ return status;
+ }
audio_attributes_t attributes = {};
- data.read(&attributes, sizeof(audio_attributes_t));
- sanetizeAudioAttributes(&attributes);
+ status = data.read(&attributes, sizeof(audio_attributes_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = AudioSanitizer::sanitizeAudioPortConfig(&source);
+ if (status == NO_ERROR) {
+ // OK to not always sanitize attributes as startAudioSource() is not called if
+ // the port config is invalid.
+ status = AudioSanitizer::sanitizeAudioAttributes(&attributes, "68953950");
+ }
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
- status_t status = startAudioSource(&source, &attributes, &portId);
+ if (status == NO_ERROR) {
+ status = startAudioSource(&source, &attributes, &portId);
+ }
reply->writeInt32(status);
reply->writeInt32(portId);
return NO_ERROR;
@@ -2460,15 +2596,12 @@
case SET_UID_DEVICE_AFFINITY: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
const uid_t uid = (uid_t) data.readInt32();
- Vector<AudioDeviceTypeAddr> devices;
- size_t size = (size_t)data.readInt32();
- for (size_t i = 0; i < size; i++) {
- AudioDeviceTypeAddr device;
- if (device.readFromParcel((Parcel*)&data) == NO_ERROR) {
- devices.add(device);
- }
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
}
- status_t status = setUidDeviceAffinities(uid, devices);
+ status = setUidDeviceAffinities(uid, devices);
reply->writeInt32(status);
return NO_ERROR;
}
@@ -2484,15 +2617,12 @@
case SET_USERID_DEVICE_AFFINITY: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
const int userId = (int) data.readInt32();
- Vector<AudioDeviceTypeAddr> devices;
- size_t size = (size_t)data.readInt32();
- for (size_t i = 0; i < size; i++) {
- AudioDeviceTypeAddr device;
- if (device.readFromParcel((Parcel*)&data) == NO_ERROR) {
- devices.add(device);
- }
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
}
- status_t status = setUserIdDeviceAffinities(userId, devices);
+ status = setUserIdDeviceAffinities(userId, devices);
reply->writeInt32(status);
return NO_ERROR;
}
@@ -2628,7 +2758,7 @@
case SET_ALLOWED_CAPTURE_POLICY: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
uid_t uid = data.readInt32();
- audio_flags_mask_t flags = data.readInt32();
+ audio_flags_mask_t flags = static_cast<audio_flags_mask_t>(data.readInt32());
status_t status = setAllowedCapturePolicy(uid, flags);
reply->writeInt32(status);
return NO_ERROR;
@@ -2649,33 +2779,36 @@
return NO_ERROR;
}
- case SET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY: {
+ case SET_DEVICES_ROLE_FOR_PRODUCT_STRATEGY: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
product_strategy_t strategy = (product_strategy_t) data.readUint32();
- AudioDeviceTypeAddr device;
- status_t status = device.readFromParcel((Parcel*)&data);
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
if (status != NO_ERROR) {
return status;
}
- status = setPreferredDeviceForStrategy(strategy, device);
+ status = setDevicesRoleForStrategy(strategy, role, devices);
reply->writeInt32(status);
return NO_ERROR;
}
- case REMOVE_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY: {
+ case REMOVE_DEVICES_ROLE_FOR_PRODUCT_STRATEGY: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
product_strategy_t strategy = (product_strategy_t) data.readUint32();
- status_t status = removePreferredDeviceForStrategy(strategy);
+ device_role_t role = (device_role_t) data.readUint32();
+ status_t status = removeDevicesRoleForStrategy(strategy, role);
reply->writeInt32(status);
return NO_ERROR;
}
- case GET_PREFERRED_DEVICE_FOR_PRODUCT_STRATEGY: {
+ case GET_DEVICES_FOR_ROLE_AND_PRODUCT_STRATEGY: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
product_strategy_t strategy = (product_strategy_t) data.readUint32();
- AudioDeviceTypeAddr device;
- status_t status = getPreferredDeviceForStrategy(strategy, device);
- status_t marshall_status = device.writeToParcel(reply);
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = getDevicesForRoleAndStrategy(strategy, role, devices);
+ status_t marshall_status = reply->writeParcelableVector(devices);
if (marshall_status != NO_ERROR) {
return marshall_status;
}
@@ -2757,49 +2890,76 @@
return NO_ERROR;
} break;
+ case SET_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = setDevicesRoleForCapturePreset(audioSource, role, devices);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case ADD_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = addDevicesRoleForCapturePreset(audioSource, role, devices);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case REMOVE_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = data.readParcelableVector(&devices);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = removeDevicesRoleForCapturePreset(audioSource, role, devices);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case CLEAR_DEVICES_ROLE_FOR_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ status_t status = clearDevicesRoleForCapturePreset(audioSource, role);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
+ case GET_DEVICES_FOR_ROLE_AND_CAPTURE_PRESET: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_source_t audioSource = (audio_source_t) data.readUint32();
+ device_role_t role = (device_role_t) data.readUint32();
+ AudioDeviceTypeAddrVector devices;
+ status_t status = getDevicesForRoleAndCapturePreset(audioSource, role, devices);
+ status_t marshall_status = reply->writeParcelableVector(devices);
+ if (marshall_status != NO_ERROR) {
+ return marshall_status;
+ }
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
}
-/** returns true if string overflow was prevented by zero termination */
-template <size_t size>
-static bool preventStringOverflow(char (&s)[size]) {
- if (strnlen(s, size) < size) return false;
- s[size - 1] = '\0';
- return true;
-}
-
-void BnAudioPolicyService::sanetizeAudioAttributes(audio_attributes_t* attr)
-{
- const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
- if (strnlen(attr->tags, tagsMaxSize) >= tagsMaxSize) {
- android_errorWriteLog(0x534e4554, "68953950"); // SafetyNet logging
- }
- attr->tags[tagsMaxSize - 1] = '\0';
-}
-
-/** returns BAD_VALUE if sanitization was required. */
-status_t BnAudioPolicyService::sanitizeEffectDescriptor(effect_descriptor_t* desc)
-{
- if (preventStringOverflow(desc->name)
- | /* always */ preventStringOverflow(desc->implementor)) {
- android_errorWriteLog(0x534e4554, "73126106"); // SafetyNet logging
- return BAD_VALUE;
- }
- return NO_ERROR;
-}
-
-/** returns BAD_VALUE if sanitization was required. */
-status_t BnAudioPolicyService::sanitizeAudioPortConfig(struct audio_port_config* config)
-{
- if (config->type == AUDIO_PORT_TYPE_DEVICE &&
- preventStringOverflow(config->ext.device.address)) {
- return BAD_VALUE;
- }
- return NO_ERROR;
-}
-
// ----------------------------------------------------------------------------
} // namespace android
diff --git a/media/libaudioclient/IEffect.cpp b/media/libaudioclient/IEffect.cpp
deleted file mode 100644
index 5d47dff..0000000
--- a/media/libaudioclient/IEffect.cpp
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
-**
-** Copyright 2010, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IEffect"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <binder/Parcel.h>
-#include <media/IEffect.h>
-
-namespace android {
-
-// Maximum command/reply size expected
-#define EFFECT_PARAM_SIZE_MAX 65536
-
-enum {
- ENABLE = IBinder::FIRST_CALL_TRANSACTION,
- DISABLE,
- COMMAND,
- DISCONNECT,
- GET_CBLK
-};
-
-class BpEffect: public BpInterface<IEffect>
-{
-public:
- explicit BpEffect(const sp<IBinder>& impl)
- : BpInterface<IEffect>(impl)
- {
- }
-
- status_t enable()
- {
- ALOGV("enable");
- Parcel data, reply;
- data.writeInterfaceToken(IEffect::getInterfaceDescriptor());
- remote()->transact(ENABLE, data, &reply);
- return reply.readInt32();
- }
-
- status_t disable()
- {
- ALOGV("disable");
- Parcel data, reply;
- data.writeInterfaceToken(IEffect::getInterfaceDescriptor());
- remote()->transact(DISABLE, data, &reply);
- return reply.readInt32();
- }
-
- status_t command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *pReplySize,
- void *pReplyData)
- {
- ALOGV("command");
- Parcel data, reply;
- data.writeInterfaceToken(IEffect::getInterfaceDescriptor());
- data.writeInt32(cmdCode);
- int size = cmdSize;
- if (pCmdData == NULL) {
- size = 0;
- }
- data.writeInt32(size);
- if (size) {
- data.write(pCmdData, size);
- }
- if (pReplySize == NULL) {
- size = 0;
- } else {
- size = *pReplySize;
- }
- data.writeInt32(size);
-
- status_t status = remote()->transact(COMMAND, data, &reply);
- if (status == NO_ERROR) {
- status = reply.readInt32();
- }
- if (status != NO_ERROR) {
- if (pReplySize != NULL)
- *pReplySize = 0;
- return status;
- }
-
- size = reply.readInt32();
- if (size != 0 && pReplyData != NULL && pReplySize != NULL) {
- reply.read(pReplyData, size);
- *pReplySize = size;
- }
- return status;
- }
-
- void disconnect()
- {
- ALOGV("disconnect");
- Parcel data, reply;
- data.writeInterfaceToken(IEffect::getInterfaceDescriptor());
- remote()->transact(DISCONNECT, data, &reply);
- return;
- }
-
- virtual sp<IMemory> getCblk() const
- {
- Parcel data, reply;
- sp<IMemory> cblk;
- data.writeInterfaceToken(IEffect::getInterfaceDescriptor());
- status_t status = remote()->transact(GET_CBLK, data, &reply);
- if (status == NO_ERROR) {
- cblk = interface_cast<IMemory>(reply.readStrongBinder());
- if (cblk != 0 && cblk->unsecurePointer() == NULL) {
- cblk.clear();
- }
- }
- return cblk;
- }
- };
-
-IMPLEMENT_META_INTERFACE(Effect, "android.media.IEffect");
-
-// ----------------------------------------------------------------------
-
-status_t BnEffect::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch (code) {
- case ENABLE: {
- ALOGV("ENABLE");
- CHECK_INTERFACE(IEffect, data, reply);
- reply->writeInt32(enable());
- return NO_ERROR;
- } break;
-
- case DISABLE: {
- ALOGV("DISABLE");
- CHECK_INTERFACE(IEffect, data, reply);
- reply->writeInt32(disable());
- return NO_ERROR;
- } break;
-
- case COMMAND: {
- ALOGV("COMMAND");
- CHECK_INTERFACE(IEffect, data, reply);
- uint32_t cmdCode = data.readInt32();
- uint32_t cmdSize = data.readInt32();
- char *cmd = NULL;
- if (cmdSize) {
- if (cmdSize > EFFECT_PARAM_SIZE_MAX) {
- reply->writeInt32(NO_MEMORY);
- return NO_ERROR;
- }
- cmd = (char *)calloc(cmdSize, 1);
- if (cmd == NULL) {
- reply->writeInt32(NO_MEMORY);
- return NO_ERROR;
- }
- data.read(cmd, cmdSize);
- }
- uint32_t replySize = data.readInt32();
- uint32_t replySz = replySize;
- char *resp = NULL;
- if (replySize) {
- if (replySize > EFFECT_PARAM_SIZE_MAX) {
- free(cmd);
- reply->writeInt32(NO_MEMORY);
- return NO_ERROR;
- }
- resp = (char *)calloc(replySize, 1);
- if (resp == NULL) {
- free(cmd);
- reply->writeInt32(NO_MEMORY);
- return NO_ERROR;
- }
- }
- status_t status = command(cmdCode, cmdSize, cmd, &replySz, resp);
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- if (replySz < replySize) {
- replySize = replySz;
- }
- reply->writeInt32(replySize);
- if (replySize) {
- reply->write(resp, replySize);
- }
- }
- if (cmd) {
- free(cmd);
- }
- if (resp) {
- free(resp);
- }
- return NO_ERROR;
- } break;
-
- case DISCONNECT: {
- ALOGV("DISCONNECT");
- CHECK_INTERFACE(IEffect, data, reply);
- disconnect();
- return NO_ERROR;
- } break;
-
- case GET_CBLK: {
- CHECK_INTERFACE(IEffect, data, reply);
- reply->writeStrongBinder(IInterface::asBinder(getCblk()));
- return NO_ERROR;
- } break;
-
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/media/libaudioclient/IEffectClient.cpp b/media/libaudioclient/IEffectClient.cpp
deleted file mode 100644
index 3f2c67d..0000000
--- a/media/libaudioclient/IEffectClient.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
-**
-** Copyright 2010, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IEffectClient"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <media/IEffectClient.h>
-
-namespace android {
-
-enum {
- CONTROL_STATUS_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
- ENABLE_STATUS_CHANGED,
- COMMAND_EXECUTED
-};
-
-class BpEffectClient: public BpInterface<IEffectClient>
-{
-public:
- explicit BpEffectClient(const sp<IBinder>& impl)
- : BpInterface<IEffectClient>(impl)
- {
- }
-
- void controlStatusChanged(bool controlGranted)
- {
- ALOGV("controlStatusChanged");
- Parcel data, reply;
- data.writeInterfaceToken(IEffectClient::getInterfaceDescriptor());
- data.writeInt32((uint32_t)controlGranted);
- remote()->transact(CONTROL_STATUS_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
- void enableStatusChanged(bool enabled)
- {
- ALOGV("enableStatusChanged");
- Parcel data, reply;
- data.writeInterfaceToken(IEffectClient::getInterfaceDescriptor());
- data.writeInt32((uint32_t)enabled);
- remote()->transact(ENABLE_STATUS_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
- void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData)
- {
- ALOGV("commandExecuted");
- Parcel data, reply;
- data.writeInterfaceToken(IEffectClient::getInterfaceDescriptor());
- data.writeInt32(cmdCode);
- int size = cmdSize;
- if (pCmdData == NULL) {
- size = 0;
- }
- data.writeInt32(size);
- if (size) {
- data.write(pCmdData, size);
- }
- size = replySize;
- if (pReplyData == NULL) {
- size = 0;
- }
- data.writeInt32(size);
- if (size) {
- data.write(pReplyData, size);
- }
- remote()->transact(COMMAND_EXECUTED, data, &reply, IBinder::FLAG_ONEWAY);
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(EffectClient, "android.media.IEffectClient");
-
-// ----------------------------------------------------------------------
-
-status_t BnEffectClient::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch (code) {
- case CONTROL_STATUS_CHANGED: {
- ALOGV("CONTROL_STATUS_CHANGED");
- CHECK_INTERFACE(IEffectClient, data, reply);
- bool hasControl = (bool)data.readInt32();
- controlStatusChanged(hasControl);
- return NO_ERROR;
- } break;
- case ENABLE_STATUS_CHANGED: {
- ALOGV("ENABLE_STATUS_CHANGED");
- CHECK_INTERFACE(IEffectClient, data, reply);
- bool enabled = (bool)data.readInt32();
- enableStatusChanged(enabled);
- return NO_ERROR;
- } break;
- case COMMAND_EXECUTED: {
- ALOGV("COMMAND_EXECUTED");
- CHECK_INTERFACE(IEffectClient, data, reply);
- uint32_t cmdCode = data.readInt32();
- uint32_t cmdSize = data.readInt32();
- char *cmd = NULL;
- if (cmdSize) {
- cmd = (char *)malloc(cmdSize);
- data.read(cmd, cmdSize);
- }
- uint32_t replySize = data.readInt32();
- char *resp = NULL;
- if (replySize) {
- resp = (char *)malloc(replySize);
- data.read(resp, replySize);
- }
- commandExecuted(cmdCode, cmdSize, cmd, replySize, resp);
- if (cmd) {
- free(cmd);
- }
- if (resp) {
- free(resp);
- }
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-} // namespace android
diff --git a/media/libaudioclient/aidl/android/media/IEffect.aidl b/media/libaudioclient/aidl/android/media/IEffect.aidl
new file mode 100644
index 0000000..9548e46
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/IEffect.aidl
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SharedFileRegion;
+
+/**
+ * The IEffect interface enables control of the effect module activity and parameters.
+ *
+ * @hide
+ */
+interface IEffect {
+ /**
+ * Activates the effect module by connecting it to the audio path.
+ * @return a status_t code.
+ */
+ int enable();
+
+ /**
+ * Deactivates the effect module by disconnecting it from the audio path.
+ * @return a status_t code.
+ */
+ int disable();
+
+ /**
+ * Sends control, reads or writes parameters. Same behavior as the command() method in the
+ * effect control interface.
+ * Refer to system/audio_effect.h for a description of the valid command codes and their
+ * associated parameter and return messages. The cmdData and response parameters are expected to
+ * contain the respective types in a standard C memory layout.
+ *
+ * TODO(ytai): replace opaque byte arrays with strongly typed parameters.
+ */
+ int command(int cmdCode, in byte[] cmdData, int maxResponseSize, out byte[] response);
+
+ /**
+ * Disconnects the IEffect interface from the effect module.
+ * This will also delete the effect module and release the effect engine in the library if this
+ * is the last client disconnected. To release control of the effect module, the application can
+ * disconnect or delete the IEffect interface.
+ */
+ void disconnect();
+
+ /**
+ * returns a pointer to a shared memory area used to pass multiple parameters to the effect
+ * module without multiplying the binder calls.
+ *
+ * TODO(ytai): Explain how this should be used exactly.
+ */
+ SharedFileRegion getCblk();
+}
diff --git a/media/libaudioclient/aidl/android/media/IEffectClient.aidl b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
new file mode 100644
index 0000000..d1e331c
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * A callback interface for getting effect-related notifications.
+ *
+ * @hide
+ */
+interface IEffectClient {
+ /**
+ * Called whenever the status of granting control over the effect to the application
+ * has changed.
+ * @param controlGranted true iff the application has the control of the effect module.
+ */
+ oneway void controlStatusChanged(boolean controlGranted);
+
+ /**
+ * Called whenever the effect has been enabled or disabled. Received only if the client is not
+ * currently controlling the effect.
+ * @param enabled true if the effect module has been activated, false if deactivated.
+ */
+ oneway void enableStatusChanged(boolean enabled);
+
+ /**
+ * A command has been send to the effect engine. Received only if the client is not currently
+ * controlling the effect. See IEffect.command() for a description of buffer contents.
+ *
+ * TODO(ytai): replace opaque byte arrays with strongly typed parameters.
+ */
+ oneway void commandExecuted(int cmdCode, in byte[] cmdData, in byte[] replyData);
+}
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index 3d4bb4e..8371711 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -22,8 +22,6 @@
#include <media/IAudioFlinger.h>
#include <media/IAudioPolicyService.h>
-#include <media/IEffect.h>
-#include <media/IEffectClient.h>
#include <media/AudioSystem.h>
#include <system/audio_effect.h>
@@ -31,6 +29,9 @@
#include <utils/Errors.h>
#include <binder/IInterface.h>
+#include "android/media/IEffect.h"
+#include "android/media/BnEffectClient.h"
+
namespace android {
@@ -549,45 +550,43 @@
// IEffectClient
virtual void controlStatusChanged(bool controlGranted);
virtual void enableStatusChanged(bool enabled);
- virtual void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData);
+ virtual void commandExecuted(int32_t cmdCode,
+ const std::vector<uint8_t>& cmdData,
+ const std::vector<uint8_t>& replyData);
private:
// Implements the IEffectClient interface
class EffectClient :
- public android::BnEffectClient, public android::IBinder::DeathRecipient
+ public media::BnEffectClient, public android::IBinder::DeathRecipient
{
public:
EffectClient(AudioEffect *effect) : mEffect(effect){}
// IEffectClient
- virtual void controlStatusChanged(bool controlGranted) {
+ binder::Status controlStatusChanged(bool controlGranted) override {
sp<AudioEffect> effect = mEffect.promote();
if (effect != 0) {
effect->controlStatusChanged(controlGranted);
}
+ return binder::Status::ok();
}
- virtual void enableStatusChanged(bool enabled) {
+ binder::Status enableStatusChanged(bool enabled) override {
sp<AudioEffect> effect = mEffect.promote();
if (effect != 0) {
effect->enableStatusChanged(enabled);
}
+ return binder::Status::ok();
}
- virtual void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData) {
+ binder::Status commandExecuted(int32_t cmdCode,
+ const std::vector<uint8_t>& cmdData,
+ const std::vector<uint8_t>& replyData) override {
sp<AudioEffect> effect = mEffect.promote();
if (effect != 0) {
- effect->commandExecuted(
- cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ effect->commandExecuted(cmdCode, cmdData, replyData);
}
+ return binder::Status::ok();
}
// IBinder::DeathRecipient
@@ -604,7 +603,7 @@
void binderDied();
- sp<IEffect> mIEffect; // IEffect binder interface
+ sp<media::IEffect> mIEffect; // IEffect binder interface
sp<EffectClient> mIEffectClient; // IEffectClient implementation
sp<IMemory> mCblkMemory; // shared memory for deferred parameter setting
effect_param_cblk_t* mCblk = nullptr; // control block for deferred parameter setting
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 19c2cbd..848743a 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -361,11 +361,11 @@
static status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
- static status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
+ static status_t setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices);
static status_t removeUidDeviceAffinities(uid_t uid);
- static status_t setUserIdDeviceAffinities(int userId, const Vector<AudioDeviceTypeAddr>& devices);
+ static status_t setUserIdDeviceAffinities(int userId, const AudioDeviceTypeAddrVector& devices);
static status_t removeUserIdDeviceAffinities(int userId);
@@ -425,13 +425,29 @@
*/
static status_t setAudioHalPids(const std::vector<pid_t>& pids);
- static status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device);
+ static status_t setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices);
- static status_t removePreferredDeviceForStrategy(product_strategy_t strategy);
+ static status_t removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role);
- static status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device);
+ static status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role, AudioDeviceTypeAddrVector &devices);
+
+ static status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices);
+
+ static status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector &devices);
+
+ static status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices);
+
+ static status_t clearDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role);
+
+ static status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices);
static status_t getDeviceForStrategy(product_strategy_t strategy,
AudioDeviceTypeAddr &device);
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index a9946da..de183d8 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -344,6 +344,27 @@
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+ // FIXME(b/169889714): Vendor code depends on the old method signature at link time
+ status_t set(audio_stream_type_t streamType,
+ uint32_t sampleRate,
+ audio_format_t format,
+ uint32_t channelMask,
+ size_t frameCount = 0,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ callback_t cbf = NULL,
+ void* user = NULL,
+ int32_t notificationFrames = 0,
+ const sp<IMemory>& sharedBuffer = 0,
+ bool threadCanCallJava = false,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ transfer_type transferType = TRANSFER_DEFAULT,
+ const audio_offload_info_t *offloadInfo = NULL,
+ uid_t uid = AUDIO_UID_INVALID,
+ pid_t pid = -1,
+ const audio_attributes_t* pAttributes = NULL,
+ bool doNotReconnect = false,
+ float maxRequiredSpeed = 1.0f,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
/* Result of constructing the AudioTrack. This must be checked for successful initialization
* before using any AudioTrack API (except for set()), because using
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index bcc11f4..a01b681 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -33,8 +33,6 @@
#include <system/audio.h>
#include <system/audio_effect.h>
#include <system/audio_policy.h>
-#include <media/IEffect.h>
-#include <media/IEffectClient.h>
#include <utils/String8.h>
#include <media/MicrophoneInfo.h>
#include <string>
@@ -42,6 +40,8 @@
#include "android/media/IAudioRecord.h"
#include "android/media/IAudioTrackCallback.h"
+#include "android/media/IEffect.h"
+#include "android/media/IEffectClient.h"
namespace android {
@@ -471,9 +471,9 @@
uint32_t preferredTypeFlag,
effect_descriptor_t *pDescriptor) const = 0;
- virtual sp<IEffect> createEffect(
+ virtual sp<media::IEffect> createEffect(
effect_descriptor_t *pDesc,
- const sp<IEffectClient>& client,
+ const sp<media::IEffectClient>& client,
int32_t priority,
// AudioFlinger doesn't take over handle reference from client
audio_io_handle_t output,
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index bb1c07f..837375d 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -196,13 +196,13 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration) = 0;
- virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+ virtual status_t setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices)
= 0;
virtual status_t removeUidDeviceAffinities(uid_t uid) = 0;
virtual status_t setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices) = 0;
+ const AudioDeviceTypeAddrVector& devices) = 0;
virtual status_t removeUserIdDeviceAffinities(int userId) = 0;
@@ -241,13 +241,35 @@
virtual bool isCallScreenModeSupported() = 0;
- virtual status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device) = 0;
+ virtual status_t setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
- virtual status_t removePreferredDeviceForStrategy(product_strategy_t strategy) = 0;
+ virtual status_t removeDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role) = 0;
- virtual status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device) = 0;
+ virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices) = 0;
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) = 0;
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices) = 0;
// The return code here is only intended to represent transport errors. The
// actual server implementation should always return NO_ERROR.
@@ -266,10 +288,6 @@
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
-private:
- void sanetizeAudioAttributes(audio_attributes_t* attr);
- status_t sanitizeEffectDescriptor(effect_descriptor_t* desc);
- status_t sanitizeAudioPortConfig(struct audio_port_config* config);
};
// ----------------------------------------------------------------------------
diff --git a/media/libaudioclient/include/media/IEffect.h b/media/libaudioclient/include/media/IEffect.h
deleted file mode 100644
index ff04869..0000000
--- a/media/libaudioclient/include/media/IEffect.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IEFFECT_H
-#define ANDROID_IEFFECT_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-
-namespace android {
-
-class IEffect: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(Effect);
-
- virtual status_t enable() = 0;
-
- virtual status_t disable() = 0;
-
- virtual status_t command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *pReplySize,
- void *pReplyData) = 0;
-
- virtual void disconnect() = 0;
-
- virtual sp<IMemory> getCblk() const = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnEffect: public BnInterface<IEffect>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif // ANDROID_IEFFECT_H
diff --git a/media/libaudioclient/include/media/IEffectClient.h b/media/libaudioclient/include/media/IEffectClient.h
deleted file mode 100644
index 2f78c98..0000000
--- a/media/libaudioclient/include/media/IEffectClient.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IEFFECTCLIENT_H
-#define ANDROID_IEFFECTCLIENT_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-
-namespace android {
-
-class IEffectClient: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(EffectClient);
-
- virtual void controlStatusChanged(bool controlGranted) = 0;
- virtual void enableStatusChanged(bool enabled) = 0;
- virtual void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnEffectClient: public BnInterface<IEffectClient>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif // ANDROID_IEFFECTCLIENT_H
diff --git a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
index b44043a..a47337b 100644
--- a/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
+++ b/media/libaudiofoundation/AudioDeviceTypeAddr.cpp
@@ -16,12 +16,57 @@
#include <media/AudioDeviceTypeAddr.h>
+#include <arpa/inet.h>
+#include <iostream>
+#include <regex>
+#include <set>
+#include <sstream>
+
namespace android {
+namespace {
+
+static const std::string SUPPRESSED = "SUPPRESSED";
+static const std::regex MAC_ADDRESS_REGEX("([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}");
+
+bool isSenstiveAddress(const std::string &address) {
+ if (std::regex_match(address, MAC_ADDRESS_REGEX)) {
+ return true;
+ }
+
+ sockaddr_storage ss4;
+ if (inet_pton(AF_INET, address.c_str(), &ss4) > 0) {
+ return true;
+ }
+
+ sockaddr_storage ss6;
+ if (inet_pton(AF_INET6, address.c_str(), &ss6) > 0) {
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace
+
+AudioDeviceTypeAddr::AudioDeviceTypeAddr(audio_devices_t type, const std::string &address) :
+ mType(type), mAddress(address) {
+ mIsAddressSensitive = isSenstiveAddress(mAddress);
+}
+
const char* AudioDeviceTypeAddr::getAddress() const {
return mAddress.c_str();
}
+const std::string& AudioDeviceTypeAddr::address() const {
+ return mAddress;
+}
+
+void AudioDeviceTypeAddr::setAddress(const std::string& address) {
+ mAddress = address;
+ mIsAddressSensitive = isSenstiveAddress(mAddress);
+}
+
bool AudioDeviceTypeAddr::equals(const AudioDeviceTypeAddr& other) const {
return mType == other.mType && mAddress == other.mAddress;
}
@@ -36,14 +81,34 @@
return false;
}
+bool AudioDeviceTypeAddr::operator==(const AudioDeviceTypeAddr &rhs) const {
+ return equals(rhs);
+}
+
+bool AudioDeviceTypeAddr::operator!=(const AudioDeviceTypeAddr &rhs) const {
+ return !operator==(rhs);
+}
+
void AudioDeviceTypeAddr::reset() {
mType = AUDIO_DEVICE_NONE;
- mAddress = "";
+ setAddress("");
+}
+
+std::string AudioDeviceTypeAddr::toString(bool includeSensitiveInfo) const {
+ std::stringstream sstream;
+ sstream << "type:0x" << std::hex << mType;
+ // IP and MAC address are sensitive information. The sensitive information will be suppressed
+ // if `includeSensitiveInfo` is false.
+ sstream << ",@:"
+ << (!includeSensitiveInfo && mIsAddressSensitive ? SUPPRESSED : mAddress);
+ return sstream.str();
}
status_t AudioDeviceTypeAddr::readFromParcel(const Parcel *parcel) {
status_t status;
- if ((status = parcel->readUint32(&mType)) != NO_ERROR) return status;
+ uint32_t rawDeviceType;
+ if ((status = parcel->readUint32(&rawDeviceType)) != NO_ERROR) return status;
+ mType = static_cast<audio_devices_t>(rawDeviceType);
status = parcel->readUtf8FromUtf16(&mAddress);
return status;
}
@@ -64,4 +129,30 @@
return deviceTypes;
}
-}
\ No newline at end of file
+AudioDeviceTypeAddrVector excludeDeviceTypeAddrsFrom(
+ const AudioDeviceTypeAddrVector& devices,
+ const AudioDeviceTypeAddrVector& devicesToExclude) {
+ std::set<AudioDeviceTypeAddr> devicesToExcludeSet(
+ devicesToExclude.begin(), devicesToExclude.end());
+ AudioDeviceTypeAddrVector remainedDevices;
+ for (const auto& device : devices) {
+ if (devicesToExcludeSet.count(device) == 0) {
+ remainedDevices.push_back(device);
+ }
+ }
+ return remainedDevices;
+}
+
+std::string dumpAudioDeviceTypeAddrVector(const AudioDeviceTypeAddrVector& deviceTypeAddrs,
+ bool includeSensitiveInfo) {
+ std::stringstream stream;
+ for (auto it = deviceTypeAddrs.begin(); it != deviceTypeAddrs.end(); ++it) {
+ if (it != deviceTypeAddrs.begin()) {
+ stream << " ";
+ }
+ stream << it->toString(includeSensitiveInfo);
+ }
+ return stream.str();
+}
+
+} // namespace android
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 0d28335..759140e 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -152,8 +152,12 @@
if ((status = parcel->readInt32(&mIndex)) != NO_ERROR) return status;
if ((status = parcel->readBool(&mUseInChannelMask)) != NO_ERROR) return status;
if ((status = parcel->readBool(&mUseForVolume)) != NO_ERROR) return status;
- if ((status = parcel->readUint32(&mGain.mode)) != NO_ERROR) return status;
- if ((status = parcel->readUint32(&mGain.channel_mask)) != NO_ERROR) return status;
+ uint32_t rawGainMode;
+ if ((status = parcel->readUint32(&rawGainMode)) != NO_ERROR) return status;
+ mGain.mode = static_cast<audio_gain_mode_t>(rawGainMode);
+ uint32_t rawChannelMask;
+ if ((status = parcel->readUint32(&rawChannelMask)) != NO_ERROR) return status;
+ mGain.channel_mask = static_cast<audio_channel_mask_t>(rawChannelMask);
if ((status = parcel->readInt32(&mGain.min_value)) != NO_ERROR) return status;
if ((status = parcel->readInt32(&mGain.max_value)) != NO_ERROR) return status;
if ((status = parcel->readInt32(&mGain.default_value)) != NO_ERROR) return status;
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
index f988690..1846a6b 100644
--- a/media/libaudiofoundation/AudioPort.cpp
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -268,12 +268,17 @@
if ((status = parcel->readUint32(reinterpret_cast<uint32_t*>(&mFormat))) != NO_ERROR) {
return status;
}
- if ((status = parcel->readUint32(&mChannelMask)) != NO_ERROR) return status;
+ uint32_t rawChannelMask;
+ if ((status = parcel->readUint32(&rawChannelMask)) != NO_ERROR) return status;
+ mChannelMask = static_cast<audio_channel_mask_t>(rawChannelMask);
if ((status = parcel->readInt32(&mId)) != NO_ERROR) return status;
// Read mGain from parcel.
if ((status = parcel->readInt32(&mGain.index)) != NO_ERROR) return status;
- if ((status = parcel->readUint32(&mGain.mode)) != NO_ERROR) return status;
- if ((status = parcel->readUint32(&mGain.channel_mask)) != NO_ERROR) return status;
+ uint32_t rawGainMode;
+ if ((status = parcel->readUint32(&rawGainMode)) != NO_ERROR) return status;
+ mGain.mode = static_cast<audio_gain_mode_t>(rawGainMode);
+ if ((status = parcel->readUint32(&rawChannelMask)) != NO_ERROR) return status;
+ mGain.channel_mask = static_cast<audio_channel_mask_t>(rawChannelMask);
if ((status = parcel->readUint32(&mGain.ramp_duration_ms)) != NO_ERROR) return status;
std::vector<int> values;
if ((status = parcel->readInt32Vector(&values)) != NO_ERROR) return status;
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
index 91be346..67b600e 100644
--- a/media/libaudiofoundation/AudioProfile.cpp
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -157,7 +157,9 @@
std::vector<int> values;
if ((status = parcel->readInt32Vector(&values)) != NO_ERROR) return status;
mChannelMasks.clear();
- mChannelMasks.insert(values.begin(), values.end());
+ for (auto raw : values) {
+ mChannelMasks.insert(static_cast<audio_channel_mask_t>(raw));
+ }
values.clear();
if ((status = parcel->readInt32Vector(&values)) != NO_ERROR) return status;
mSamplingRates.clear();
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index e9b589d..16cf71a 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -22,9 +22,6 @@
#include <media/DeviceDescriptorBase.h>
#include <media/TypeConverter.h>
-#include <arpa/inet.h>
-#include <regex>
-
namespace android {
DeviceDescriptorBase::DeviceDescriptorBase(audio_devices_t type) :
@@ -37,46 +34,19 @@
{
}
-namespace {
-
-static const std::string SUPPRESSED = "SUPPRESSED";
-static const std::regex MAC_ADDRESS_REGEX("([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}");
-
-bool isAddressSensitive(const std::string &address) {
- if (std::regex_match(address, MAC_ADDRESS_REGEX)) {
- return true;
- }
-
- sockaddr_storage ss4;
- if (inet_pton(AF_INET, address.c_str(), &ss4) > 0) {
- return true;
- }
-
- sockaddr_storage ss6;
- if (inet_pton(AF_INET6, address.c_str(), &ss6) > 0) {
- return true;
- }
-
- return false;
-}
-
-} // namespace
-
DeviceDescriptorBase::DeviceDescriptorBase(const AudioDeviceTypeAddr &deviceTypeAddr) :
AudioPort("", AUDIO_PORT_TYPE_DEVICE,
audio_is_output_device(deviceTypeAddr.mType) ? AUDIO_PORT_ROLE_SINK :
AUDIO_PORT_ROLE_SOURCE),
mDeviceTypeAddr(deviceTypeAddr)
{
- if (mDeviceTypeAddr.mAddress.empty() && audio_is_remote_submix_device(mDeviceTypeAddr.mType)) {
- mDeviceTypeAddr.mAddress = "0";
+ if (mDeviceTypeAddr.address().empty() && audio_is_remote_submix_device(mDeviceTypeAddr.mType)) {
+ mDeviceTypeAddr.setAddress("0");
}
- mIsAddressSensitive = isAddressSensitive(mDeviceTypeAddr.mAddress);
}
void DeviceDescriptorBase::setAddress(const std::string &address) {
- mDeviceTypeAddr.mAddress = address;
- mIsAddressSensitive = isAddressSensitive(address);
+ mDeviceTypeAddr.setAddress(address);
}
void DeviceDescriptorBase::toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -157,7 +127,7 @@
"%*s- supported encapsulation metadata types: %u",
spaces, "", mEncapsulationMetadataTypes));
- if (mDeviceTypeAddr.mAddress.size() != 0) {
+ if (mDeviceTypeAddr.address().size() != 0) {
dst->append(base::StringPrintf(
"%*s- address: %-32s\n", spaces, "", mDeviceTypeAddr.getAddress()));
}
@@ -166,14 +136,7 @@
std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
{
- std::stringstream sstream;
- sstream << "type:0x" << std::hex << type();
- // IP and MAC address are sensitive information. The sensitive information will be suppressed
- // is `includeSensitiveInfo` is false.
- sstream << ",@:"
- << (!includeSensitiveInfo && mIsAddressSensitive ? SUPPRESSED
- : mDeviceTypeAddr.mAddress);
- return sstream.str();
+ return mDeviceTypeAddr.toString(includeSensitiveInfo);
}
void DeviceDescriptorBase::log() const
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 72fda49..aa7ca69 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -96,7 +96,7 @@
static inline audio_devices_t deviceTypesToBitMask(const DeviceTypeSet& deviceTypes) {
audio_devices_t types = AUDIO_DEVICE_NONE;
for (auto deviceType : deviceTypes) {
- types |= deviceType;
+ types = static_cast<audio_devices_t>(types | deviceType);
}
return types;
}
@@ -131,4 +131,4 @@
std::string toString(const DeviceTypeSet& deviceTypes);
-} // namespace android
\ No newline at end of file
+} // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
index 60ea78e..7497faf 100644
--- a/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
+++ b/media/libaudiofoundation/include/media/AudioDeviceTypeAddr.h
@@ -27,28 +27,43 @@
namespace android {
-struct AudioDeviceTypeAddr : public Parcelable {
+class AudioDeviceTypeAddr : public Parcelable {
+public:
AudioDeviceTypeAddr() = default;
- AudioDeviceTypeAddr(audio_devices_t type, const std::string& address) :
- mType(type), mAddress(address) {}
+ AudioDeviceTypeAddr(audio_devices_t type, const std::string& address);
const char* getAddress() const;
+ const std::string& address() const;
+
+ void setAddress(const std::string& address);
+
+ bool isAddressSensitive();
+
bool equals(const AudioDeviceTypeAddr& other) const;
AudioDeviceTypeAddr& operator= (const AudioDeviceTypeAddr&) = default;
bool operator<(const AudioDeviceTypeAddr& other) const;
+ bool operator==(const AudioDeviceTypeAddr& rhs) const;
+
+ bool operator!=(const AudioDeviceTypeAddr& rhs) const;
+
void reset();
+ std::string toString(bool includeSensitiveInfo=false) const;
+
status_t readFromParcel(const Parcel *parcel) override;
status_t writeToParcel(Parcel *parcel) const override;
audio_devices_t mType = AUDIO_DEVICE_NONE;
+
+private:
std::string mAddress;
+ bool mIsAddressSensitive;
};
using AudioDeviceTypeAddrVector = std::vector<AudioDeviceTypeAddr>;
@@ -58,4 +73,15 @@
*/
DeviceTypeSet getAudioDeviceTypes(const AudioDeviceTypeAddrVector& deviceTypeAddrs);
-}
+/**
+ * Return a collection of AudioDeviceTypeAddrs that appear in `devices` but not
+ * in `devicesToExclude`.
+ */
+AudioDeviceTypeAddrVector excludeDeviceTypeAddrsFrom(
+ const AudioDeviceTypeAddrVector& devices,
+ const AudioDeviceTypeAddrVector& devicesToExclude);
+
+std::string dumpAudioDeviceTypeAddrVector(const AudioDeviceTypeAddrVector& deviceTypeAddrs,
+ bool includeSensitiveInfo=false);
+
+} // namespace android
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
index c143c7e..0cbd1de 100644
--- a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -41,7 +41,7 @@
virtual ~DeviceDescriptorBase() {}
audio_devices_t type() const { return mDeviceTypeAddr.mType; }
- std::string address() const { return mDeviceTypeAddr.mAddress; }
+ const std::string& address() const { return mDeviceTypeAddr.address(); }
void setAddress(const std::string &address);
const AudioDeviceTypeAddr& getDeviceTypeAddr() const { return mDeviceTypeAddr; }
@@ -77,7 +77,6 @@
protected:
AudioDeviceTypeAddr mDeviceTypeAddr;
- bool mIsAddressSensitive;
uint32_t mEncapsulationModes = 0;
uint32_t mEncapsulationMetadataTypes = 0;
};
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 1709d1e..fab0fea 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -18,6 +18,7 @@
"libaudiohal@4.0",
"libaudiohal@5.0",
"libaudiohal@6.0",
+// "libaudiohal@7.0",
],
shared_libs: [
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
index 5985ef0..7228b22 100644
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ b/media/libaudiohal/FactoryHalHidl.cpp
@@ -31,6 +31,7 @@
/** Supported HAL versions, in order of preference.
*/
const char* sAudioHALVersions[] = {
+ "7.0",
"6.0",
"5.0",
"4.0",
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index 967fba1..df006b5 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -116,3 +116,20 @@
]
}
+cc_library_shared {
+ enabled: false,
+ name: "libaudiohal@7.0",
+ defaults: ["libaudiohal_default"],
+ shared_libs: [
+ "android.hardware.audio.common@7.0",
+ "android.hardware.audio.common@7.0-util",
+ "android.hardware.audio.effect@7.0",
+ "android.hardware.audio@7.0",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=7",
+ "-DMINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
+
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index 9192a31..80e2b87 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -37,7 +37,7 @@
EffectsFactoryHalHidl::EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory)
: ConversionHelperHidl("EffectsFactory") {
- ALOG_ASSERT(effectsFactory != nullptr, "Provided IDevicesFactory service is NULL");
+ ALOG_ASSERT(effectsFactory != nullptr, "Provided IEffectsFactory service is NULL");
mEffectsFactory = effectsFactory;
}
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index dece1bb..5fa85e7 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -54,6 +54,8 @@
virtual status_t dumpEffects(int fd);
+ virtual float getHalVersion() { return MAJOR_VERSION + (float)MINOR_VERSION / 10; }
+
status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
status_t mirrorBuffer(void* external, size_t size,
sp<EffectBufferHalInterface>* buffer) override;
diff --git a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
index 3a76f9f..9fb56ae 100644
--- a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -46,6 +46,8 @@
virtual status_t dumpEffects(int fd) = 0;
+ virtual float getHalVersion() = 0;
+
static sp<EffectsFactoryHalInterface> create();
virtual status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) = 0;
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 1a31420..d85e2e9 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -79,10 +79,14 @@
&& mixerChannelMask == (track->mMixerChannelMask | track->mMixerHapticChannelMask)) {
return false; // no need to change
}
- const audio_channel_mask_t hapticChannelMask = trackChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
- trackChannelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
- const audio_channel_mask_t mixerHapticChannelMask = mixerChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
- mixerChannelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+ const audio_channel_mask_t hapticChannelMask =
+ static_cast<audio_channel_mask_t>(trackChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
+ trackChannelMask = static_cast<audio_channel_mask_t>(
+ trackChannelMask & ~AUDIO_CHANNEL_HAPTIC_ALL);
+ const audio_channel_mask_t mixerHapticChannelMask = static_cast<audio_channel_mask_t>(
+ mixerChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
+ mixerChannelMask = static_cast<audio_channel_mask_t>(
+ mixerChannelMask & ~AUDIO_CHANNEL_HAPTIC_ALL);
// always recompute for both channel masks even if only one has changed.
const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
@@ -362,7 +366,8 @@
const audio_channel_mask_t trackChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
if (setChannelMasks(name, trackChannelMask,
- (track->mMixerChannelMask | track->mMixerHapticChannelMask))) {
+ static_cast<audio_channel_mask_t>(
+ track->mMixerChannelMask | track->mMixerHapticChannelMask))) {
ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
invalidate();
}
@@ -407,7 +412,8 @@
case MIXER_CHANNEL_MASK: {
const audio_channel_mask_t mixerChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
- if (setChannelMasks(name, track->channelMask | track->mHapticChannelMask,
+ if (setChannelMasks(name, static_cast<audio_channel_mask_t>(
+ track->channelMask | track->mHapticChannelMask),
mixerChannelMask)) {
ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
invalidate();
@@ -423,7 +429,7 @@
}
} break;
case HAPTIC_INTENSITY: {
- const haptic_intensity_t hapticIntensity = static_cast<haptic_intensity_t>(valueInt);
+ const os::HapticScale hapticIntensity = static_cast<os::HapticScale>(valueInt);
if (track->mHapticIntensity != hapticIntensity) {
track->mHapticIntensity = hapticIntensity;
}
@@ -533,9 +539,10 @@
Track* t = static_cast<Track*>(track);
audio_channel_mask_t channelMask = t->channelMask;
- t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+ t->mHapticChannelMask = static_cast<audio_channel_mask_t>(
+ channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
- channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+ channelMask = static_cast<audio_channel_mask_t>(channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL);
t->channelCount = audio_channel_count_from_out_mask(channelMask);
ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
"Non-stereo channel mask: %d\n", channelMask);
@@ -545,7 +552,7 @@
t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
// haptic
t->mHapticPlaybackEnabled = false;
- t->mHapticIntensity = HAPTIC_SCALE_NONE;
+ t->mHapticIntensity = os::HapticScale::NONE;
t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
t->mMixerHapticChannelCount = 0;
t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
@@ -590,19 +597,12 @@
const std::shared_ptr<Track> &t = getTrack(name);
if (t->mHapticPlaybackEnabled) {
size_t sampleCount = mFrameCount * t->mMixerHapticChannelCount;
- float gamma = t->getHapticScaleGamma();
- float maxAmplitudeRatio = t->getHapticMaxAmplitudeRatio();
uint8_t* buffer = (uint8_t*)pair.first + mFrameCount * audio_bytes_per_frame(
t->mMixerChannelCount, t->mMixerFormat);
switch (t->mMixerFormat) {
// Mixer format should be AUDIO_FORMAT_PCM_FLOAT.
case AUDIO_FORMAT_PCM_FLOAT: {
- float* fout = (float*) buffer;
- for (size_t i = 0; i < sampleCount; i++) {
- float mul = fout[i] >= 0 ? 1.0 : -1.0;
- fout[i] = powf(fabsf(fout[i] / HAPTIC_MAX_AMPLITUDE_FLOAT), gamma)
- * maxAmplitudeRatio * HAPTIC_MAX_AMPLITUDE_FLOAT * mul;
- }
+ os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity);
} break;
default:
LOG_ALWAYS_FATAL("bad mMixerFormat: %#x", t->mMixerFormat);
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
index 3f7cd48..70eafe3 100644
--- a/media/libaudioprocessing/include/media/AudioMixer.h
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -22,10 +22,10 @@
#include <stdint.h>
#include <sys/types.h>
-#include <android/os/IExternalVibratorService.h>
#include <media/AudioMixerBase.h>
#include <media/BufferProviders.h>
#include <utils/threads.h>
+#include <vibrator/ExternalVibrationUtils.h>
// FIXME This is actually unity gain, which might not be max in future, expressed in U.12
#define MAX_GAIN_INT AudioMixerBase::UNITY_GAIN_INT
@@ -55,32 +55,6 @@
// parameter 'value' is a pointer to the new playback rate.
};
- typedef enum { // Haptic intensity, should keep consistent with VibratorService
- HAPTIC_SCALE_MUTE = os::IExternalVibratorService::SCALE_MUTE,
- HAPTIC_SCALE_VERY_LOW = os::IExternalVibratorService::SCALE_VERY_LOW,
- HAPTIC_SCALE_LOW = os::IExternalVibratorService::SCALE_LOW,
- HAPTIC_SCALE_NONE = os::IExternalVibratorService::SCALE_NONE,
- HAPTIC_SCALE_HIGH = os::IExternalVibratorService::SCALE_HIGH,
- HAPTIC_SCALE_VERY_HIGH = os::IExternalVibratorService::SCALE_VERY_HIGH,
- } haptic_intensity_t;
- static constexpr float HAPTIC_SCALE_VERY_LOW_RATIO = 2.0f / 3.0f;
- static constexpr float HAPTIC_SCALE_LOW_RATIO = 3.0f / 4.0f;
- static const constexpr float HAPTIC_MAX_AMPLITUDE_FLOAT = 1.0f;
-
- static inline bool isValidHapticIntensity(haptic_intensity_t hapticIntensity) {
- switch (hapticIntensity) {
- case HAPTIC_SCALE_MUTE:
- case HAPTIC_SCALE_VERY_LOW:
- case HAPTIC_SCALE_LOW:
- case HAPTIC_SCALE_NONE:
- case HAPTIC_SCALE_HIGH:
- case HAPTIC_SCALE_VERY_HIGH:
- return true;
- default:
- return false;
- }
- }
-
AudioMixer(size_t frameCount, uint32_t sampleRate)
: AudioMixerBase(frameCount, sampleRate) {
pthread_once(&sOnceControl, &sInitRoutine);
@@ -170,7 +144,7 @@
// Haptic
bool mHapticPlaybackEnabled;
- haptic_intensity_t mHapticIntensity;
+ os::HapticScale mHapticIntensity;
audio_channel_mask_t mHapticChannelMask;
uint32_t mHapticChannelCount;
audio_channel_mask_t mMixerHapticChannelMask;
@@ -180,38 +154,6 @@
uint32_t mAdjustNonDestructiveInChannelCount;
uint32_t mAdjustNonDestructiveOutChannelCount;
bool mKeepContractedChannels;
-
- float getHapticScaleGamma() const {
- // Need to keep consistent with the value in VibratorService.
- switch (mHapticIntensity) {
- case HAPTIC_SCALE_VERY_LOW:
- return 2.0f;
- case HAPTIC_SCALE_LOW:
- return 1.5f;
- case HAPTIC_SCALE_HIGH:
- return 0.5f;
- case HAPTIC_SCALE_VERY_HIGH:
- return 0.25f;
- default:
- return 1.0f;
- }
- }
-
- float getHapticMaxAmplitudeRatio() const {
- // Need to keep consistent with the value in VibratorService.
- switch (mHapticIntensity) {
- case HAPTIC_SCALE_VERY_LOW:
- return HAPTIC_SCALE_VERY_LOW_RATIO;
- case HAPTIC_SCALE_LOW:
- return HAPTIC_SCALE_LOW_RATIO;
- case HAPTIC_SCALE_NONE:
- case HAPTIC_SCALE_HIGH:
- case HAPTIC_SCALE_VERY_HIGH:
- return 1.0f;
- default:
- return 0.0f;
- }
- }
};
inline std::shared_ptr<Track> getTrack(int name) {
diff --git a/media/libaudioprocessing/tests/test-mixer.cpp b/media/libaudioprocessing/tests/test-mixer.cpp
index bc9d2a6..1bbb863 100644
--- a/media/libaudioprocessing/tests/test-mixer.cpp
+++ b/media/libaudioprocessing/tests/test-mixer.cpp
@@ -241,7 +241,8 @@
// set up the tracks.
for (size_t i = 0; i < providers.size(); ++i) {
//printf("track %d out of %d\n", i, providers.size());
- uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
+ audio_channel_mask_t channelMask =
+ audio_channel_out_mask_from_count(providers[i].getNumChannels());
const int name = i;
const status_t status = mixer->create(
name, channelMask, formats[i], AUDIO_SESSION_OUTPUT_MIX);
diff --git a/media/libeffects/data/audio_effects.xml b/media/libeffects/data/audio_effects.xml
index 2e5f529..93a2181 100644
--- a/media/libeffects/data/audio_effects.xml
+++ b/media/libeffects/data/audio_effects.xml
@@ -21,6 +21,7 @@
<library name="downmix" path="libdownmix.so"/>
<library name="loudness_enhancer" path="libldnhncr.so"/>
<library name="dynamics_processing" path="libdynproc.so"/>
+ <library name="haptic_generator" path="libhapticgenerator.so"/>
</libraries>
<!-- list of effects to load.
@@ -58,6 +59,7 @@
<effect name="downmix" library="downmix" uuid="93f04452-e4fe-41cc-91f9-e475b6d1d69f"/>
<effect name="loudness_enhancer" library="loudness_enhancer" uuid="fa415329-2034-4bea-b5dc-5b381c8d1e2c"/>
<effect name="dynamics_processing" library="dynamics_processing" uuid="e0e6539b-1781-7261-676f-6d7573696340"/>
+ <effect name="haptic_generator" library="haptic_generator" uuid="97c4acd1-8b82-4f2f-832e-c2fe5d7a9931"/>
</effects>
<!-- Audio pre processor configurations.
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
new file mode 100644
index 0000000..f947339
--- /dev/null
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -0,0 +1,51 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// HapticGenerator library
+cc_library_shared {
+ name: "libhapticgenerator",
+
+ vendor: true,
+
+ srcs: [
+ "EffectHapticGenerator.cpp",
+ "Processors.cpp",
+ ],
+
+ cflags: [
+ "-O2", // Turning on the optimization in order to reduce effect processing time.
+ // The latency is around 1/5 less than without the optimization.
+ "-Wall",
+ "-Werror",
+ "-ffast-math", // This is needed for the non-zero coefficients optimization for
+ // BiquadFilter. Try the biquad_filter_benchmark test in audio_utils
+ // with/without `-ffast-math` for more context.
+ "-fvisibility=hidden",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "libbinder",
+ "liblog",
+ "libutils",
+ "libvibrator",
+ ],
+
+ relative_install_path: "soundfx",
+
+ header_libs: [
+ "libaudioeffects",
+ ],
+}
+
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
new file mode 100644
index 0000000..9b93659
--- /dev/null
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -0,0 +1,519 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHG"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "EffectHapticGenerator.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include <errno.h>
+#include <inttypes.h>
+
+#include <audio_effects/effect_hapticgenerator.h>
+#include <audio_utils/format.h>
+#include <system/audio.h>
+
+// This is the only symbol that needs to be exported
+__attribute__ ((visibility ("default")))
+audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = {
+ .tag = AUDIO_EFFECT_LIBRARY_TAG,
+ .version = EFFECT_LIBRARY_API_VERSION,
+ .name = "HapticGenerator Library",
+ .implementor = "The Android Open Source Project",
+ .create_effect = android::audio_effect::haptic_generator::HapticGeneratorLib_Create,
+ .release_effect = android::audio_effect::haptic_generator::HapticGeneratorLib_Release,
+ .get_descriptor = android::audio_effect::haptic_generator::HapticGeneratorLib_GetDescriptor,
+};
+
+namespace android::audio_effect::haptic_generator {
+
+// effect_handle_t interface implementation for haptic generator effect
+const struct effect_interface_s gHapticGeneratorInterface = {
+ HapticGenerator_Process,
+ HapticGenerator_Command,
+ HapticGenerator_GetDescriptor,
+ nullptr /* no process_reverse function, no reference stream needed */
+};
+
+//-----------------------------------------------------------------------------
+// Effect Descriptor
+//-----------------------------------------------------------------------------
+
+// UUIDs for effect types have been generated from http://www.itu.int/ITU-T/asn1/uuid.html
+// Haptic Generator
+static const effect_descriptor_t gHgDescriptor = {
+ FX_IID_HAPTICGENERATOR_, // type
+ {0x97c4acd1, 0x8b82, 0x4f2f, 0x832e, {0xc2, 0xfe, 0x5d, 0x7a, 0x99, 0x31}}, // uuid
+ EFFECT_CONTROL_API_VERSION,
+ EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST,
+ 0, // FIXME what value should be reported? // cpu load
+ 0, // FIXME what value should be reported? // memory usage
+ "Haptic Generator",
+ "The Android Open Source Project"
+};
+
+//-----------------------------------------------------------------------------
+// Internal functions
+//-----------------------------------------------------------------------------
+
+namespace {
+
+int HapticGenerator_Init(struct HapticGeneratorContext *context) {
+ context->itfe = &gHapticGeneratorInterface;
+
+ context->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+ context->config.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ context->config.inputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ context->config.inputCfg.samplingRate = 0;
+ context->config.inputCfg.bufferProvider.getBuffer = nullptr;
+ context->config.inputCfg.bufferProvider.releaseBuffer = nullptr;
+ context->config.inputCfg.bufferProvider.cookie = nullptr;
+ context->config.inputCfg.mask = EFFECT_CONFIG_ALL;
+ context->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
+ context->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+ context->config.outputCfg.format = AUDIO_FORMAT_PCM_FLOAT;
+ context->config.outputCfg.samplingRate = 0;
+ context->config.outputCfg.bufferProvider.getBuffer = nullptr;
+ context->config.outputCfg.bufferProvider.releaseBuffer = nullptr;
+ context->config.outputCfg.bufferProvider.cookie = nullptr;
+ context->config.outputCfg.mask = EFFECT_CONFIG_ALL;
+
+ memset(context->param.hapticChannelSource, 0, sizeof(context->param.hapticChannelSource));
+ context->param.hapticChannelCount = 0;
+ context->param.audioChannelCount = 0;
+ context->param.maxHapticIntensity = os::HapticScale::MUTE;
+
+ context->state = HAPTICGENERATOR_STATE_INITIALIZED;
+ return 0;
+}
+
+void addBiquadFilter(
+ std::vector<std::function<void(float *, const float *, size_t)>> &processingChain,
+ struct HapticGeneratorProcessorsRecord &processorsRecord,
+ std::shared_ptr<HapticBiquadFilter> filter) {
+ // The process chain captures the shared pointer of the filter in lambda.
+ // The process record will keep a shared pointer to the filter so that it is possible to access
+ // the filter outside of the process chain.
+ processorsRecord.filters.push_back(filter);
+ processingChain.push_back([filter](float *out, const float *in, size_t frameCount) {
+ filter->process(out, in, frameCount);
+ });
+}
+
+/**
+ * \brief build haptic generator processing chain.
+ *
+ * \param processingChain
+ * \param processorsRecord a structure to cache all the shared pointers for processors
+ * \param sampleRate the audio sampling rate. Use a float here as it may be used to create filters
+ * \param channelCount haptic channel count
+ */
+void HapticGenerator_buildProcessingChain(
+ std::vector<std::function<void(float*, const float*, size_t)>>& processingChain,
+ struct HapticGeneratorProcessorsRecord& processorsRecord,
+ float sampleRate, size_t channelCount) {
+ float highPassCornerFrequency = 100.0f;
+ auto hpf = createHPF2(highPassCornerFrequency, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, hpf);
+ float lowPassCornerFrequency = 3000.0f;
+ auto lpf = createLPF2(lowPassCornerFrequency, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, lpf);
+
+ auto ramp = std::make_shared<Ramp>(channelCount);
+ // The process chain captures the shared pointer of the ramp in lambda. It is not the
+ // only reference to the ramp.
+ // The process record will keep a shared pointer to the ramp so that it is possible to access
+ // the ramp outside of the process chain.
+ processorsRecord.ramps.push_back(ramp);
+ processingChain.push_back([ramp](float *out, const float *in, size_t frameCount) {
+ ramp->process(out, in, frameCount);
+ });
+
+ highPassCornerFrequency = 60.0f;
+ hpf = createHPF2(highPassCornerFrequency, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, hpf);
+ lowPassCornerFrequency = 700.0f;
+ lpf = createLPF2(lowPassCornerFrequency, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, lpf);
+
+ lowPassCornerFrequency = 5.0f;
+ float normalizationPower = -0.3f;
+ // The process chain captures the shared pointer of the slow envelope in lambda. It is
+ // not the only reference to the slow envelope.
+ // The process record will keep a shared pointer to the slow envelope so that it is possible
+ // to access the slow envelope outside of the process chain.
+ auto slowEnv = std::make_shared<SlowEnvelope>(
+ lowPassCornerFrequency, sampleRate, normalizationPower, channelCount);
+ processorsRecord.slowEnvs.push_back(slowEnv);
+ processingChain.push_back([slowEnv](float *out, const float *in, size_t frameCount) {
+ slowEnv->process(out, in, frameCount);
+ });
+
+ lowPassCornerFrequency = 400.0f;
+ lpf = createLPF2(lowPassCornerFrequency, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, lpf);
+ lowPassCornerFrequency = 500.0f;
+ lpf = createLPF2(lowPassCornerFrequency, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, lpf);
+
+ auto apf = createAPF2(400.0f, 200.0f, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, apf);
+ apf = createAPF2(100.0f, 50.0f, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, apf);
+ float allPassCornerFrequency = 25.0f;
+ apf = createAPF(allPassCornerFrequency, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, apf);
+
+ float resonantFrequency = 150.0f;
+ float bandpassQ = 1.0f;
+ auto bpf = createBPF(resonantFrequency, bandpassQ, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, bpf);
+
+ float zeroQ = 8.0f;
+ float poleQ = 4.0f;
+ auto bsf = createBSF(resonantFrequency, zeroQ, poleQ, sampleRate, channelCount);
+ addBiquadFilter(processingChain, processorsRecord, bsf);
+}
+
+int HapticGenerator_Configure(struct HapticGeneratorContext *context, effect_config_t *config) {
+ if (config->inputCfg.samplingRate != config->outputCfg.samplingRate ||
+ config->inputCfg.format != config->outputCfg.format ||
+ config->inputCfg.format != AUDIO_FORMAT_PCM_FLOAT ||
+ config->inputCfg.channels != config->outputCfg.channels ||
+ config->inputCfg.buffer.frameCount != config->outputCfg.buffer.frameCount) {
+ return -EINVAL;
+ }
+ if (&context->config != config) {
+ context->processingChain.clear();
+ context->processorsRecord.filters.clear();
+ context->processorsRecord.ramps.clear();
+ context->processorsRecord.slowEnvs.clear();
+ memcpy(&context->config, config, sizeof(effect_config_t));
+ context->param.audioChannelCount = audio_channel_count_from_out_mask(
+ ((audio_channel_mask_t) config->inputCfg.channels) & ~AUDIO_CHANNEL_HAPTIC_ALL);
+ context->param.hapticChannelCount = audio_channel_count_from_out_mask(
+ ((audio_channel_mask_t) config->outputCfg.channels) & AUDIO_CHANNEL_HAPTIC_ALL);
+ ALOG_ASSERT(context->param.hapticChannelCount <= 2,
+ "haptic channel count(%zu) is too large",
+ context->param.hapticChannelCount);
+ context->audioDataBytesPerFrame = audio_bytes_per_frame(
+ context->param.audioChannelCount, (audio_format_t) config->inputCfg.format);
+ for (size_t i = 0; i < context->param.hapticChannelCount; ++i) {
+ // By default, use the first audio channel to generate haptic channels.
+ context->param.hapticChannelSource[i] = 0;
+ }
+
+ HapticGenerator_buildProcessingChain(context->processingChain,
+ context->processorsRecord,
+ config->inputCfg.samplingRate,
+ context->param.hapticChannelCount);
+ }
+ return 0;
+}
+
+int HapticGenerator_Reset(struct HapticGeneratorContext *context) {
+ for (auto& filter : context->processorsRecord.filters) {
+ filter->clear();
+ }
+ for (auto& slowEnv : context->processorsRecord.slowEnvs) {
+ slowEnv->clear();
+ }
+ return 0;
+}
+
+int HapticGenerator_SetParameter(struct HapticGeneratorContext *context,
+ int32_t param,
+ uint32_t size,
+ void *value) {
+ switch (param) {
+ case HG_PARAM_HAPTIC_INTENSITY: {
+ if (value == nullptr || size != (uint32_t) (2 * sizeof(int))) {
+ return -EINVAL;
+ }
+ int id = *(int *) value;
+ os::HapticScale hapticIntensity = static_cast<os::HapticScale>(*((int *) value + 1));
+ if (hapticIntensity == os::HapticScale::MUTE) {
+ context->param.id2Intensity.erase(id);
+ } else {
+ context->param.id2Intensity.emplace(id, hapticIntensity);
+ }
+ context->param.maxHapticIntensity = hapticIntensity;
+ for (const auto&[id, intensity] : context->param.id2Intensity) {
+ context->param.maxHapticIntensity = std::max(
+ context->param.maxHapticIntensity, intensity);
+ }
+ break;
+ }
+
+ default:
+ ALOGW("Unknown param: %d", param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * \brief run the processing chain to generate haptic data from audio data
+ *
+ * \param processingChain the processing chain for generating haptic data
+ * \param buf1 a buffer contains raw audio data
+ * \param buf2 a buffer that is large enough to keep all the data
+ * \param frameCount frame count of the data
+ * \return a pointer to the output buffer
+ */
+float* HapticGenerator_runProcessingChain(
+ const std::vector<std::function<void(float*, const float*, size_t)>>& processingChain,
+ float* buf1, float* buf2, size_t frameCount) {
+ float *in = buf1;
+ float *out = buf2;
+ for (const auto processingFunc : processingChain) {
+ processingFunc(out, in, frameCount);
+ std::swap(in, out);
+ }
+ return in;
+}
+
+} // namespace (anonymous)
+
+//-----------------------------------------------------------------------------
+// Effect API Implementation
+//-----------------------------------------------------------------------------
+
+/*--- Effect Library Interface Implementation ---*/
+
+int32_t HapticGeneratorLib_Create(const effect_uuid_t *uuid,
+ int32_t sessionId __unused,
+ int32_t ioId __unused,
+ effect_handle_t *handle) {
+ if (handle == nullptr || uuid == nullptr) {
+ return -EINVAL;
+ }
+
+ if (memcmp(uuid, &gHgDescriptor.uuid, sizeof(*uuid)) != 0) {
+ return -EINVAL;
+ }
+
+ HapticGeneratorContext *context = new HapticGeneratorContext;
+ HapticGenerator_Init(context);
+
+ *handle = (effect_handle_t) context;
+ ALOGV("%s context is %p", __func__, context);
+ return 0;
+}
+
+int32_t HapticGeneratorLib_Release(effect_handle_t handle) {
+ HapticGeneratorContext *context = (HapticGeneratorContext *) handle;
+ delete context;
+ return 0;
+}
+
+int32_t HapticGeneratorLib_GetDescriptor(const effect_uuid_t *uuid,
+ effect_descriptor_t *descriptor) {
+
+ if (descriptor == nullptr || uuid == nullptr) {
+ ALOGE("%s() called with NULL pointer", __func__);
+ return -EINVAL;
+ }
+
+ if (memcmp(uuid, &gHgDescriptor.uuid, sizeof(*uuid)) == 0) {
+ *descriptor = gHgDescriptor;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*--- Effect Control Interface Implementation ---*/
+
+int32_t HapticGenerator_Process(effect_handle_t self,
+ audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
+ HapticGeneratorContext *context = (HapticGeneratorContext *) self;
+
+ if (inBuffer == nullptr || inBuffer->raw == nullptr
+ || outBuffer == nullptr || outBuffer->raw == nullptr) {
+ return 0;
+ }
+
+ // The audio data must not be modified but just written to
+ // output buffer according to the access mode.
+ size_t audioBytes = context->audioDataBytesPerFrame * inBuffer->frameCount;
+ size_t audioSampleCount = inBuffer->frameCount * context->param.audioChannelCount;
+ if (inBuffer->raw != outBuffer->raw) {
+ if (context->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE) {
+ for (size_t i = 0; i < audioSampleCount; ++i) {
+ outBuffer->f32[i] += inBuffer->f32[i];
+ }
+ } else {
+ memcpy(outBuffer->raw, inBuffer->raw, audioBytes);
+ }
+ }
+
+ if (context->state != HAPTICGENERATOR_STATE_ACTIVE) {
+ ALOGE("State(%d) is not HAPTICGENERATOR_STATE_ACTIVE when calling %s",
+ context->state, __func__);
+ return -ENODATA;
+ }
+
+ if (context->param.maxHapticIntensity == os::HapticScale::MUTE) {
+ // Haptic channels are muted, no need to generate haptic data.
+ return 0;
+ }
+
+ // Resize buffer if the haptic sample count is greater than buffer size.
+ size_t hapticSampleCount = inBuffer->frameCount * context->param.hapticChannelCount;
+ if (hapticSampleCount > context->inputBuffer.size()) {
+ // The context->inputBuffer and context->outputBuffer must have the same size,
+ // which must be at least the haptic sample count.
+ context->inputBuffer.resize(hapticSampleCount);
+ context->outputBuffer.resize(hapticSampleCount);
+ }
+
+ // Construct input buffer according to haptic channel source
+ for (size_t i = 0; i < inBuffer->frameCount; ++i) {
+ for (size_t j = 0; j < context->param.hapticChannelCount; ++j) {
+ context->inputBuffer[i * context->param.hapticChannelCount + j] =
+ inBuffer->f32[i * context->param.audioChannelCount
+ + context->param.hapticChannelSource[j]];
+ }
+ }
+
+ float* hapticOutBuffer = HapticGenerator_runProcessingChain(
+ context->processingChain, context->inputBuffer.data(),
+ context->outputBuffer.data(), inBuffer->frameCount);
+ os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity);
+
+ // For haptic data, the haptic playback thread will copy the data from effect input buffer,
+ // which contains haptic data at the end of the buffer, directly to sink buffer.
+ // In that case, copy haptic data to input buffer instead of output buffer.
+ // Note: this may not work with rpc/binder calls
+ memcpy_by_audio_format(static_cast<char*>(inBuffer->raw) + audioBytes,
+ static_cast<audio_format_t>(context->config.outputCfg.format),
+ hapticOutBuffer,
+ AUDIO_FORMAT_PCM_FLOAT,
+ hapticSampleCount);
+
+ return 0;
+}
+
+int32_t HapticGenerator_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize,
+ void *cmdData, uint32_t *replySize, void *replyData) {
+ HapticGeneratorContext *context = (HapticGeneratorContext *) self;
+
+ if (context == nullptr || context->state == HAPTICGENERATOR_STATE_UNINITIALIZED) {
+ return -EINVAL;
+ }
+
+ ALOGV("HapticGenerator_Command command %u cmdSize %u", cmdCode, cmdSize);
+
+ switch (cmdCode) {
+ case EFFECT_CMD_INIT:
+ if (replyData == nullptr || replySize == nullptr || *replySize != sizeof(int)) {
+ return -EINVAL;
+ }
+ *(int *) replyData = HapticGenerator_Init(context);
+ break;
+
+ case EFFECT_CMD_SET_CONFIG:
+ if (cmdData == nullptr || cmdSize != sizeof(effect_config_t)
+ || replyData == nullptr || replySize == nullptr || *replySize != sizeof(int)) {
+ return -EINVAL;
+ }
+ *(int *) replyData = HapticGenerator_Configure(
+ context, (effect_config_t *) cmdData);
+ break;
+
+ case EFFECT_CMD_RESET:
+ HapticGenerator_Reset(context);
+ break;
+
+ case EFFECT_CMD_GET_PARAM:
+ ALOGV("HapticGenerator_Command EFFECT_CMD_GET_PARAM cmdData %p,"
+ "*replySize %u, replyData: %p",
+ cmdData, *replySize, replyData);
+ break;
+
+ case EFFECT_CMD_SET_PARAM: {
+ ALOGV("HapticGenerator_Command EFFECT_CMD_SET_PARAM cmdSize %d cmdData %p, "
+ "*replySize %u, replyData %p", cmdSize, cmdData,
+ replySize ? *replySize : 0, replyData);
+ if (cmdData == nullptr || (cmdSize < (int) (sizeof(effect_param_t) + sizeof(int32_t)))
+ || replyData == nullptr || replySize == nullptr ||
+ *replySize != (int) sizeof(int32_t)) {
+ return -EINVAL;
+ }
+ effect_param_t *cmd = (effect_param_t *) cmdData;
+ *(int *) replyData = HapticGenerator_SetParameter(
+ context, *(int32_t *) cmd->data, cmd->vsize, cmd->data + sizeof(int32_t));
+ }
+ break;
+
+ case EFFECT_CMD_ENABLE:
+ if (replyData == nullptr || replySize == nullptr || *replySize != sizeof(int)) {
+ return -EINVAL;
+ }
+ if (context->state != HAPTICGENERATOR_STATE_INITIALIZED) {
+ return -ENOSYS;
+ }
+ context->state = HAPTICGENERATOR_STATE_ACTIVE;
+ ALOGV("EFFECT_CMD_ENABLE() OK");
+ *(int *) replyData = 0;
+ break;
+
+ case EFFECT_CMD_DISABLE:
+ if (replyData == nullptr || replySize == nullptr || *replySize != sizeof(int)) {
+ return -EINVAL;
+ }
+ if (context->state != HAPTICGENERATOR_STATE_ACTIVE) {
+ return -ENOSYS;
+ }
+ context->state = HAPTICGENERATOR_STATE_INITIALIZED;
+ ALOGV("EFFECT_CMD_DISABLE() OK");
+ *(int *) replyData = 0;
+ break;
+
+ case EFFECT_CMD_SET_VOLUME:
+ case EFFECT_CMD_SET_DEVICE:
+ case EFFECT_CMD_SET_AUDIO_MODE:
+ break;
+
+ default:
+ ALOGW("HapticGenerator_Command invalid command %u", cmdCode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int32_t HapticGenerator_GetDescriptor(effect_handle_t self, effect_descriptor_t *descriptor) {
+ HapticGeneratorContext *context = (HapticGeneratorContext *) self;
+
+ if (context == nullptr ||
+ context->state == HAPTICGENERATOR_STATE_UNINITIALIZED) {
+ return -EINVAL;
+ }
+
+ memcpy(descriptor, &gHgDescriptor, sizeof(effect_descriptor_t));
+
+ return 0;
+}
+
+} // namespace android::audio_effect::haptic_generator
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
new file mode 100644
index 0000000..57b4338
--- /dev/null
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_EFFECTHAPTICGENERATOR_H_
+#define ANDROID_EFFECTHAPTICGENERATOR_H_
+
+#include <functional>
+#include <vector>
+#include <map>
+
+#include <hardware/audio_effect.h>
+#include <system/audio_effect.h>
+#include <vibrator/ExternalVibrationUtils.h>
+
+#include "Processors.h"
+
+namespace android::audio_effect::haptic_generator {
+
+//-----------------------------------------------------------------------------
+// Definition
+//-----------------------------------------------------------------------------
+
+enum hapticgenerator_state_t {
+ HAPTICGENERATOR_STATE_UNINITIALIZED,
+ HAPTICGENERATOR_STATE_INITIALIZED,
+ HAPTICGENERATOR_STATE_ACTIVE,
+};
+
+// parameters for each haptic generator
+struct HapticGeneratorParam {
+ uint32_t hapticChannelSource[2]; // The audio channels used to generate haptic channels.
+ // The first channel will be used to generate HAPTIC_A,
+ // The second channel will be used to generate HAPTIC_B
+ // The value will be offset of audio channel
+ uint32_t audioChannelCount;
+ uint32_t hapticChannelCount;
+
+ // A map from track id to haptic intensity.
+ std::map<int, os::HapticScale> id2Intensity;
+ os::HapticScale maxHapticIntensity; // max intensity will be used to scale haptic data.
+};
+
+// A structure to keep all shared pointers for all processors in HapticGenerator.
+struct HapticGeneratorProcessorsRecord {
+ std::vector<std::shared_ptr<HapticBiquadFilter>> filters;
+ std::vector<std::shared_ptr<Ramp>> ramps;
+ std::vector<std::shared_ptr<SlowEnvelope>> slowEnvs;
+};
+
+// A structure to keep all the context for HapticGenerator.
+struct HapticGeneratorContext {
+ const struct effect_interface_s *itfe;
+ effect_config_t config;
+ hapticgenerator_state_t state;
+ struct HapticGeneratorParam param;
+ size_t audioDataBytesPerFrame;
+
+ // A cache for all shared pointers of the HapticGenerator
+ struct HapticGeneratorProcessorsRecord processorsRecord;
+
+ // Using a vector of functions to record the processing chain for haptic-generating algorithm.
+ // The three parameters of the processing functions are pointer to output buffer, pointer to
+ // input buffer and frame count.
+ std::vector<std::function<void(float*, const float*, size_t)>> processingChain;
+
+ // inputBuffer is where to keep input buffer for the generating algorithm. It will be
+ // constructed according to HapticGeneratorParam.hapticChannelSource.
+ std::vector<float> inputBuffer;
+
+ // outputBuffer is a buffer having the same length as inputBuffer. It can be used as
+ // intermediate buffer in the generating algorithm.
+ std::vector<float> outputBuffer;
+};
+
+//-----------------------------------------------------------------------------
+// Effect API
+//-----------------------------------------------------------------------------
+
+int32_t HapticGeneratorLib_Create(const effect_uuid_t *uuid,
+ int32_t sessionId,
+ int32_t ioId,
+ effect_handle_t *handle);
+
+int32_t HapticGeneratorLib_Release(effect_handle_t handle);
+
+int32_t HapticGeneratorLib_GetDescriptor(const effect_uuid_t *uuid,
+ effect_descriptor_t *descriptor);
+
+int32_t HapticGenerator_Process(effect_handle_t self,
+ audio_buffer_t *inBuffer,
+ audio_buffer_t *outBuffer);
+
+int32_t HapticGenerator_Command(effect_handle_t self,
+ uint32_t cmdCode,
+ uint32_t cmdSize,
+ void *cmdData,
+ uint32_t *replySize,
+ void *replyData);
+
+int32_t HapticGenerator_GetDescriptor(effect_handle_t self,
+ effect_descriptor_t *descriptor);
+
+} // namespace android::audio_effect::haptic_generator
+
+#endif // ANDROID_EFFECTHAPTICGENERATOR_H_
diff --git a/media/libeffects/hapticgenerator/MODULE_LICENSE_APACHE2 b/media/libeffects/hapticgenerator/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/media/libeffects/hapticgenerator/MODULE_LICENSE_APACHE2
diff --git a/media/libeffects/hapticgenerator/Processors.cpp b/media/libeffects/hapticgenerator/Processors.cpp
new file mode 100644
index 0000000..3157b35
--- /dev/null
+++ b/media/libeffects/hapticgenerator/Processors.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHG_Processors"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <assert.h>
+
+#include <cmath>
+
+#include "Processors.h"
+
+#if defined(__aarch64__) || defined(__ARM_NEON__)
+#ifndef USE_NEON
+#define USE_NEON (true)
+#endif
+#else
+#define USE_NEON (false)
+#endif
+#if USE_NEON
+#include <arm_neon.h>
+#endif
+
+namespace android::audio_effect::haptic_generator {
+
+float getRealPoleZ(float cornerFrequency, float sampleRate) {
+ // This will be a pole of a first order filter.
+ float realPoleS = -2 * M_PI * cornerFrequency;
+ return exp(realPoleS / sampleRate); // zero-pole matching
+}
+
+std::pair<float, float> getComplexPoleZ(float ringingFrequency, float q, float sampleRate) {
+ // This is the pole for 1/(s^2 + s/q + 1) in normalized frequency. The other pole is
+ // the complex conjugate of this.
+ float poleImagS = 2 * M_PI * ringingFrequency;
+ float poleRealS = -poleImagS / (2 * q);
+ float poleRadius = exp(poleRealS / sampleRate);
+ float poleImagZ = poleRadius * sin(poleImagS / sampleRate);
+ float poleRealZ = poleRadius * cos(poleImagS / sampleRate);
+ return {poleRealZ, poleImagZ};
+}
+
+// Implementation of Ramp
+
+Ramp::Ramp(size_t channelCount) : mChannelCount(channelCount) {}
+
+void Ramp::process(float *out, const float *in, size_t frameCount) {
+ size_t i = 0;
+#if USE_NEON
+ size_t sampleCount = frameCount * mChannelCount;
+ float32x2_t allZero = vdup_n_f32(0.0f);
+ while (i + 1 < sampleCount) {
+ vst1_f32(out, vmax_f32(vld1_f32(in), allZero));
+ in += 2;
+ out += 2;
+ i += 2;
+ }
+#endif // USE_NEON
+ for (; i < frameCount * mChannelCount; ++i) {
+ *out = *in >= 0.0f ? *in : 0.0f;
+ out++;
+ in++;
+ }
+}
+
+// Implementation of SlowEnvelope
+
+SlowEnvelope::SlowEnvelope(
+ float cornerFrequency,
+ float sampleRate,
+ float normalizationPower,
+ size_t channelCount)
+ : mLpf(createLPF(cornerFrequency, sampleRate, channelCount)),
+ mNormalizationPower(normalizationPower),
+ mChannelCount(channelCount),
+ mEnv(0.25 * (sampleRate / (2 * M_PI * cornerFrequency))) {}
+
+void SlowEnvelope::process(float* out, const float* in, size_t frameCount) {
+ size_t sampleCount = frameCount * mChannelCount;
+ if (sampleCount > mLpfInBuffer.size()) {
+ mLpfInBuffer.resize(sampleCount, mEnv);
+ mLpfOutBuffer.resize(sampleCount);
+ }
+ mLpf->process(mLpfOutBuffer.data(), mLpfInBuffer.data(), frameCount);
+ for (size_t i = 0; i < sampleCount; ++i) {
+ *out = *in * pow(mLpfOutBuffer[i], mNormalizationPower);
+ out++;
+ in++;
+ }
+}
+
+void SlowEnvelope::clear() {
+ mLpf->clear();
+}
+
+// Implementation of helper functions
+
+BiquadFilterCoefficients cascadeFirstOrderFilters(const BiquadFilterCoefficients &coefs1,
+ const BiquadFilterCoefficients &coefs2) {
+ assert(coefs1[2] == 0.0f);
+ assert(coefs2[2] == 0.0f);
+ assert(coefs1[4] == 0.0f);
+ assert(coefs2[4] == 0.0f);
+ return {coefs1[0] * coefs2[0],
+ coefs1[0] * coefs2[1] + coefs1[1] * coefs2[0],
+ coefs1[1] * coefs2[1],
+ coefs1[3] + coefs2[3],
+ coefs1[3] * coefs2[3]};
+}
+
+BiquadFilterCoefficients lpfCoefs(const float cornerFrequency, const float sampleRate) {
+ BiquadFilterCoefficients coefficient;
+ float realPoleZ = getRealPoleZ(cornerFrequency, sampleRate);
+ // This is a zero at nyquist
+ coefficient[0] = 0.5f * (1 - realPoleZ);
+ coefficient[1] = coefficient[0];
+ coefficient[2] = 0.0f;
+ coefficient[3] = -realPoleZ; // This is traditional 1/(s+1) filter
+ coefficient[4] = 0.0f;
+ return coefficient;
+}
+
+std::shared_ptr<HapticBiquadFilter> createLPF(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount) {
+ BiquadFilterCoefficients coefficient = lpfCoefs(cornerFrequency, sampleRate);
+ return std::make_shared<HapticBiquadFilter>(channelCount, coefficient);
+}
+
+std::shared_ptr<HapticBiquadFilter> createLPF2(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount) {
+ BiquadFilterCoefficients coefficient = lpfCoefs(cornerFrequency, sampleRate);
+ return std::make_shared<HapticBiquadFilter>(
+ channelCount, cascadeFirstOrderFilters(coefficient, coefficient));
+}
+
+std::shared_ptr<HapticBiquadFilter> createHPF2(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount) {
+ BiquadFilterCoefficients coefficient;
+ // Note: this is valid only when corner frequency is less than nyquist / 2.
+ float realPoleZ = getRealPoleZ(cornerFrequency, sampleRate);
+
+ // Note: this is a zero at DC
+ coefficient[0] = 0.5f * (1 + realPoleZ);
+ coefficient[1] = -coefficient[0];
+ coefficient[2] = 0.0f;
+ coefficient[3] = -realPoleZ;
+ coefficient[4] = 0.0f;
+ return std::make_shared<HapticBiquadFilter>(
+ channelCount, cascadeFirstOrderFilters(coefficient, coefficient));
+}
+
+BiquadFilterCoefficients apfCoefs(const float cornerFrequency, const float sampleRate) {
+ BiquadFilterCoefficients coefficient;
+ float realPoleZ = getRealPoleZ(cornerFrequency, sampleRate);
+ float zeroZ = 1.0f / realPoleZ;
+ coefficient[0] = (1.0f - realPoleZ) / (1.0f - zeroZ);
+ coefficient[1] = -coefficient[0] * zeroZ;
+ coefficient[2] = 0.0f;
+ coefficient[3] = -realPoleZ;
+ coefficient[4] = 0.0f;
+ return coefficient;
+}
+
+std::shared_ptr<HapticBiquadFilter> createAPF(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount) {
+ BiquadFilterCoefficients coefficient = apfCoefs(cornerFrequency, sampleRate);
+ return std::make_shared<HapticBiquadFilter>(channelCount, coefficient);
+}
+
+std::shared_ptr<HapticBiquadFilter> createAPF2(const float cornerFrequency1,
+ const float cornerFrequency2,
+ const float sampleRate,
+ const size_t channelCount) {
+ BiquadFilterCoefficients coefs1 = apfCoefs(cornerFrequency1, sampleRate);
+ BiquadFilterCoefficients coefs2 = apfCoefs(cornerFrequency2, sampleRate);
+ return std::make_shared<HapticBiquadFilter>(
+ channelCount, cascadeFirstOrderFilters(coefs1, coefs2));
+}
+
+std::shared_ptr<HapticBiquadFilter> createBPF(const float ringingFrequency,
+ const float q,
+ const float sampleRate,
+ const size_t channelCount) {
+ BiquadFilterCoefficients coefficient;
+ const auto [real, img] = getComplexPoleZ(ringingFrequency, q, sampleRate);
+ // Note: this is not a standard cookbook BPF, but a low pass filter with zero at DC
+ coefficient[0] = 1.0f;
+ coefficient[1] = -1.0f;
+ coefficient[2] = 0.0f;
+ coefficient[3] = -2 * real;
+ coefficient[4] = real * real + img * img;
+ return std::make_shared<HapticBiquadFilter>(channelCount, coefficient);
+}
+
+std::shared_ptr<HapticBiquadFilter> createBSF(const float ringingFrequency,
+ const float zq,
+ const float pq,
+ const float sampleRate,
+ const size_t channelCount) {
+ BiquadFilterCoefficients coefficient;
+ const auto [zeroReal, zeroImg] = getComplexPoleZ(ringingFrequency, zq, sampleRate);
+ float zeroCoeff1 = -2 * zeroReal;
+    float zeroCoeff2 = zeroReal * zeroReal + zeroImg * zeroImg;
+ const auto [poleReal, poleImg] = getComplexPoleZ(ringingFrequency, pq, sampleRate);
+ float poleCoeff1 = -2 * poleReal;
+ float poleCoeff2 = poleReal * poleReal + poleImg * poleImg;
+ const float norm = (1.0f + poleCoeff1 + poleCoeff2) / (1.0f + zeroCoeff1 + zeroCoeff2);
+ coefficient[0] = 1.0f * norm;
+ coefficient[1] = zeroCoeff1 * norm;
+ coefficient[2] = zeroCoeff2 * norm;
+ coefficient[3] = poleCoeff1;
+ coefficient[4] = poleCoeff2;
+ return std::make_shared<HapticBiquadFilter>(channelCount, coefficient);
+}
+
+} // namespace android::audio_effect::haptic_generator
diff --git a/media/libeffects/hapticgenerator/Processors.h b/media/libeffects/hapticgenerator/Processors.h
new file mode 100644
index 0000000..5cf0557
--- /dev/null
+++ b/media/libeffects/hapticgenerator/Processors.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _EFFECT_HAPTIC_GENERATOR_PROCESSORS_H_
+#define _EFFECT_HAPTIC_GENERATOR_PROCESSORS_H_
+
+#include <sys/types.h>
+
+#include <memory>
+#include <vector>
+
+#include <audio_utils/BiquadFilter.h>
+
+using HapticBiquadFilter = android::audio_utils::BiquadFilter<float>;
+using BiquadFilterCoefficients = std::array<float, android::audio_utils::kBiquadNumCoefs>;
+
+namespace android::audio_effect::haptic_generator {
+
+// A class providing a process function that makes input data non-negative.
+class Ramp {
+public:
+ explicit Ramp(size_t channelCount);
+
+ void process(float *out, const float *in, size_t frameCount);
+
+private:
+ const size_t mChannelCount;
+};
+
+
+class SlowEnvelope {
+public:
+ SlowEnvelope(float cornerFrequency, float sampleRate,
+ float normalizationPower, size_t channelCount);
+
+ void process(float *out, const float *in, size_t frameCount);
+
+ void clear();
+
+private:
+ const std::shared_ptr<HapticBiquadFilter> mLpf;
+ std::vector<float> mLpfInBuffer;
+ std::vector<float> mLpfOutBuffer;
+ const float mNormalizationPower;
+    const size_t mChannelCount;
+ const float mEnv;
+};
+
+// Helper functions
+
+BiquadFilterCoefficients cascadeFirstOrderFilters(const BiquadFilterCoefficients &coefs1,
+ const BiquadFilterCoefficients &coefs2);
+
+std::shared_ptr<HapticBiquadFilter> createLPF(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount);
+
+// Create two cascaded LPF with same corner frequency.
+std::shared_ptr<HapticBiquadFilter> createLPF2(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount);
+
+// Create two cascaded HPF with same corner frequency.
+std::shared_ptr<HapticBiquadFilter> createHPF2(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount);
+
+std::shared_ptr<HapticBiquadFilter> createAPF(const float cornerFrequency,
+ const float sampleRate,
+ const size_t channelCount);
+
+// Create two cascaded APF with two different corner frequency.
+std::shared_ptr<HapticBiquadFilter> createAPF2(const float cornerFrequency1,
+ const float cornerFrequency2,
+ const float sampleRate,
+ const size_t channelCount);
+
+std::shared_ptr<HapticBiquadFilter> createBPF(const float ringingFrequency,
+ const float q,
+ const float sampleRate,
+ const size_t channelCount);
+
+std::shared_ptr<HapticBiquadFilter> createBSF(const float ringingFrequency,
+ const float zq,
+ const float pq,
+ const float sampleRate,
+ const size_t channelCount);
+
+} // namespace android::audio_effect::haptic_generator
+
+#endif // _EFFECT_HAPTIC_GENERATOR_PROCESSORS_H_
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 973a164..670b415 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -3394,8 +3394,8 @@
return -EINVAL;
}
- uint32_t device = *(uint32_t*)pCmdData;
- pContext->pBundledContext->nOutputDevice = (audio_devices_t)device;
+ audio_devices_t device = *(audio_devices_t *)pCmdData;
+ pContext->pBundledContext->nOutputDevice = device;
if (pContext->EffectType == LVM_BASS_BOOST) {
if ((device == AUDIO_DEVICE_OUT_SPEAKER) ||
diff --git a/media/libeffects/visualizer/Android.bp b/media/libeffects/visualizer/Android.bp
new file mode 100644
index 0000000..f6c585e
--- /dev/null
+++ b/media/libeffects/visualizer/Android.bp
@@ -0,0 +1,32 @@
+// Visualizer library
+cc_library_shared {
+ name: "libvisualizer",
+
+ vendor: true,
+
+ srcs: [
+ "EffectVisualizer.cpp",
+ ],
+
+ cflags: [
+ "-O2",
+ "-fvisibility=hidden",
+
+ "-DBUILD_FLOAT",
+ "-DSUPPORT_MC",
+
+ "-Wall",
+ "-Werror",
+ ],
+
+ shared_libs: [
+ "liblog",
+ ],
+
+ relative_install_path: "soundfx",
+
+ header_libs: [
+ "libaudioeffects",
+ "libaudioutils_headers",
+ ],
+}
diff --git a/media/libeffects/visualizer/Android.mk b/media/libeffects/visualizer/Android.mk
deleted file mode 100644
index 35e2f3d..0000000
--- a/media/libeffects/visualizer/Android.mk
+++ /dev/null
@@ -1,28 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-# Visualizer library
-include $(CLEAR_VARS)
-
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES:= \
- EffectVisualizer.cpp
-
-LOCAL_CFLAGS+= -O2 -fvisibility=hidden
-LOCAL_CFLAGS += -Wall -Werror
-LOCAL_CFLAGS += -DBUILD_FLOAT -DSUPPORT_MC
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- liblog \
- libdl
-
-LOCAL_MODULE_RELATIVE_PATH := soundfx
-LOCAL_MODULE:= libvisualizer
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-effects) \
- $(call include-path-for, audio-utils)
-
-
-LOCAL_HEADER_LIBRARIES += libhardware_headers
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 1caee04..39523de 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -49,28 +49,6 @@
path: "aidl",
}
-filegroup {
- name: "resourcemanager_aidl",
- srcs: [
- "aidl/android/media/IResourceManagerClient.aidl",
- "aidl/android/media/IResourceManagerService.aidl",
- "aidl/android/media/MediaResourceType.aidl",
- "aidl/android/media/MediaResourceSubType.aidl",
- "aidl/android/media/MediaResourceParcel.aidl",
- "aidl/android/media/MediaResourcePolicyParcel.aidl",
- ],
- path: "aidl",
-}
-
-aidl_interface {
- name: "resourcemanager_aidl_interface",
- unstable: true,
- local_include_dir: "aidl",
- srcs: [
- ":resourcemanager_aidl",
- ],
-}
-
cc_library_shared {
name: "libmedia_omx",
vendor_available: true,
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 20bc23d..c08d187 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -40,6 +40,7 @@
SET_DATA_SOURCE_FD,
SET_DATA_SOURCE_STREAM,
SET_DATA_SOURCE_CALLBACK,
+ SET_DATA_SOURCE_RTP,
SET_BUFFERING_SETTINGS,
GET_BUFFERING_SETTINGS,
PREPARE_ASYNC,
@@ -161,6 +162,15 @@
return reply.readInt32();
}
+ status_t setDataSource(const String8& rtpParams) {
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+ data.writeString8(rtpParams);
+ remote()->transact(SET_DATA_SOURCE_RTP, data, &reply);
+
+ return reply.readInt32();
+ }
+
// pass the buffered IGraphicBufferProducer to the media player service
status_t setVideoSurfaceTexture(const sp<IGraphicBufferProducer>& bufferProducer)
{
@@ -685,6 +695,12 @@
}
return NO_ERROR;
}
+ case SET_DATA_SOURCE_RTP: {
+ CHECK_INTERFACE(IMediaPlayer, data, reply);
+ String8 rtpParams = data.readString8();
+ reply->writeInt32(setDataSource(rtpParams));
+ return NO_ERROR;
+ }
case SET_VIDEO_SURFACETEXTURE: {
CHECK_INTERFACE(IMediaPlayer, data, reply);
sp<IGraphicBufferProducer> bufferProducer =
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 8be961c..e8839ba 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -63,7 +63,7 @@
searchDirs[1] + fileName,
searchDirs[2] + fileName,
searchDirs[3] + fileName,
- "system/etc/media_profiles_V1_0.xml" // System fallback
+ "system/etc/media_profiles.xml" // System fallback
};
}();
static std::array<char const*, 5> const cPaths = {
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
index fe86d27..ec52a49 100644
--- a/media/libmedia/MediaResource.cpp
+++ b/media/libmedia/MediaResource.cpp
@@ -43,11 +43,11 @@
}
//static
-MediaResource MediaResource::CodecResource(bool secure, bool video) {
+MediaResource MediaResource::CodecResource(bool secure, bool video, int64_t instanceCount) {
return MediaResource(
secure ? Type::kSecureCodec : Type::kNonSecureCodec,
video ? SubType::kVideoCodec : SubType::kAudioCodec,
- 1);
+ instanceCount);
}
//static
diff --git a/media/libmedia/include/media/IMediaPlayer.h b/media/libmedia/include/media/IMediaPlayer.h
index a4c0ec6..3548a1e 100644
--- a/media/libmedia/include/media/IMediaPlayer.h
+++ b/media/libmedia/include/media/IMediaPlayer.h
@@ -59,6 +59,7 @@
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
virtual status_t setDataSource(const sp<IStreamSource>& source) = 0;
virtual status_t setDataSource(const sp<IDataSource>& source) = 0;
+ virtual status_t setDataSource(const String8& rtpParams) = 0;
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
virtual status_t getBufferingSettings(
diff --git a/media/libmedia/include/media/MediaResource.h b/media/libmedia/include/media/MediaResource.h
index 4927d28..4712528 100644
--- a/media/libmedia/include/media/MediaResource.h
+++ b/media/libmedia/include/media/MediaResource.h
@@ -37,7 +37,7 @@
MediaResource(Type type, SubType subType, int64_t value);
MediaResource(Type type, const std::vector<uint8_t> &id, int64_t value);
- static MediaResource CodecResource(bool secure, bool video);
+ static MediaResource CodecResource(bool secure, bool video, int64_t instanceCount = 1);
static MediaResource GraphicMemoryResource(int64_t value);
static MediaResource CpuBoostResource();
static MediaResource VideoBatteryResource();
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index 138a014..1fe6ffc 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -73,6 +73,7 @@
METADATA_KEY_COLOR_RANGE = 37,
METADATA_KEY_SAMPLERATE = 38,
METADATA_KEY_BITS_PER_SAMPLE = 39,
+ METADATA_KEY_VIDEO_CODEC_MIME_TYPE = 40,
// Add more here...
};
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
index 7c29e50..71c0bc5 100644
--- a/media/libmedia/include/media/mediaplayer.h
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -62,6 +62,7 @@
MEDIA_META_DATA = 202,
MEDIA_DRM_INFO = 210,
MEDIA_TIME_DISCONTINUITY = 211,
+ MEDIA_IMS_RX_NOTICE = 300,
MEDIA_AUDIO_ROUTING_CHANGED = 10000,
};
@@ -179,7 +180,10 @@
KEY_PARAMETER_PLAYBACK_RATE_PERMILLE = 1300, // set only
// Set a Parcel containing the value of a parcelled Java AudioAttribute instance
- KEY_PARAMETER_AUDIO_ATTRIBUTES = 1400 // set only
+ KEY_PARAMETER_AUDIO_ATTRIBUTES = 1400, // set only
+
+ // Set a Parcel containing the values of RTP attribute
+ KEY_PARAMETER_RTP_ATTRIBUTES = 2000 // set only
};
// Keep INVOKE_ID_* in sync with MediaPlayer.java.
@@ -219,6 +223,7 @@
status_t setDataSource(int fd, int64_t offset, int64_t length);
status_t setDataSource(const sp<IDataSource> &source);
+ status_t setDataSource(const String8& rtpParams);
status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer);
status_t setListener(const sp<MediaPlayerListener>& listener);
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index 6e2d94d..fbcdb28 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -291,6 +291,8 @@
bool mIsOutputFileSet;
Mutex mLock;
Mutex mNotifyLock;
+
+ output_format mOutputFormat;
};
}; // namespace android
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 6079a2d..30c5006 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -195,6 +195,22 @@
return err;
}
+status_t MediaPlayer::setDataSource(const String8& rtpParams)
+{
+ ALOGV("setDataSource(rtpParams)");
+ status_t err = UNKNOWN_ERROR;
+ const sp<IMediaPlayerService> service(getMediaPlayerService());
+ if (service != 0) {
+ sp<IMediaPlayer> player(service->create(this, mAudioSessionId, mOpPackageName));
+ if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
+ (NO_ERROR != player->setDataSource(rtpParams))) {
+ player.clear();
+ }
+ err = attachNewPlayer(player);
+ }
+ return err;
+}
+
status_t MediaPlayer::invoke(const Parcel& request, Parcel *reply)
{
Mutex::Autolock _l(mLock);
@@ -943,6 +959,9 @@
case MEDIA_META_DATA:
ALOGV("Received timed metadata message");
break;
+ case MEDIA_IMS_RX_NOTICE:
+ ALOGV("Received IMS Rx notice message");
+ break;
default:
ALOGV("unrecognized message: (%d, %d, %d)", msg, ext1, ext2);
break;
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 70655d5..d9d1f25 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -244,6 +244,7 @@
mCurrentState = MEDIA_RECORDER_ERROR;
return ret;
}
+ mOutputFormat = (output_format)of;
mCurrentState = MEDIA_RECORDER_DATASOURCE_CONFIGURED;
return ret;
}
@@ -479,6 +480,13 @@
(MEDIA_RECORDER_PREPARED |
MEDIA_RECORDER_RECORDING |
MEDIA_RECORDER_ERROR));
+
+ // For RTP video, parameter should be set dynamically.
+ if (isInvalidState) {
+ if (mCurrentState == MEDIA_RECORDER_RECORDING &&
+ mOutputFormat == OUTPUT_FORMAT_RTP_AVP)
+ isInvalidState = false;
+ }
if (isInvalidState) {
ALOGE("setParameters is called in an invalid state: %d", mCurrentState);
return INVALID_OPERATION;
@@ -737,6 +745,7 @@
mIsAudioEncoderSet = false;
mIsVideoEncoderSet = false;
mIsOutputFileSet = false;
+ mOutputFormat = OUTPUT_FORMAT_DEFAULT;
}
// Release should be OK in any state
diff --git a/media/libmediahelper/Android.bp b/media/libmediahelper/Android.bp
index b46c98a..0779a8e 100644
--- a/media/libmediahelper/Android.bp
+++ b/media/libmediahelper/Android.bp
@@ -18,7 +18,11 @@
enabled: true,
},
double_loadable: true,
- srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
+ srcs: [
+ "AudioParameter.cpp",
+ "AudioSanitizer.cpp",
+ "TypeConverter.cpp",
+ ],
cflags: [
"-Werror",
"-Wextra",
diff --git a/media/libmediahelper/AudioSanitizer.cpp b/media/libmediahelper/AudioSanitizer.cpp
new file mode 100644
index 0000000..44ca956
--- /dev/null
+++ b/media/libmediahelper/AudioSanitizer.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/AudioSanitizer.h>
+
+namespace android {
+
+ /** returns true if string overflow was prevented by zero termination */
+template <size_t size>
+bool preventStringOverflow(char (&s)[size]) {
+ if (strnlen(s, size) < size) return false;
+ s[size - 1] = '\0';
+ return true;
+}
+
+status_t safetyNetLog(status_t status, const char *bugNumber) {
+ if (status != NO_ERROR && bugNumber != nullptr) {
+ android_errorWriteLog(0x534e4554, bugNumber); // SafetyNet logging
+ }
+ return status;
+}
+
+status_t AudioSanitizer::sanitizeAudioAttributes(
+ audio_attributes_t *attr, const char *bugNumber)
+{
+ status_t status = NO_ERROR;
+ const size_t tagsMaxSize = AUDIO_ATTRIBUTES_TAGS_MAX_SIZE;
+ if (strnlen(attr->tags, tagsMaxSize) >= tagsMaxSize) {
+ status = BAD_VALUE;
+ }
+ attr->tags[tagsMaxSize - 1] = '\0';
+ return safetyNetLog(status, bugNumber);
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t AudioSanitizer::sanitizeEffectDescriptor(
+ effect_descriptor_t *desc, const char *bugNumber)
+{
+ status_t status = NO_ERROR;
+ if (preventStringOverflow(desc->name)
+ | /* always */ preventStringOverflow(desc->implementor)) {
+ status = BAD_VALUE;
+ }
+ return safetyNetLog(status, bugNumber);
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t AudioSanitizer::sanitizeAudioPortConfig(
+ struct audio_port_config *config, const char *bugNumber)
+{
+ status_t status = NO_ERROR;
+ if (config->type == AUDIO_PORT_TYPE_DEVICE &&
+ preventStringOverflow(config->ext.device.address)) {
+ status = BAD_VALUE;
+ }
+ return safetyNetLog(status, bugNumber);
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t AudioSanitizer::sanitizeAudioPort(
+ struct audio_port *port, const char *bugNumber)
+{
+ status_t status = NO_ERROR;
+ if (preventStringOverflow(port->name)) {
+ status = BAD_VALUE;
+ }
+ if (sanitizeAudioPortConfig(&port->active_config) != NO_ERROR) {
+ status = BAD_VALUE;
+ }
+ if (port->type == AUDIO_PORT_TYPE_DEVICE &&
+ preventStringOverflow(port->ext.device.address)) {
+ status = BAD_VALUE;
+ }
+ return safetyNetLog(status, bugNumber);
+}
+
+/** returns BAD_VALUE if sanitization was required. */
+status_t AudioSanitizer::sanitizeAudioPatch(
+ struct audio_patch *patch, const char *bugNumber)
+{
+ status_t status = NO_ERROR;
+ if (patch->num_sources > AUDIO_PATCH_PORTS_MAX) {
+ patch->num_sources = AUDIO_PATCH_PORTS_MAX;
+ status = BAD_VALUE;
+ }
+ if (patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+ patch->num_sinks = AUDIO_PATCH_PORTS_MAX;
+ status = BAD_VALUE;
+ }
+ for (size_t i = 0; i < patch->num_sources; i++) {
+ if (sanitizeAudioPortConfig(&patch->sources[i]) != NO_ERROR) {
+ status = BAD_VALUE;
+ }
+ }
+ for (size_t i = 0; i < patch->num_sinks; i++) {
+ if (sanitizeAudioPortConfig(&patch->sinks[i]) != NO_ERROR) {
+ status = BAD_VALUE;
+ }
+ }
+ return safetyNetLog(status, bugNumber);
+}
+
+}; // namespace android
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index 705959a..876dc45 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -32,18 +32,21 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+ // TODO(mnaganov): Remove from here, use 'audio_is_bluetooth_out_sco_device' function.
+ { "AUDIO_DEVICE_OUT_ALL_SCO", static_cast<audio_devices_t>(AUDIO_DEVICE_OUT_ALL_SCO) },
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+ // TODO(mnaganov): Remove from here, use 'audio_is_a2dp_out_device' function.
+ { "AUDIO_DEVICE_OUT_ALL_A2DP", static_cast<audio_devices_t>(AUDIO_DEVICE_OUT_ALL_A2DP) },
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_HDMI),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+ // TODO(mnaganov): Remove from here, use 'audio_is_usb_out_device' function.
+ { "AUDIO_DEVICE_OUT_ALL_USB", static_cast<audio_devices_t>(AUDIO_DEVICE_OUT_ALL_USB) },
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_LINE),
@@ -72,7 +75,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+ // TODO(mnaganov): Remove from here, use 'audio_is_bluetooth_in_sco_device' function.
+ { "AUDIO_DEVICE_IN_ALL_SCO", static_cast<audio_devices_t>(AUDIO_DEVICE_IN_ALL_SCO) },
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_HDMI),
@@ -85,7 +89,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
- MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_ALL_USB),
+ // TODO(mnaganov): Remove from here, use 'audio_is_usb_in_device' function.
+ { "AUDIO_DEVICE_IN_ALL_USB", static_cast<audio_devices_t>(AUDIO_DEVICE_IN_ALL_USB) },
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LINE),
diff --git a/media/libmediahelper/include/media/AudioSanitizer.h b/media/libmediahelper/include/media/AudioSanitizer.h
new file mode 100644
index 0000000..1475c7b
--- /dev/null
+++ b/media/libmediahelper/include/media/AudioSanitizer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_SANITIZER_H_
+#define ANDROID_AUDIO_SANITIZER_H_
+
+#include <system/audio.h>
+#include <system/audio_effect.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+
+namespace android {
+
+class AudioSanitizer {
+public:
+ static status_t sanitizeAudioAttributes(
+ audio_attributes_t *attr, const char *bugNumber = nullptr);
+
+ static status_t sanitizeEffectDescriptor(
+ effect_descriptor_t *desc, const char *bugNumber = nullptr);
+
+ static status_t sanitizeAudioPortConfig(
+ struct audio_port_config *config, const char *bugNumber = nullptr);
+
+ static status_t sanitizeAudioPort(
+ struct audio_port *port, const char *bugNumber = nullptr);
+
+ static status_t sanitizeAudioPatch(
+ struct audio_patch *patch, const char *bugNumber = nullptr);
+};
+
+}; // namespace android
+
+#endif /*ANDROID_AUDIO_SANITIZER_H_*/
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 03068c7..a63b8b4 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -53,6 +53,7 @@
visibility: [
"//cts/tests/tests/nativemedia/mediametrics",
"//frameworks/av:__subpackages__",
+ "//frameworks/base/apex/media/framework",
"//frameworks/base/core/jni",
"//frameworks/base/media/jni",
],
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 5301f5c..324f4ae 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -16,6 +16,7 @@
"android.hardware.media.c2@1.0",
"android.hardware.media.omx@1.0",
"libbase",
+ "libandroid_net",
"libaudioclient",
"libbinder",
"libcamera_client",
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 016f622..4d90d98 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1063,6 +1063,17 @@
return mStatus = setDataSource_post(p, p->setDataSource(dataSource));
}
+status_t MediaPlayerService::Client::setDataSource(
+ const String8& rtpParams) {
+ player_type playerType = NU_PLAYER;
+ sp<MediaPlayerBase> p = setDataSource_pre(playerType);
+ if (p == NULL) {
+ return NO_INIT;
+ }
+ // now set data source
+ return mStatus = setDataSource_post(p, p->setDataSource(rtpParams));
+}
+
void MediaPlayerService::Client::disconnectNativeWindow_l() {
if (mConnectedWindow != NULL) {
status_t err = nativeWindowDisconnect(
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index a7de3f3..b2f1b9b 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -372,6 +372,7 @@
virtual status_t setDataSource(const sp<IStreamSource> &source);
virtual status_t setDataSource(const sp<IDataSource> &source);
+ virtual status_t setDataSource(const String8& rtpParams);
sp<MediaPlayerBase> setDataSource_pre(player_type playerType);
diff --git a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index 41b6f72..09b9145 100644
--- a/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -538,7 +538,7 @@
// The overall duration is the duration of the longest track.
int64_t maxDurationUs = 0;
- String8 timedTextLang;
+ String8 timedTextLang, videoMime;
for (size_t i = 0; i < numTracks; ++i) {
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
if (!trackMeta) {
@@ -575,6 +575,7 @@
}
} else if (!hasVideo && !strncasecmp("video/", mime, 6)) {
hasVideo = true;
+ videoMime = String8(mime);
CHECK(trackMeta->findInt32(kKeyWidth, &videoWidth));
CHECK(trackMeta->findInt32(kKeyHeight, &videoHeight));
@@ -637,6 +638,8 @@
sprintf(tmp, "%d", rotationAngle);
mMetaData.add(METADATA_KEY_VIDEO_ROTATION, String8(tmp));
+ mMetaData.add(METADATA_KEY_VIDEO_CODEC_MIME_TYPE, videoMime);
+
if (videoFrameCount > 0) {
sprintf(tmp, "%d", videoFrameCount);
mMetaData.add(METADATA_KEY_VIDEO_FRAME_COUNT, String8(tmp));
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 7897959..3e7ee50 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -17,6 +17,9 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "StagefrightRecorder"
#include <inttypes.h>
+// TODO/workaround: including base logging now as it conflicts with ADebug.h
+// and it must be included first.
+#include <android-base/logging.h>
#include <utils/Log.h>
#include "WebmWriter.h"
@@ -44,6 +47,7 @@
#include <media/stagefright/CameraSourceTimeLapse.h>
#include <media/stagefright/MPEG2TSWriter.h>
#include <media/stagefright/MPEG4Writer.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaCodecSource.h>
@@ -117,6 +121,11 @@
mAudioSource((audio_source_t)AUDIO_SOURCE_CNT), // initialize with invalid value
mPrivacySensitive(PRIVACY_SENSITIVE_DEFAULT),
mVideoSource(VIDEO_SOURCE_LIST_END),
+ mRTPCVOExtMap(-1),
+ mRTPCVODegrees(0),
+ mRTPSockDscp(0),
+ mRTPSockNetwork(0),
+ mLastSeqNo(0),
mStarted(false),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
mDeviceCallbackEnabled(false),
@@ -567,6 +576,32 @@
// range that a specific encoder supports. The mismatch between the
// the target and requested bit rate will NOT be treated as an error.
mVideoBitRate = bitRate;
+
+ // A new bitrate(TMMBR) should be applied on runtime as well if OutputFormat is RTP_AVP
+ if (mOutputFormat == OUTPUT_FORMAT_RTP_AVP) {
+ // Regular I frames may overload the network so we reduce the bitrate to allow
+ // margins for the I frame overruns.
+ // Still send requested bitrate (TMMBR) in the reply (TMMBN).
+ const float coefficient = 0.8f;
+ mVideoBitRate = (bitRate * coefficient) / 1000 * 1000;
+ }
+ if (mOutputFormat == OUTPUT_FORMAT_RTP_AVP && mStarted && mPauseStartTimeUs == 0) {
+ mVideoEncoderSource->setEncodingBitrate(mVideoBitRate);
+ ARTPWriter* rtpWriter = static_cast<ARTPWriter*>(mWriter.get());
+ rtpWriter->setTMMBNInfo(mOpponentID, bitRate);
+ }
+
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamVideoBitRateMode(int32_t bitRateMode) {
+ ALOGV("setParamVideoBitRateMode: %d", bitRateMode);
+ // TODO: clarify what bitrate mode of -1 is as these start from 0
+ if (bitRateMode < -1) {
+ ALOGE("Unsupported video bitrate mode: %d", bitRateMode);
+ return BAD_VALUE;
+ }
+ mVideoBitRateMode = bitRateMode;
return OK;
}
@@ -776,6 +811,105 @@
return OK;
}
+status_t StagefrightRecorder::setParamRtpLocalIp(const String8 &localIp) {
+ ALOGV("setParamVideoLocalIp: %s", localIp.string());
+
+ mLocalIp.setTo(localIp.string());
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamRtpLocalPort(int32_t localPort) {
+ ALOGV("setParamVideoLocalPort: %d", localPort);
+
+ mLocalPort = localPort;
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamRtpRemoteIp(const String8 &remoteIp) {
+ ALOGV("setParamVideoRemoteIp: %s", remoteIp.string());
+
+ mRemoteIp.setTo(remoteIp.string());
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamRtpRemotePort(int32_t remotePort) {
+ ALOGV("setParamVideoRemotePort: %d", remotePort);
+
+ mRemotePort = remotePort;
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamSelfID(int32_t selfID) {
+ ALOGV("setParamSelfID: %x", selfID);
+
+ mSelfID = selfID;
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamVideoOpponentID(int32_t opponentID) {
+ mOpponentID = opponentID;
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamPayloadType(int32_t payloadType) {
+ ALOGV("setParamPayloadType: %d", payloadType);
+
+ mPayloadType = payloadType;
+
+ if (mStarted && mOutputFormat == OUTPUT_FORMAT_RTP_AVP) {
+ mWriter->updatePayloadType(mPayloadType);
+ }
+
+ return OK;
+}
+
+status_t StagefrightRecorder::setRTPCVOExtMap(int32_t extmap) {
+ ALOGV("setRtpCvoExtMap: %d", extmap);
+
+ mRTPCVOExtMap = extmap;
+ return OK;
+}
+
+status_t StagefrightRecorder::setRTPCVODegrees(int32_t cvoDegrees) {
+ Mutex::Autolock autolock(mLock);
+ ALOGV("setRtpCvoDegrees: %d", cvoDegrees);
+
+ mRTPCVODegrees = cvoDegrees;
+
+ if (mStarted && mOutputFormat == OUTPUT_FORMAT_RTP_AVP) {
+ mWriter->updateCVODegrees(mRTPCVODegrees);
+ }
+
+ return OK;
+}
+
+status_t StagefrightRecorder::setParamRtpDscp(int32_t dscp) {
+ ALOGV("setParamRtpDscp: %d", dscp);
+
+ mRTPSockDscp = dscp;
+ return OK;
+}
+
+status_t StagefrightRecorder::setSocketNetwork(int64_t networkHandle) {
+ ALOGV("setSocketNetwork: %llu", (unsigned long long) networkHandle);
+
+ mRTPSockNetwork = networkHandle;
+ if (mStarted && mOutputFormat == OUTPUT_FORMAT_RTP_AVP) {
+ mWriter->updateSocketNetwork(mRTPSockNetwork);
+ }
+ return OK;
+}
+
+status_t StagefrightRecorder::requestIDRFrame() {
+ status_t ret = BAD_VALUE;
+ if (mVideoEncoderSource != NULL) {
+ ret = mVideoEncoderSource->requestIDRFrame();
+ } else {
+ ALOGV("requestIDRFrame: Encoder not ready");
+ }
+ return ret;
+}
+
status_t StagefrightRecorder::setParameter(
const String8 &key, const String8 &value) {
ALOGV("setParameter: key (%s) => value (%s)", key.string(), value.string());
@@ -844,6 +978,11 @@
if (safe_strtoi32(value.string(), &video_bitrate)) {
return setParamVideoEncodingBitRate(video_bitrate);
}
+ } else if (key == "video-param-bitrate-mode") {
+ int32_t video_bitrate_mode;
+ if (safe_strtoi32(value.string(), &video_bitrate_mode)) {
+ return setParamVideoBitRateMode(video_bitrate_mode);
+ }
} else if (key == "video-param-rotation-angle-degrees") {
int32_t degrees;
if (safe_strtoi32(value.string(), °rees)) {
@@ -884,6 +1023,61 @@
if (safe_strtod(value.string(), &fps)) {
return setParamCaptureFps(fps);
}
+ } else if (key == "rtp-param-local-ip") {
+ return setParamRtpLocalIp(value);
+ } else if (key == "rtp-param-local-port") {
+ int32_t localPort;
+ if (safe_strtoi32(value.string(), &localPort)) {
+ return setParamRtpLocalPort(localPort);
+ }
+ } else if (key == "rtp-param-remote-ip") {
+ return setParamRtpRemoteIp(value);
+ } else if (key == "rtp-param-remote-port") {
+ int32_t remotePort;
+ if (safe_strtoi32(value.string(), &remotePort)) {
+ return setParamRtpRemotePort(remotePort);
+ }
+ } else if (key == "rtp-param-self-id") {
+ int32_t selfID;
+ int64_t temp;
+ if (safe_strtoi64(value.string(), &temp)) {
+ selfID = static_cast<int32_t>(temp);
+ return setParamSelfID(selfID);
+ }
+ } else if (key == "rtp-param-opponent-id") {
+ int32_t opnId;
+ int64_t temp;
+ if (safe_strtoi64(value.string(), &temp)) {
+ opnId = static_cast<int32_t>(temp);
+ return setParamVideoOpponentID(opnId);
+ }
+ } else if (key == "rtp-param-payload-type") {
+ int32_t payloadType;
+ if (safe_strtoi32(value.string(), &payloadType)) {
+ return setParamPayloadType(payloadType);
+ }
+ } else if (key == "rtp-param-ext-cvo-extmap") {
+ int32_t extmap;
+ if (safe_strtoi32(value.string(), &extmap)) {
+ return setRTPCVOExtMap(extmap);
+ }
+ } else if (key == "rtp-param-ext-cvo-degrees") {
+ int32_t degrees;
+ if (safe_strtoi32(value.string(), °rees)) {
+ return setRTPCVODegrees(degrees);
+ }
+ } else if (key == "video-param-request-i-frame") {
+ return requestIDRFrame();
+ } else if (key == "rtp-param-set-socket-dscp") {
+ int32_t dscp;
+ if (safe_strtoi32(value.string(), &dscp)) {
+ return setParamRtpDscp(dscp);
+ }
+ } else if (key == "rtp-param-set-socket-network") {
+ int64_t networkHandle;
+ if (safe_strtoi64(value.string(), &networkHandle)) {
+ return setSocketNetwork(networkHandle);
+ }
} else {
ALOGE("setParameter: failed to find key %s", key.string());
}
@@ -1050,6 +1244,17 @@
sp<MetaData> meta = new MetaData;
int64_t startTimeUs = systemTime() / 1000;
meta->setInt64(kKeyTime, startTimeUs);
+ meta->setInt32(kKeySelfID, mSelfID);
+ meta->setInt32(kKeyPayloadType, mPayloadType);
+ meta->setInt64(kKeySocketNetwork, mRTPSockNetwork);
+ if (mRTPCVOExtMap > 0) {
+ meta->setInt32(kKeyRtpExtMap, mRTPCVOExtMap);
+ meta->setInt32(kKeyRtpCvoDegrees, mRTPCVODegrees);
+ }
+ if (mRTPSockDscp > 0) {
+ meta->setInt32(kKeyRtpDscp, mRTPSockDscp);
+ }
+
status = mWriter->start(meta.get());
break;
}
@@ -1113,7 +1318,7 @@
if (mPrivacySensitive == PRIVACY_SENSITIVE_DEFAULT) {
if (attr.source == AUDIO_SOURCE_VOICE_COMMUNICATION
|| attr.source == AUDIO_SOURCE_CAMCORDER) {
- attr.flags |= AUDIO_FLAG_CAPTURE_PRIVATE;
+ attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_CAPTURE_PRIVATE);
mPrivacySensitive = PRIVACY_SENSITIVE_ENABLED;
} else {
mPrivacySensitive = PRIVACY_SENSITIVE_DISABLED;
@@ -1129,7 +1334,7 @@
return NULL;
}
if (mPrivacySensitive == PRIVACY_SENSITIVE_ENABLED) {
- attr.flags |= AUDIO_FLAG_CAPTURE_PRIVATE;
+ attr.flags = static_cast<audio_flags_mask_t>(attr.flags | AUDIO_FLAG_CAPTURE_PRIVATE);
}
}
@@ -1330,7 +1535,7 @@
mVideoEncoderSource = source;
}
- mWriter = new ARTPWriter(mOutputFd);
+ mWriter = new ARTPWriter(mOutputFd, mLocalIp, mLocalPort, mRemoteIp, mRemotePort, mLastSeqNo);
mWriter->addSource(source);
mWriter->setListener(mListener);
@@ -1784,7 +1989,13 @@
}
}
+ if (mOutputFormat == OUTPUT_FORMAT_RTP_AVP) {
+ // This indicates that a raw image provided to encoder needs to be rotated.
+ format->setInt32("rotation-degrees", mRotationDegrees);
+ }
+
format->setInt32("bitrate", mVideoBitRate);
+ format->setInt32("bitrate-mode", mVideoBitRateMode);
format->setInt32("frame-rate", mFrameRate);
format->setInt32("i-frame-interval", mIFramesIntervalSec);
@@ -2130,6 +2341,7 @@
if (mWriter != NULL) {
err = mWriter->stop();
+ mLastSeqNo = mWriter->getSequenceNum();
mWriter.clear();
}
@@ -2206,6 +2418,8 @@
mVideoHeight = 144;
mFrameRate = -1;
mVideoBitRate = 192000;
+ // Following MediaCodec's default
+ mVideoBitRateMode = BITRATE_MODE_VBR;
mSampleRate = 8000;
mAudioChannels = 1;
mAudioBitRate = 12200;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index a725bee..0362edd 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -119,6 +119,7 @@
int32_t mVideoWidth, mVideoHeight;
int32_t mFrameRate;
int32_t mVideoBitRate;
+ int32_t mVideoBitRateMode;
int32_t mAudioBitRate;
int32_t mAudioChannels;
int32_t mSampleRate;
@@ -138,6 +139,18 @@
int32_t mLongitudex10000;
int32_t mStartTimeOffsetMs;
int32_t mTotalBitRate;
+ String8 mLocalIp;
+ String8 mRemoteIp;
+ int32_t mLocalPort;
+ int32_t mRemotePort;
+ int32_t mSelfID;
+ int32_t mOpponentID;
+ int32_t mPayloadType;
+ int32_t mRTPCVOExtMap;
+ int32_t mRTPCVODegrees;
+ int32_t mRTPSockDscp;
+ int64_t mRTPSockNetwork;
+ uint32_t mLastSeqNo;
int64_t mDurationRecordedUs;
int64_t mStartedRecordingUs;
@@ -205,6 +218,7 @@
status_t setParamCaptureFpsEnable(int32_t timeLapseEnable);
status_t setParamCaptureFps(double fps);
status_t setParamVideoEncodingBitRate(int32_t bitRate);
+ status_t setParamVideoBitRateMode(int32_t bitRateMode);
status_t setParamVideoIFramesInterval(int32_t seconds);
status_t setParamVideoEncoderProfile(int32_t profile);
status_t setParamVideoEncoderLevel(int32_t level);
@@ -219,6 +233,18 @@
status_t setParamMovieTimeScale(int32_t timeScale);
status_t setParamGeoDataLongitude(int64_t longitudex10000);
status_t setParamGeoDataLatitude(int64_t latitudex10000);
+ status_t setParamRtpLocalIp(const String8 &localIp);
+ status_t setParamRtpLocalPort(int32_t localPort);
+ status_t setParamRtpRemoteIp(const String8 &remoteIp);
+ status_t setParamRtpRemotePort(int32_t remotePort);
+ status_t setParamSelfID(int32_t selfID);
+ status_t setParamVideoOpponentID(int32_t opponentID);
+ status_t setParamPayloadType(int32_t payloadType);
+ status_t setRTPCVOExtMap(int32_t extmap);
+ status_t setRTPCVODegrees(int32_t cvoDegrees);
+ status_t setParamRtpDscp(int32_t dscp);
+ status_t setSocketNetwork(int64_t networkHandle);
+ status_t requestIDRFrame();
void clipVideoBitRate();
void clipVideoFrameRate();
void clipVideoFrameWidth();
diff --git a/media/libmediaplayerservice/include/MediaPlayerInterface.h b/media/libmediaplayerservice/include/MediaPlayerInterface.h
index 436cb31..8d94698 100644
--- a/media/libmediaplayerservice/include/MediaPlayerInterface.h
+++ b/media/libmediaplayerservice/include/MediaPlayerInterface.h
@@ -60,7 +60,7 @@
#define DEFAULT_AUDIOSINK_SAMPLERATE 44100
// when the channel mask isn't known, use the channel count to derive a mask in AudioSink::open()
-#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
+#define CHANNEL_MASK_USE_CHANNEL_ORDER AUDIO_CHANNEL_NONE
// duration below which we do not allow deep audio buffering
#define AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US 5000000
@@ -183,6 +183,10 @@
return INVALID_OPERATION;
}
+ virtual status_t setDataSource(const String8& /* rtpParams */) {
+ return INVALID_OPERATION;
+ }
+
// pass the buffered IGraphicBufferProducer to the media player service
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
diff --git a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
index 684ba2e..7bee002 100644
--- a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
+++ b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
@@ -52,18 +52,19 @@
if (binder == NULL) {
ALOGW("could not get the power manager service");
} else {
- mPowerManager = interface_cast<IPowerManager>(binder);
+ mPowerManager = interface_cast<os::IPowerManager>(binder);
binder->linkToDeath(mDeathRecipient);
}
}
if (mPowerManager != NULL) {
sp<IBinder> binder = new BBinder();
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- status_t status = mPowerManager->acquireWakeLock(
- POWERMANAGER_PARTIAL_WAKE_LOCK,
- binder, String16("AWakeLock"), String16("media"));
+ binder::Status status = mPowerManager->acquireWakeLock(
+ binder, POWERMANAGER_PARTIAL_WAKE_LOCK,
+ String16("AWakeLock"), String16("media"),
+ {} /* workSource */, {} /* historyTag */);
IPCThreadState::self()->restoreCallingIdentity(token);
- if (status == NO_ERROR) {
+ if (status.isOk()) {
mWakeLockToken = binder;
mWakeLockCount++;
return true;
diff --git a/media/libmediaplayerservice/nuplayer/AWakeLock.h b/media/libmediaplayerservice/nuplayer/AWakeLock.h
index 323e7d7..8aa3b41 100644
--- a/media/libmediaplayerservice/nuplayer/AWakeLock.h
+++ b/media/libmediaplayerservice/nuplayer/AWakeLock.h
@@ -18,7 +18,7 @@
#define A_WAKELOCK_H_
#include <media/stagefright/foundation/ABase.h>
-#include <powermanager/IPowerManager.h>
+#include <android/os/IPowerManager.h>
#include <utils/RefBase.h>
namespace android {
@@ -37,7 +37,7 @@
virtual ~AWakeLock();
private:
- sp<IPowerManager> mPowerManager;
+ sp<os::IPowerManager> mPowerManager;
sp<IBinder> mWakeLockToken;
uint32_t mWakeLockCount;
diff --git a/media/libmediaplayerservice/nuplayer/Android.bp b/media/libmediaplayerservice/nuplayer/Android.bp
index 32c97cf..f5e44c7 100644
--- a/media/libmediaplayerservice/nuplayer/Android.bp
+++ b/media/libmediaplayerservice/nuplayer/Android.bp
@@ -14,6 +14,7 @@
"NuPlayerRenderer.cpp",
"NuPlayerStreamListener.cpp",
"RTSPSource.cpp",
+ "RTPSource.cpp",
"StreamingSource.cpp",
],
@@ -30,6 +31,7 @@
"frameworks/av/media/libstagefright/mpeg2ts",
"frameworks/av/media/libstagefright/rtsp",
"frameworks/av/media/libstagefright/timedtext",
+ "frameworks/native/include/android",
],
cflags: [
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index c1c4b55..47362ef 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -31,6 +31,7 @@
#include "NuPlayerDriver.h"
#include "NuPlayerRenderer.h"
#include "NuPlayerSource.h"
+#include "RTPSource.h"
#include "RTSPSource.h"
#include "StreamingSource.h"
#include "GenericSource.h"
@@ -368,6 +369,18 @@
return err;
}
+void NuPlayer::setDataSourceAsync(const String8& rtpParams) {
+ ALOGD("setDataSourceAsync for RTP = %s", rtpParams.string());
+ sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
+
+ sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
+ sp<Source> source = new RTPSource(notify, rtpParams);
+
+ msg->setObject("source", source);
+ msg->post();
+ mDataSourceType = DATA_SOURCE_TYPE_RTP;
+}
+
void NuPlayer::prepareAsync() {
ALOGV("prepareAsync");
@@ -1689,6 +1702,12 @@
updateRebufferingTimer(false /* stopping */, false /* exiting */);
}
+void NuPlayer::setTargetBitrate(int bitrate) {
+ if (mSource != NULL) {
+ mSource->setTargetBitrate(bitrate);
+ }
+}
+
void NuPlayer::onPause() {
updatePlaybackTimer(true /* stopping */, "onPause");
@@ -1915,6 +1934,11 @@
format->setInt32("priority", 0 /* realtime */);
+ if (mDataSourceType == DATA_SOURCE_TYPE_RTP) {
+ ALOGV("instantiateDecoder: set decoder error free on stream corrupt.");
+ format->setInt32("corrupt-free", true);
+ }
+
if (!audio) {
AString mime;
CHECK(format->findString("mime", &mime));
@@ -2715,6 +2739,14 @@
break;
}
+ case Source::kWhatIMSRxNotice:
+ {
+ sp<AMessage> IMSRxNotice;
+ CHECK(msg->findMessage("message", &IMSRxNotice));
+ sendIMSRxNotice(IMSRxNotice);
+ break;
+ }
+
default:
TRESPASS();
}
@@ -2817,11 +2849,74 @@
}
}
+void NuPlayer::sendIMSRxNotice(const sp<AMessage> &msg) {
+ int32_t payloadType;
+
+ CHECK(msg->findInt32("payload-type", &payloadType));
+
+ Parcel in;
+ in.writeInt32(payloadType);
+
+ switch (payloadType) {
+ case NuPlayer::RTPSource::RTCP_TSFB: // RTCP TSFB
+ case NuPlayer::RTPSource::RTCP_PSFB: // RTCP PSFB
+ case NuPlayer::RTPSource::RTP_AUTODOWN:
+ {
+ int32_t feedbackType, id;
+ CHECK(msg->findInt32("feedback-type", &feedbackType));
+ CHECK(msg->findInt32("sender", &id));
+ in.writeInt32(feedbackType);
+ in.writeInt32(id);
+ if (payloadType == NuPlayer::RTPSource::RTCP_TSFB) {
+ int32_t bitrate;
+ CHECK(msg->findInt32("bit-rate", &bitrate));
+ in.writeInt32(bitrate);
+ }
+ break;
+ }
+ case NuPlayer::RTPSource::RTP_QUALITY:
+ {
+ int32_t feedbackType, bitrate;
+ int32_t highestSeqNum, baseSeqNum, prevExpected;
+ int32_t numBufRecv, prevNumBufRecv;
+ CHECK(msg->findInt32("feedback-type", &feedbackType));
+ CHECK(msg->findInt32("bit-rate", &bitrate));
+ CHECK(msg->findInt32("highest-seq-num", &highestSeqNum));
+ CHECK(msg->findInt32("base-seq-num", &baseSeqNum));
+ CHECK(msg->findInt32("prev-expected", &prevExpected));
+ CHECK(msg->findInt32("num-buf-recv", &numBufRecv));
+ CHECK(msg->findInt32("prev-num-buf-recv", &prevNumBufRecv));
+ in.writeInt32(feedbackType);
+ in.writeInt32(bitrate);
+ in.writeInt32(highestSeqNum);
+ in.writeInt32(baseSeqNum);
+ in.writeInt32(prevExpected);
+ in.writeInt32(numBufRecv);
+ in.writeInt32(prevNumBufRecv);
+ break;
+ }
+ case NuPlayer::RTPSource::RTP_CVO:
+ {
+ int32_t cvo;
+ CHECK(msg->findInt32("cvo", &cvo));
+ in.writeInt32(cvo);
+ break;
+ }
+ default:
+ break;
+ }
+
+ notifyListener(MEDIA_IMS_RX_NOTICE, 0, 0, &in);
+}
+
const char *NuPlayer::getDataSourceType() {
switch (mDataSourceType) {
case DATA_SOURCE_TYPE_HTTP_LIVE:
return "HTTPLive";
+ case DATA_SOURCE_TYPE_RTP:
+ return "RTP";
+
case DATA_SOURCE_TYPE_RTSP:
return "RTSP";
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index ef4354c..adb7075 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -51,6 +51,8 @@
void setDataSourceAsync(const sp<DataSource> &source);
+ void setDataSourceAsync(const String8& rtpParams);
+
status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
status_t setBufferingSettings(const BufferingSettings& buffering);
@@ -100,6 +102,8 @@
void updateInternalTimers();
+ void setTargetBitrate(int bitrate /* bps */);
+
protected:
virtual ~NuPlayer();
@@ -117,6 +121,7 @@
struct GenericSource;
struct HTTPLiveSource;
struct Renderer;
+ struct RTPSource;
struct RTSPSource;
struct StreamingSource;
struct Action;
@@ -257,6 +262,7 @@
typedef enum {
DATA_SOURCE_TYPE_NONE,
DATA_SOURCE_TYPE_HTTP_LIVE,
+ DATA_SOURCE_TYPE_RTP,
DATA_SOURCE_TYPE_RTSP,
DATA_SOURCE_TYPE_GENERIC_URL,
DATA_SOURCE_TYPE_GENERIC_FD,
@@ -334,6 +340,7 @@
void sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex);
void sendTimedMetaData(const sp<ABuffer> &buffer);
void sendTimedTextData(const sp<ABuffer> &buffer);
+ void sendIMSRxNotice(const sp<AMessage> &msg);
void writeTrackInfo(Parcel* reply, const sp<AMessage>& format) const;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index f734439..8628edc 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -1050,7 +1050,7 @@
uint32_t flags = 0;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
- int32_t eos, csd;
+ int32_t eos, csd, cvo;
// we do not expect SYNCFRAME for decoder
if (buffer->meta()->findInt32("eos", &eos) && eos) {
flags |= MediaCodec::BUFFER_FLAG_EOS;
@@ -1058,6 +1058,24 @@
flags |= MediaCodec::BUFFER_FLAG_CODECCONFIG;
}
+ if (buffer->meta()->findInt32("cvo", (int32_t*)&cvo)) {
+ ALOGV("[%s] cvo(%d) found at %lld us", mComponentName.c_str(), cvo, (long long)timeUs);
+ switch (cvo) {
+ case 0:
+ codecBuffer->meta()->setInt32("cvo", MediaCodec::CVO_DEGREE_0);
+ break;
+ case 1:
+ codecBuffer->meta()->setInt32("cvo", MediaCodec::CVO_DEGREE_90);
+ break;
+ case 2:
+ codecBuffer->meta()->setInt32("cvo", MediaCodec::CVO_DEGREE_180);
+ break;
+ case 3:
+ codecBuffer->meta()->setInt32("cvo", MediaCodec::CVO_DEGREE_270);
+ break;
+ }
+ }
+
// Modular DRM
MediaBufferBase *mediaBuf = NULL;
NuPlayerDrm::CryptoInfo *cryptInfo = NULL;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index dc144b2..2a50fc2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -218,6 +218,26 @@
return mAsyncResult;
}
+status_t NuPlayerDriver::setDataSource(const String8& rtpParams) {
+ ALOGV("setDataSource(%p) rtp source", this);
+ Mutex::Autolock autoLock(mLock);
+
+ if (mState != STATE_IDLE) {
+ return INVALID_OPERATION;
+ }
+
+ mState = STATE_SET_DATASOURCE_PENDING;
+
+ mPlayer->setDataSourceAsync(rtpParams);
+
+ while (mState == STATE_SET_DATASOURCE_PENDING) {
+ mCondition.wait(mLock);
+ }
+
+ return mAsyncResult;
+}
+
+
status_t NuPlayerDriver::setVideoSurfaceTexture(
const sp<IGraphicBufferProducer> &bufferProducer) {
ALOGV("setVideoSurfaceTexture(%p)", this);
@@ -797,7 +817,11 @@
}
status_t NuPlayerDriver::setParameter(
- int /* key */, const Parcel & /* request */) {
+ int key, const Parcel &request ) {
+ if (key == KEY_PARAMETER_RTP_ATTRIBUTES) {
+ mPlayer->setTargetBitrate(request.readInt32());
+ return OK;
+ }
return INVALID_OPERATION;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index f4b1968..55a0fad 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -43,6 +43,8 @@
virtual status_t setDataSource(const sp<DataSource>& dataSource);
+ virtual status_t setDataSource(const String8& rtpParams);
+
virtual status_t setVideoSurfaceTexture(
const sp<IGraphicBufferProducer> &bufferProducer);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index c30f048..13e1933 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -1928,11 +1928,12 @@
int32_t numChannels;
CHECK(format->findInt32("channel-count", &numChannels));
- int32_t channelMask;
- if (!format->findInt32("channel-mask", &channelMask)) {
- // signal to the AudioSink to derive the mask from count.
- channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
- }
+ int32_t rawChannelMask;
+ audio_channel_mask_t channelMask =
+ format->findInt32("channel-mask", &rawChannelMask) ?
+ static_cast<audio_channel_mask_t>(rawChannelMask)
+ // signal to the AudioSink to derive the mask from count.
+ : CHANNEL_MASK_USE_CHANNEL_ORDER;
int32_t sampleRate;
CHECK(format->findInt32("sample-rate", &sampleRate));
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index f137c52..bf6b539 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -58,6 +58,7 @@
kWhatInstantiateSecureDecoders,
// Modular DRM
kWhatDrmInfo,
+ kWhatIMSRxNotice,
};
// The provides message is used to notify the player about various
@@ -131,6 +132,8 @@
virtual void setOffloadAudio(bool /* offload */) {}
+ virtual void setTargetBitrate(int32_t) {}
+
// Modular DRM
virtual status_t prepareDrm(
const uint8_t /*uuid*/[16], const Vector<uint8_t> &/*drmSessionId*/,
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.cpp b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
new file mode 100644
index 0000000..b1901e8
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.cpp
@@ -0,0 +1,791 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RTPSource"
+#include <utils/Log.h>
+
+#include "RTPSource.h"
+
+
+
+
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <string.h>
+
+namespace android {
+
+const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+static int32_t kMaxAllowedStaleAccessUnits = 20;
+
+NuPlayer::RTPSource::RTPSource(
+ const sp<AMessage> &notify,
+ const String8& rtpParams)
+ : Source(notify),
+ mRTPParams(rtpParams),
+ mFlags(0),
+ mState(DISCONNECTED),
+ mFinalResult(OK),
+ mBuffering(false),
+ mInPreparationPhase(true),
+ mRTPConn(new ARTPConnection(ARTPConnection::kViLTEConnection)),
+ mEOSTimeoutAudio(0),
+ mEOSTimeoutVideo(0),
+ mLastCVOUpdated(-1) {
+ ALOGD("RTPSource initialized with rtpParams=%s", rtpParams.string());
+}
+
+NuPlayer::RTPSource::~RTPSource() {
+ if (mLooper != NULL) {
+ mLooper->unregisterHandler(id());
+ mLooper->unregisterHandler(mRTPConn->id());
+ mLooper->stop();
+ }
+}
+
+status_t NuPlayer::RTPSource::getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) {
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ *buffering = mBufferingSettings;
+ return OK;
+}
+
+status_t NuPlayer::RTPSource::setBufferingSettings(const BufferingSettings& buffering) {
+ Mutex::Autolock _l(mBufferingSettingsLock);
+ mBufferingSettings = buffering;
+ return OK;
+}
+
+void NuPlayer::RTPSource::prepareAsync() {
+ if (mLooper == NULL) {
+ mLooper = new ALooper;
+ mLooper->setName("rtp");
+ mLooper->start();
+
+ mLooper->registerHandler(this);
+ mLooper->registerHandler(mRTPConn);
+ }
+
+ CHECK_EQ(mState, (int)DISCONNECTED);
+ mState = CONNECTING;
+
+ setParameters(mRTPParams);
+
+ TrackInfo *info = NULL;
+ unsigned i;
+ for (i = 0; i < mTracks.size(); i++) {
+ info = &mTracks.editItemAt(i);
+
+ if (info == NULL)
+ break;
+
+ AString sdp;
+ ASessionDescription::SDPStringFactory(sdp, info->mLocalIp,
+ info->mIsAudio, info->mLocalPort, info->mPayloadType, info->mAS, info->mCodecName,
+ NULL, info->mWidth, info->mHeight, info->mCVOExtMap);
+ ALOGD("RTPSource SDP =>\n%s", sdp.c_str());
+
+ sp<ASessionDescription> desc = new ASessionDescription;
+ bool isValidSdp = desc->setTo(sdp.c_str(), sdp.size());
+ ALOGV("RTPSource isValidSdp => %d", isValidSdp);
+
+ int sockRtp, sockRtcp;
+ ARTPConnection::MakeRTPSocketPair(&sockRtp, &sockRtcp, info->mLocalIp, info->mRemoteIp,
+ info->mLocalPort, info->mRemotePort, info->mSocketNetwork);
+
+ sp<AMessage> notify = new AMessage('accu', this);
+
+ ALOGV("RTPSource addStream. track-index=%d", i);
+ notify->setSize("trackIndex", i);
+ // index(i) should start from 1; 0 is reserved for [root]
+ mRTPConn->addStream(sockRtp, sockRtcp, desc, i + 1, notify, false);
+ mRTPConn->setSelfID(info->mSelfID);
+ mRTPConn->setJbTime(
+ (info->mJbTimeMs <= 3000 && info->mJbTimeMs >= 40) ? info->mJbTimeMs : 300);
+
+ info->mRTPSocket = sockRtp;
+ info->mRTCPSocket = sockRtcp;
+ info->mFirstSeqNumInSegment = 0;
+ info->mNewSegment = true;
+ info->mAllowedStaleAccessUnits = kMaxAllowedStaleAccessUnits;
+ info->mRTPAnchor = 0;
+ info->mNTPAnchorUs = -1;
+ info->mNormalPlayTimeRTP = 0;
+ info->mNormalPlayTimeUs = 0ll;
+
+ // index(i) should start from 1; 0 is reserved for [root]
+ info->mPacketSource = new APacketSource(desc, i + 1);
+
+ int32_t timeScale;
+ sp<MetaData> format = getTrackFormat(i, &timeScale);
+ sp<AnotherPacketSource> source = new AnotherPacketSource(format);
+
+ if (info->mIsAudio) {
+ mAudioTrack = source;
+ info->mTimeScale = 16000;
+ } else {
+ mVideoTrack = source;
+ info->mTimeScale = 90000;
+ }
+
+ info->mSource = source;
+ info->mRTPTime = 0;
+ info->mNormalPlaytimeUs = 0;
+ info->mNPTMappingValid = false;
+ }
+
+ if (mInPreparationPhase) {
+ mInPreparationPhase = false;
+ notifyPrepared();
+ }
+}
+
+void NuPlayer::RTPSource::start() {
+}
+
+void NuPlayer::RTPSource::pause() {
+ mState = PAUSED;
+}
+
+void NuPlayer::RTPSource::resume() {
+ mState = CONNECTING;
+}
+
+void NuPlayer::RTPSource::stop() {
+ if (mLooper == NULL) {
+ return;
+ }
+ sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
+
+ sp<AMessage> dummy;
+ msg->postAndAwaitResponse(&dummy);
+}
+
+status_t NuPlayer::RTPSource::feedMoreTSData() {
+ Mutex::Autolock _l(mBufferingLock);
+ return mFinalResult;
+}
+
+sp<MetaData> NuPlayer::RTPSource::getFormatMeta(bool audio) {
+ sp<AnotherPacketSource> source = getSource(audio);
+
+ if (source == NULL) {
+ return NULL;
+ }
+
+ return source->getFormat();
+}
+
+bool NuPlayer::RTPSource::haveSufficientDataOnAllTracks() {
+ // We're going to buffer at least 2 secs worth data on all tracks before
+ // starting playback (both at startup and after a seek).
+
+ static const int64_t kMinDurationUs = 2000000ll;
+
+ int64_t mediaDurationUs = 0;
+ getDuration(&mediaDurationUs);
+ if ((mAudioTrack != NULL && mAudioTrack->isFinished(mediaDurationUs))
+ || (mVideoTrack != NULL && mVideoTrack->isFinished(mediaDurationUs))) {
+ return true;
+ }
+
+ status_t err;
+ int64_t durationUs;
+ if (mAudioTrack != NULL
+ && (durationUs = mAudioTrack->getBufferedDurationUs(&err))
+ < kMinDurationUs
+ && err == OK) {
+ ALOGV("audio track doesn't have enough data yet. (%.2f secs buffered)",
+ durationUs / 1E6);
+ return false;
+ }
+
+ if (mVideoTrack != NULL
+ && (durationUs = mVideoTrack->getBufferedDurationUs(&err))
+ < kMinDurationUs
+ && err == OK) {
+ ALOGV("video track doesn't have enough data yet. (%.2f secs buffered)",
+ durationUs / 1E6);
+ return false;
+ }
+
+ return true;
+}
+
+status_t NuPlayer::RTPSource::dequeueAccessUnit(
+ bool audio, sp<ABuffer> *accessUnit) {
+
+ sp<AnotherPacketSource> source = getSource(audio);
+
+ if (mState == PAUSED) {
+ ALOGV("-EWOULDBLOCK");
+ return -EWOULDBLOCK;
+ }
+
+ status_t finalResult;
+ if (!source->hasBufferAvailable(&finalResult)) {
+ if (finalResult == OK) {
+ int64_t mediaDurationUs = 0;
+ getDuration(&mediaDurationUs);
+ sp<AnotherPacketSource> otherSource = getSource(!audio);
+ status_t otherFinalResult;
+
+ // If other source already signaled EOS, this source should also signal EOS
+ if (otherSource != NULL &&
+ !otherSource->hasBufferAvailable(&otherFinalResult) &&
+ otherFinalResult == ERROR_END_OF_STREAM) {
+ source->signalEOS(ERROR_END_OF_STREAM);
+ return ERROR_END_OF_STREAM;
+ }
+
+ // If this source has detected near end, give it some time to retrieve more
+ // data before signaling EOS
+ if (source->isFinished(mediaDurationUs)) {
+ int64_t eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo;
+ if (eosTimeout == 0) {
+ setEOSTimeout(audio, ALooper::GetNowUs());
+ } else if ((ALooper::GetNowUs() - eosTimeout) > kNearEOSTimeoutUs) {
+ setEOSTimeout(audio, 0);
+ source->signalEOS(ERROR_END_OF_STREAM);
+ return ERROR_END_OF_STREAM;
+ }
+ return -EWOULDBLOCK;
+ }
+
+ if (!(otherSource != NULL && otherSource->isFinished(mediaDurationUs))) {
+ // We should not enter buffering mode
+ // if any of the sources already have detected EOS.
+ // TODO: check whether the line below is needed.
+ // startBufferingIfNecessary();
+ }
+
+ return -EWOULDBLOCK;
+ }
+ return finalResult;
+ }
+
+ setEOSTimeout(audio, 0);
+
+ finalResult = source->dequeueAccessUnit(accessUnit);
+ if (finalResult != OK) {
+ return finalResult;
+ }
+
+ int32_t cvo;
+ if ((*accessUnit) != NULL && (*accessUnit)->meta()->findInt32("cvo", &cvo) &&
+ cvo != mLastCVOUpdated) {
+ sp<AMessage> msg = new AMessage();
+ msg->setInt32("payload-type", NuPlayer::RTPSource::RTP_CVO);
+ msg->setInt32("cvo", cvo);
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatIMSRxNotice);
+ notify->setMessage("message", msg);
+ notify->post();
+
+ ALOGV("notify cvo updated (%d)->(%d) to upper layer", mLastCVOUpdated, cvo);
+ mLastCVOUpdated = cvo;
+ }
+
+ return finalResult;
+}
+
+sp<AnotherPacketSource> NuPlayer::RTPSource::getSource(bool audio) {
+ return audio ? mAudioTrack : mVideoTrack;
+}
+
+void NuPlayer::RTPSource::setEOSTimeout(bool audio, int64_t timeout) {
+ if (audio) {
+ mEOSTimeoutAudio = timeout;
+ } else {
+ mEOSTimeoutVideo = timeout;
+ }
+}
+
+status_t NuPlayer::RTPSource::getDuration(int64_t *durationUs) {
+ *durationUs = 0ll;
+
+ int64_t audioDurationUs;
+ if (mAudioTrack != NULL
+ && mAudioTrack->getFormat()->findInt64(
+ kKeyDuration, &audioDurationUs)
+ && audioDurationUs > *durationUs) {
+ *durationUs = audioDurationUs;
+ }
+
+ int64_t videoDurationUs;
+ if (mVideoTrack != NULL
+ && mVideoTrack->getFormat()->findInt64(
+ kKeyDuration, &videoDurationUs)
+ && videoDurationUs > *durationUs) {
+ *durationUs = videoDurationUs;
+ }
+
+ return OK;
+}
+
+status_t NuPlayer::RTPSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
+ ALOGV("RTPSource::seekTo=%d, mode=%d", (int)seekTimeUs, mode);
+ return OK;
+}
+
+void NuPlayer::RTPSource::schedulePollBuffering() {
+ sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
+ msg->post(kBufferingPollIntervalUs); // 1 second intervals
+}
+
+void NuPlayer::RTPSource::onPollBuffering() {
+ schedulePollBuffering();
+}
+
+bool NuPlayer::RTPSource::isRealTime() const {
+ ALOGD("RTPSource::isRealTime=%d", true);
+ return true;
+}
+
+void NuPlayer::RTPSource::onMessageReceived(const sp<AMessage> &msg) {
+ ALOGV("onMessageReceived =%d", msg->what());
+
+ switch (msg->what()) {
+ case kWhatAccessUnitComplete:
+ {
+ if (mState == CONNECTING) {
+ mState = CONNECTED;
+ }
+
+ int32_t timeUpdate;
+ //"time-update" raised from ARTPConnection::parseSR()
+ if (msg->findInt32("time-update", &timeUpdate) && timeUpdate) {
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+
+ uint32_t rtpTime;
+ uint64_t ntpTime;
+ CHECK(msg->findInt32("rtp-time", (int32_t *)&rtpTime));
+ CHECK(msg->findInt64("ntp-time", (int64_t *)&ntpTime));
+
+ onTimeUpdate(trackIndex, rtpTime, ntpTime);
+ break;
+ }
+
+ int32_t firstRTCP;
+ if (msg->findInt32("first-rtcp", &firstRTCP)) {
+ // There won't be an access unit here, it's just a notification
+ // that the data communication worked since we got the first
+ // rtcp packet.
+ ALOGV("first-rtcp");
+ break;
+ }
+
+ int32_t IMSRxNotice;
+ if (msg->findInt32("rtcp-event", &IMSRxNotice)) {
+ int32_t payloadType, feedbackType;
+ CHECK(msg->findInt32("payload-type", &payloadType));
+ CHECK(msg->findInt32("feedback-type", &feedbackType));
+
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatIMSRxNotice);
+ notify->setMessage("message", msg);
+ notify->post();
+
+ ALOGV("IMSRxNotice \t\t payload : %d feedback : %d",
+ payloadType, feedbackType);
+ break;
+ }
+
+ size_t trackIndex;
+ CHECK(msg->findSize("trackIndex", &trackIndex));
+
+ sp<ABuffer> accessUnit;
+ if (msg->findBuffer("access-unit", &accessUnit) == false) {
+ break;
+ }
+
+ int32_t damaged;
+ if (accessUnit->meta()->findInt32("damaged", &damaged)
+ && damaged) {
+ ALOGD("dropping damaged access unit.");
+ break;
+ }
+
+ // Implicitly assert on valid trackIndex here, which we ensure by
+ // never removing tracks.
+ TrackInfo *info = &mTracks.editItemAt(trackIndex);
+
+ sp<AnotherPacketSource> source = info->mSource;
+ if (source != NULL) {
+ uint32_t rtpTime;
+ CHECK(accessUnit->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+
+ /* AnotherPacketSource make an assertion if there is no ntp provided
+ RTPSource should provide ntpUs all the times.
+ if (!info->mNPTMappingValid) {
+ // This is a live stream, we didn't receive any normal
+ // playtime mapping. We won't map to npt time.
+ source->queueAccessUnit(accessUnit);
+ break;
+ }
+
+ int64_t nptUs =
+ ((double)rtpTime - (double)info->mRTPTime)
+ / info->mTimeScale
+ * 1000000ll
+ + info->mNormalPlaytimeUs;
+
+ */
+ accessUnit->meta()->setInt64("timeUs", ALooper::GetNowUs());
+
+ source->queueAccessUnit(accessUnit);
+ }
+
+ break;
+ }
+ case kWhatDisconnect:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ TrackInfo *info = &mTracks.editItemAt(i);
+
+ if (info->mIsAudio) {
+ mAudioTrack->signalEOS(ERROR_END_OF_STREAM);
+ mAudioTrack = NULL;
+ ALOGV("mAudioTrack disconnected");
+ } else {
+ mVideoTrack->signalEOS(ERROR_END_OF_STREAM);
+ mVideoTrack = NULL;
+ ALOGV("mVideoTrack disconnected");
+ }
+
+ mRTPConn->removeStream(info->mRTPSocket, info->mRTCPSocket);
+ close(info->mRTPSocket);
+ close(info->mRTCPSocket);
+ }
+
+ mTracks.clear();
+ mFirstAccessUnit = true;
+ mAllTracksHaveTime = false;
+ mNTPAnchorUs = -1;
+ mMediaAnchorUs = -1;
+ mLastMediaTimeUs = -1;
+ mNumAccessUnitsReceived = 0;
+ mReceivedFirstRTCPPacket = false;
+ mReceivedFirstRTPPacket = false;
+ mPausing = false;
+ mPauseGeneration = 0;
+
+ (new AMessage)->postReply(replyID);
+
+ break;
+ }
+ case kWhatPollBuffering:
+ break;
+ default:
+ TRESPASS();
+ }
+}
+
+void NuPlayer::RTPSource::setTargetBitrate(int32_t bitrate) {
+ mRTPConn->setTargetBitrate(bitrate);
+}
+
+void NuPlayer::RTPSource::onTimeUpdate(int32_t trackIndex, uint32_t rtpTime, uint64_t ntpTime) {
+ ALOGV("onTimeUpdate track %d, rtpTime = 0x%08x, ntpTime = %#016llx",
+ trackIndex, rtpTime, (long long)ntpTime);
+
+ // convert ntpTime in Q32 seconds to microseconds. Note: this will not lose precision
+ // because ntpTimeUs is at most 52 bits (double holds 53 bits)
+ int64_t ntpTimeUs = (int64_t)(ntpTime * 1E6 / (1ll << 32));
+
+ TrackInfo *track = &mTracks.editItemAt(trackIndex);
+
+ track->mRTPAnchor = rtpTime;
+ track->mNTPAnchorUs = ntpTimeUs;
+
+ if (mNTPAnchorUs < 0) {
+ mNTPAnchorUs = ntpTimeUs;
+ mMediaAnchorUs = mLastMediaTimeUs;
+ }
+
+ if (!mAllTracksHaveTime) {
+ bool allTracksHaveTime = (mTracks.size() > 0);
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ TrackInfo *track = &mTracks.editItemAt(i);
+ if (track->mNTPAnchorUs < 0) {
+ allTracksHaveTime = false;
+ break;
+ }
+ }
+ if (allTracksHaveTime) {
+ mAllTracksHaveTime = true;
+ ALOGI("Time now established for all tracks.");
+ }
+ }
+ if (mAllTracksHaveTime && dataReceivedOnAllChannels()) {
+ // Time is now established, lets start timestamping immediately
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ TrackInfo *trackInfo = &mTracks.editItemAt(i);
+ while (!trackInfo->mPackets.empty()) {
+ sp<ABuffer> accessUnit = *trackInfo->mPackets.begin();
+ trackInfo->mPackets.erase(trackInfo->mPackets.begin());
+
+ if (addMediaTimestamp(i, trackInfo, accessUnit)) {
+ postQueueAccessUnit(i, accessUnit);
+ }
+ }
+ }
+ }
+}
+
+bool NuPlayer::RTPSource::addMediaTimestamp(
+ int32_t trackIndex, const TrackInfo *track,
+ const sp<ABuffer> &accessUnit) {
+
+ uint32_t rtpTime;
+ CHECK(accessUnit->meta()->findInt32(
+ "rtp-time", (int32_t *)&rtpTime));
+
+ int64_t relRtpTimeUs =
+ (((int64_t)rtpTime - (int64_t)track->mRTPAnchor) * 1000000ll)
+ / track->mTimeScale;
+
+ int64_t ntpTimeUs = track->mNTPAnchorUs + relRtpTimeUs;
+
+ int64_t mediaTimeUs = mMediaAnchorUs + ntpTimeUs - mNTPAnchorUs;
+
+ if (mediaTimeUs > mLastMediaTimeUs) {
+ mLastMediaTimeUs = mediaTimeUs;
+ }
+
+ if (mediaTimeUs < 0) {
+ ALOGV("dropping early accessUnit.");
+ return false;
+ }
+
+ ALOGV("track %d rtpTime=%u mediaTimeUs = %lld us (%.2f secs)",
+ trackIndex, rtpTime, (long long)mediaTimeUs, mediaTimeUs / 1E6);
+
+ accessUnit->meta()->setInt64("timeUs", mediaTimeUs);
+
+ return true;
+}
+
+bool NuPlayer::RTPSource::dataReceivedOnAllChannels() {
+ TrackInfo *track;
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ track = &mTracks.editItemAt(i);
+ if (track->mPackets.empty()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void NuPlayer::RTPSource::postQueueAccessUnit(
+ size_t trackIndex, const sp<ABuffer> &accessUnit) {
+ sp<AMessage> msg = new AMessage(kWhatAccessUnit, this);
+ msg->setInt32("what", kWhatAccessUnit);
+ msg->setSize("trackIndex", trackIndex);
+ msg->setBuffer("accessUnit", accessUnit);
+ msg->post();
+}
+
+void NuPlayer::RTPSource::postQueueEOS(size_t trackIndex, status_t finalResult) {
+ sp<AMessage> msg = new AMessage(kWhatEOS, this);
+ msg->setInt32("what", kWhatEOS);
+ msg->setSize("trackIndex", trackIndex);
+ msg->setInt32("finalResult", finalResult);
+ msg->post();
+}
+
+sp<MetaData> NuPlayer::RTPSource::getTrackFormat(size_t index, int32_t *timeScale) {
+ CHECK_GE(index, 0u);
+ CHECK_LT(index, mTracks.size());
+
+ const TrackInfo &info = mTracks.itemAt(index);
+
+ *timeScale = info.mTimeScale;
+
+ return info.mPacketSource->getFormat();
+}
+
+void NuPlayer::RTPSource::onConnected() {
+ ALOGV("onConnected");
+ mState = CONNECTED;
+}
+
+void NuPlayer::RTPSource::onDisconnected(const sp<AMessage> &msg) {
+ if (mState == DISCONNECTED) {
+ return;
+ }
+
+ status_t err;
+ CHECK(msg->findInt32("result", &err));
+ CHECK_NE(err, (status_t)OK);
+
+// mLooper->unregisterHandler(mHandler->id());
+// mHandler.clear();
+
+ if (mState == CONNECTING) {
+ // We're still in the preparation phase, signal that it
+ // failed.
+ notifyPrepared(err);
+ }
+
+ mState = DISCONNECTED;
+// setError(err);
+
+}
+
+status_t NuPlayer::RTPSource::setParameter(const String8 &key, const String8 &value) {
+ ALOGV("setParameter: key (%s) => value (%s)", key.string(), value.string());
+
+ bool isAudioKey = key.contains("audio");
+ TrackInfo *info = NULL;
+ for (unsigned i = 0; i < mTracks.size(); ++i) {
+ info = &mTracks.editItemAt(i);
+ if (info != NULL && info->mIsAudio == isAudioKey) {
+ ALOGV("setParameter: %s track (%d) found", isAudioKey ? "audio" : "video" , i);
+ break;
+ }
+ }
+
+ if (info == NULL) {
+ TrackInfo newTrackInfo;
+ newTrackInfo.mIsAudio = isAudioKey;
+ mTracks.push(newTrackInfo);
+ info = &mTracks.editTop();
+ info->mJbTimeMs = 300;
+ }
+
+ if (key == "rtp-param-mime-type") {
+ info->mMimeType = value;
+
+ const char *mime = value.string();
+ const char *delimiter = strchr(mime, '/');
+ info->mCodecName = delimiter ? (delimiter + 1) : "<none>";
+
+ ALOGV("rtp-param-mime-type: mMimeType (%s) => mCodecName (%s)",
+ info->mMimeType.string(), info->mCodecName.string());
+ } else if (key == "video-param-decoder-profile") {
+ info->mCodecProfile = atoi(value);
+ } else if (key == "video-param-decoder-level") {
+ info->mCodecLevel = atoi(value);
+ } else if (key == "video-param-width") {
+ info->mWidth = atoi(value);
+ } else if (key == "video-param-height") {
+ info->mHeight = atoi(value);
+ } else if (key == "rtp-param-local-ip") {
+ info->mLocalIp = value;
+ } else if (key == "rtp-param-local-port") {
+ info->mLocalPort = atoi(value);
+ } else if (key == "rtp-param-remote-ip") {
+ info->mRemoteIp = value;
+ } else if (key == "rtp-param-remote-port") {
+ info->mRemotePort = atoi(value);
+ } else if (key == "rtp-param-payload-type") {
+ info->mPayloadType = atoi(value);
+ } else if (key == "rtp-param-as") {
+ //AS means guaranteed bit rate that negotiated from sdp.
+ info->mAS = atoi(value);
+ } else if (key == "rtp-param-rtp-timeout") {
+ } else if (key == "rtp-param-rtcp-timeout") {
+ } else if (key == "rtp-param-time-scale") {
+ } else if (key == "rtp-param-self-id") {
+ info->mSelfID = atoi(value);
+ } else if (key == "rtp-param-ext-cvo-extmap") {
+ info->mCVOExtMap = atoi(value);
+ } else if (key == "rtp-param-set-socket-network") {
+ int64_t networkHandle = atoll(value);
+ setSocketNetwork(networkHandle);
+ } else if (key == "rtp-param-jitter-buffer-time") {
+ info->mJbTimeMs = atoi(value);
+ }
+
+ return OK;
+}
+
+status_t NuPlayer::RTPSource::setParameters(const String8 &params) {
+ ALOGV("setParameters: %s", params.string());
+ const char *cparams = params.string();
+ const char *key_start = cparams;
+ for (;;) {
+ const char *equal_pos = strchr(key_start, '=');
+ if (equal_pos == NULL) {
+ ALOGE("Parameters %s miss a value", cparams);
+ return BAD_VALUE;
+ }
+ String8 key(key_start, equal_pos - key_start);
+ TrimString(&key);
+ if (key.length() == 0) {
+ ALOGE("Parameters %s contains an empty key", cparams);
+ return BAD_VALUE;
+ }
+ const char *value_start = equal_pos + 1;
+ const char *semicolon_pos = strchr(value_start, ';');
+ String8 value;
+ if (semicolon_pos == NULL) {
+ value.setTo(value_start);
+ } else {
+ value.setTo(value_start, semicolon_pos - value_start);
+ }
+ if (setParameter(key, value) != OK) {
+ return BAD_VALUE;
+ }
+ if (semicolon_pos == NULL) {
+ break; // Reaches the end
+ }
+ key_start = semicolon_pos + 1;
+ }
+ return OK;
+}
+
+void NuPlayer::RTPSource::setSocketNetwork(int64_t networkHandle) {
+ ALOGV("setSocketNetwork: %llu", (unsigned long long)networkHandle);
+
+ TrackInfo *info = NULL;
+ for (size_t i = 0; i < mTracks.size(); ++i) {
+ info = &mTracks.editItemAt(i);
+
+ if (info == NULL)
+ break;
+
+ info->mSocketNetwork = networkHandle;
+ }
+}
+
+// Trim both leading and trailing whitespace from the given string.
+//static
+void NuPlayer::RTPSource::TrimString(String8 *s) {
+ size_t num_bytes = s->bytes();
+ const char *data = s->string();
+
+ size_t leading_space = 0;
+ while (leading_space < num_bytes && isspace(data[leading_space])) {
+ ++leading_space;
+ }
+
+ size_t i = num_bytes;
+ while (i > leading_space && isspace(data[i - 1])) {
+ --i;
+ }
+
+ s->setTo(String8(&data[leading_space], i - leading_space));
+}
+
+} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/RTPSource.h b/media/libmediaplayerservice/nuplayer/RTPSource.h
new file mode 100644
index 0000000..fb2d3b9
--- /dev/null
+++ b/media/libmediaplayerservice/nuplayer/RTPSource.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RTP_SOURCE_H_
+
+#define RTP_SOURCE_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/Utils.h>
+#include <media/BufferingSettings.h>
+
+#include <utils/KeyedVector.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+
+#include "AnotherPacketSource.h"
+#include "APacketSource.h"
+#include "ARTPConnection.h"
+#include "ASessionDescription.h"
+#include "NuPlayerSource.h"
+
+
+
+
+
+
+namespace android {
+
+struct ALooper;
+struct AnotherPacketSource;
+
+struct NuPlayer::RTPSource : public NuPlayer::Source {
+ RTPSource(
+ const sp<AMessage> &notify,
+ const String8& rtpParams);
+
+ enum {
+ RTP_FIRST_PACKET = 100,
+ RTCP_FIRST_PACKET = 101,
+ RTP_QUALITY = 102,
+ RTCP_TSFB = 205,
+ RTCP_PSFB = 206,
+ RTP_CVO = 300,
+ RTP_AUTODOWN = 400,
+ };
+
+ virtual status_t getBufferingSettings(
+ BufferingSettings* buffering /* nonnull */) override;
+ virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
+
+ virtual void prepareAsync();
+ virtual void start();
+ virtual void stop();
+ virtual void pause();
+ virtual void resume();
+
+ virtual status_t feedMoreTSData();
+
+ virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+
+ virtual status_t getDuration(int64_t *durationUs);
+ virtual status_t seekTo(
+ int64_t seekTimeUs,
+ MediaPlayerSeekMode mode = MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC) override;
+
+ virtual bool isRealTime() const;
+
+ void onMessageReceived(const sp<AMessage> &msg);
+
+ virtual void setTargetBitrate(int32_t bitrate) override;
+
+protected:
+ virtual ~RTPSource();
+
+ virtual sp<MetaData> getFormatMeta(bool audio);
+
+private:
+ enum {
+ kWhatAccessUnit = 'accU',
+ kWhatAccessUnitComplete = 'accu',
+ kWhatDisconnect = 'disc',
+ kWhatEOS = 'eos!',
+ kWhatPollBuffering = 'poll',
+ kWhatSetBufferingSettings = 'sBuS',
+ };
+
+ const int64_t kBufferingPollIntervalUs = 1000000ll;
+
+ enum State {
+ DISCONNECTED,
+ CONNECTING,
+ CONNECTED,
+ PAUSED,
+ };
+
+ struct TrackInfo {
+
+ /* SDP of track */
+ bool mIsAudio;
+ int32_t mPayloadType;
+ String8 mMimeType;
+ String8 mCodecName;
+ int32_t mCodecProfile;
+ int32_t mCodecLevel;
+ int32_t mWidth;
+ int32_t mHeight;
+ String8 mLocalIp;
+ String8 mRemoteIp;
+ int32_t mLocalPort;
+ int32_t mRemotePort;
+ int64_t mSocketNetwork;
+ int32_t mTimeScale;
+ int32_t mAS;
+
+ /* RTP jitter buffer time in milliseconds */
+ uint32_t mJbTimeMs;
+ /* Unique ID indicates itself */
+ uint32_t mSelfID;
+ /* extmap:<value> for CVO will be set to here */
+ int32_t mCVOExtMap;
+
+ /* a copy of TrackInfo in RTSPSource */
+ sp<AnotherPacketSource> mSource;
+ uint32_t mRTPTime;
+ int64_t mNormalPlaytimeUs;
+ bool mNPTMappingValid;
+
+ /* a copy of TrackInfo in MyHandler.h */
+ int mRTPSocket;
+ int mRTCPSocket;
+ uint32_t mFirstSeqNumInSegment;
+ bool mNewSegment;
+ int32_t mAllowedStaleAccessUnits;
+ uint32_t mRTPAnchor;
+ int64_t mNTPAnchorUs;
+ bool mEOSReceived;
+ uint32_t mNormalPlayTimeRTP;
+ int64_t mNormalPlayTimeUs;
+ sp<APacketSource> mPacketSource;
+ List<sp<ABuffer>> mPackets;
+ };
+
+ const String8 mRTPParams;
+ uint32_t mFlags;
+ State mState;
+ status_t mFinalResult;
+
+ // TODO: check whether the 3 parameters below are needed.
+ Mutex mBufferingLock;
+ bool mBuffering;
+ bool mInPreparationPhase;
+ Mutex mBufferingSettingsLock;
+ BufferingSettings mBufferingSettings;
+
+ sp<ALooper> mLooper;
+
+ sp<ARTPConnection> mRTPConn;
+
+ Vector<TrackInfo> mTracks;
+ sp<AnotherPacketSource> mAudioTrack;
+ sp<AnotherPacketSource> mVideoTrack;
+
+ int64_t mEOSTimeoutAudio;
+ int64_t mEOSTimeoutVideo;
+
+ /* MyHandler.h */
+ bool mFirstAccessUnit;
+ bool mAllTracksHaveTime;
+ int64_t mNTPAnchorUs;
+ int64_t mMediaAnchorUs;
+ int64_t mLastMediaTimeUs;
+ int64_t mNumAccessUnitsReceived;
+ int32_t mLastCVOUpdated;
+ bool mReceivedFirstRTCPPacket;
+ bool mReceivedFirstRTPPacket;
+ bool mPausing;
+ int32_t mPauseGeneration;
+
+ sp<AnotherPacketSource> getSource(bool audio);
+
+ /* MyHandler.h */
+ void onTimeUpdate(int32_t trackIndex, uint32_t rtpTime, uint64_t ntpTime);
+ bool addMediaTimestamp(int32_t trackIndex, const TrackInfo *track,
+ const sp<ABuffer> &accessUnit);
+ bool dataReceivedOnAllChannels();
+ void postQueueAccessUnit(size_t trackIndex, const sp<ABuffer> &accessUnit);
+ void postQueueEOS(size_t trackIndex, status_t finalResult);
+ sp<MetaData> getTrackFormat(size_t index, int32_t *timeScale);
+ void onConnected();
+ void onDisconnected(const sp<AMessage> &msg);
+
+ void schedulePollBuffering();
+ void onPollBuffering();
+
+ bool haveSufficientDataOnAllTracks();
+
+ void setEOSTimeout(bool audio, int64_t timeout);
+
+ status_t setParameters(const String8 &params);
+ status_t setParameter(const String8 &key, const String8 &value);
+ void setSocketNetwork(int64_t networkHandle);
+ static void TrimString(String8 *s);
+
+ DISALLOW_EVIL_CONSTRUCTORS(RTPSource);
+};
+
+} // namespace android
+
+#endif // RTP_SOURCE_H_
diff --git a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
index f114046..c81a659 100644
--- a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
+++ b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
@@ -65,6 +65,14 @@
return true;
}
+ virtual bool overrideProcessInfo(
+ int /* pid */, int /* procState */, int /* oomScore */) {
+ return true;
+ }
+
+ virtual void removeProcessInfoOverride(int /* pid */) {
+ }
+
private:
DISALLOW_EVIL_CONSTRUCTORS(FakeProcessInfo);
};
diff --git a/media/libmediatranscoding/.clang-format b/media/libmediatranscoding/.clang-format
index 3198d00..f23b842 100644
--- a/media/libmediatranscoding/.clang-format
+++ b/media/libmediatranscoding/.clang-format
@@ -26,4 +26,26 @@
DerivePointerAlignment: false
IndentWidth: 4
PointerAlignment: Left
-TabWidth: 4
\ No newline at end of file
+TabWidth: 4
+
+# Deviations from the above file:
+# "Don't indent the section label"
+AccessModifierOffset: -4
+# "Each line of text in your code should be at most 100 columns long."
+ColumnLimit: 100
+# "Constructor initializer lists can be all on one line or with subsequent
+# lines indented eight spaces.". clang-format does not support having the colon
+# on the same line as the constructor function name, so this is the best
+# approximation of that rule, which makes all entries in the list (except the
+# first one) have an eight space indentation.
+ConstructorInitializerIndentWidth: 6
+# There is nothing in go/droidcppstyle about case labels, but there seems to be
+# more code that does not indent the case labels in frameworks/base.
+IndentCaseLabels: false
+# There have been some bugs in which subsequent formatting operations introduce
+# weird comment jumps.
+ReflowComments: false
+# Android supports C++17 now, but it seems only Cpp11 will work for now.
+# "Cpp11 is a deprecated alias for Latest" according to
+# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
+Standard: Cpp11
diff --git a/media/libmediatranscoding/Android.bp b/media/libmediatranscoding/Android.bp
index f948bd8..7329c63 100644
--- a/media/libmediatranscoding/Android.bp
+++ b/media/libmediatranscoding/Android.bp
@@ -14,29 +14,49 @@
* limitations under the License.
*/
+filegroup {
+ name: "libmediatranscoding_aidl",
+ srcs: [
+ "aidl/android/media/IMediaTranscodingService.aidl",
+ "aidl/android/media/ITranscodingClient.aidl",
+ "aidl/android/media/ITranscodingClientCallback.aidl",
+ "aidl/android/media/TranscodingErrorCode.aidl",
+ "aidl/android/media/TranscodingSessionPriority.aidl",
+ "aidl/android/media/TranscodingSessionStats.aidl",
+ "aidl/android/media/TranscodingType.aidl",
+ "aidl/android/media/TranscodingVideoCodecType.aidl",
+ "aidl/android/media/TranscodingVideoTrackFormat.aidl",
+ "aidl/android/media/TranscodingSessionParcel.aidl",
+ "aidl/android/media/TranscodingRequestParcel.aidl",
+ "aidl/android/media/TranscodingResultParcel.aidl",
+ "aidl/android/media/TranscodingTestConfig.aidl",
+ ],
+ path: "aidl",
+}
+
// AIDL interfaces of MediaTranscoding.
aidl_interface {
name: "mediatranscoding_aidl_interface",
unstable: true,
local_include_dir: "aidl",
- srcs: [
- "aidl/android/media/IMediaTranscodingService.aidl",
- "aidl/android/media/ITranscodingServiceClient.aidl",
- "aidl/android/media/TranscodingErrorCode.aidl",
- "aidl/android/media/TranscodingJobPriority.aidl",
- "aidl/android/media/TranscodingType.aidl",
- "aidl/android/media/TranscodingVideoCodecType.aidl",
- "aidl/android/media/TranscodingJobParcel.aidl",
- "aidl/android/media/TranscodingRequestParcel.aidl",
- "aidl/android/media/TranscodingResultParcel.aidl",
- ],
+ srcs: [":libmediatranscoding_aidl"],
+ backend:
+ {
+ java: {
+ enabled: true,
+ },
+ },
}
cc_library_shared {
name: "libmediatranscoding",
srcs: [
- "TranscodingClientManager.cpp"
+ "TranscodingClientManager.cpp",
+ "TranscodingSessionController.cpp",
+ "TranscodingResourcePolicy.cpp",
+ "TranscodingUidPolicy.cpp",
+ "TranscoderWrapper.cpp",
],
shared_libs: [
@@ -44,18 +64,30 @@
"libcutils",
"liblog",
"libutils",
+ "libmediatranscoder",
+ "libbinder",
+ "libmediandk",
+ ],
+ export_shared_lib_headers: [
+ "libmediandk",
],
export_include_dirs: ["include"],
static_libs: [
"mediatranscoding_aidl_interface-ndk_platform",
+ "resourcemanager_aidl_interface-ndk_platform",
+ "resourceobserver_aidl_interface-ndk_platform",
],
cflags: [
- "-Werror",
- "-Wno-error=deprecated-declarations",
"-Wall",
+ "-Werror",
+ "-Wformat",
+ "-Wno-error=deprecated-declarations",
+ "-Wthread-safety",
+ "-Wunused",
+ "-Wunreachable-code",
],
sanitize: {
diff --git a/media/libmediatranscoding/OWNERS b/media/libmediatranscoding/OWNERS
index 02287cb..b08d573 100644
--- a/media/libmediatranscoding/OWNERS
+++ b/media/libmediatranscoding/OWNERS
@@ -1,3 +1,5 @@
-akersten@google.com
+chz@google.com
+gokrishnan@google.com
hkuang@google.com
lnilsson@google.com
+pawin@google.com
diff --git a/media/libmediatranscoding/TranscoderWrapper.cpp b/media/libmediatranscoding/TranscoderWrapper.cpp
new file mode 100644
index 0000000..61e767c
--- /dev/null
+++ b/media/libmediatranscoding/TranscoderWrapper.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TranscoderWrapper"
+
+#include <aidl/android/media/TranscodingErrorCode.h>
+#include <aidl/android/media/TranscodingRequestParcel.h>
+#include <media/MediaTranscoder.h>
+#include <media/NdkCommon.h>
+#include <media/TranscoderWrapper.h>
+#include <utils/Log.h>
+
+#include <thread>
+
+namespace android {
+using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::TranscodingErrorCode;
+using ::aidl::android::media::TranscodingVideoCodecType;
+using ::aidl::android::media::TranscodingVideoTrackFormat;
+
+static TranscodingErrorCode toTranscodingError(media_status_t status) {
+ switch (status) {
+ case AMEDIA_OK:
+ return TranscodingErrorCode::kNoError;
+ case AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE: // FALLTHRU
+ case AMEDIACODEC_ERROR_RECLAIMED:
+ return TranscodingErrorCode::kInsufficientResources;
+ case AMEDIA_ERROR_MALFORMED:
+ return TranscodingErrorCode::kMalformed;
+ case AMEDIA_ERROR_UNSUPPORTED:
+ return TranscodingErrorCode::kUnsupported;
+ case AMEDIA_ERROR_INVALID_OBJECT: // FALLTHRU
+ case AMEDIA_ERROR_INVALID_PARAMETER:
+ return TranscodingErrorCode::kInvalidParameter;
+ case AMEDIA_ERROR_INVALID_OPERATION:
+ return TranscodingErrorCode::kInvalidOperation;
+ case AMEDIA_ERROR_IO:
+ return TranscodingErrorCode::kErrorIO;
+ case AMEDIA_ERROR_UNKNOWN: // FALLTHRU
+ default:
+ return TranscodingErrorCode::kUnknown;
+ }
+}
+
+static AMediaFormat* getVideoFormat(
+ const char* originalMime,
+ const std::optional<TranscodingVideoTrackFormat>& requestedFormat) {
+ if (requestedFormat == std::nullopt) {
+ return nullptr;
+ }
+
+ AMediaFormat* format = AMediaFormat_new();
+ bool changed = false;
+ if (requestedFormat->codecType == TranscodingVideoCodecType::kHevc &&
+ strcmp(originalMime, AMEDIA_MIMETYPE_VIDEO_HEVC)) {
+ AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, AMEDIA_MIMETYPE_VIDEO_HEVC);
+ changed = true;
+ } else if (requestedFormat->codecType == TranscodingVideoCodecType::kAvc &&
+ strcmp(originalMime, AMEDIA_MIMETYPE_VIDEO_AVC)) {
+ AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, AMEDIA_MIMETYPE_VIDEO_AVC);
+ changed = true;
+ }
+ if (requestedFormat->bitrateBps > 0) {
+ AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, requestedFormat->bitrateBps);
+ changed = true;
+ }
+ // TODO: translate other fields from requestedFormat to the format for MediaTranscoder.
+ // Also need to determine more settings to expose in TranscodingVideoTrackFormat.
+ if (!changed) {
+ AMediaFormat_delete(format);
+ // Use null format for passthru.
+ format = nullptr;
+ }
+ return format;
+}
+
+//static
+std::string TranscoderWrapper::toString(const Event& event) {
+ std::string typeStr;
+ switch (event.type) {
+ case Event::Start:
+ typeStr = "Start";
+ break;
+ case Event::Pause:
+ typeStr = "Pause";
+ break;
+ case Event::Resume:
+ typeStr = "Resume";
+ break;
+ case Event::Stop:
+ typeStr = "Stop";
+ break;
+ case Event::Finish:
+ typeStr = "Finish";
+ break;
+ case Event::Error:
+ typeStr = "Error";
+ break;
+ case Event::Progress:
+ typeStr = "Progress";
+ break;
+ default:
+ return "(unknown)";
+ }
+ std::string result;
+ result = "session {" + std::to_string(event.clientId) + "," + std::to_string(event.sessionId) +
+ "}: " + typeStr;
+ if (event.type == Event::Error || event.type == Event::Progress) {
+ result += " " + std::to_string(event.arg);
+ }
+ return result;
+}
+
+class TranscoderWrapper::CallbackImpl : public MediaTranscoder::CallbackInterface {
+public:
+ CallbackImpl(const std::shared_ptr<TranscoderWrapper>& owner, ClientIdType clientId,
+ SessionIdType sessionId)
+ : mOwner(owner), mClientId(clientId), mSessionId(sessionId) {}
+
+ virtual void onFinished(const MediaTranscoder* transcoder __unused) override {
+ auto owner = mOwner.lock();
+ if (owner != nullptr) {
+ owner->onFinish(mClientId, mSessionId);
+ }
+ }
+
+ virtual void onError(const MediaTranscoder* transcoder __unused,
+ media_status_t error) override {
+ auto owner = mOwner.lock();
+ if (owner != nullptr) {
+ owner->onError(mClientId, mSessionId, error);
+ }
+ }
+
+ virtual void onProgressUpdate(const MediaTranscoder* transcoder __unused,
+ int32_t progress) override {
+ auto owner = mOwner.lock();
+ if (owner != nullptr) {
+ owner->onProgress(mClientId, mSessionId, progress);
+ }
+ }
+
+ virtual void onCodecResourceLost(const MediaTranscoder* transcoder __unused,
+ const std::shared_ptr<const Parcel>& pausedState
+ __unused) override {
+ ALOGV("%s: session {%lld, %d}", __FUNCTION__, (long long)mClientId, mSessionId);
+ }
+
+private:
+ std::weak_ptr<TranscoderWrapper> mOwner;
+ ClientIdType mClientId;
+ SessionIdType mSessionId;
+};
+
+TranscoderWrapper::TranscoderWrapper() : mCurrentClientId(0), mCurrentSessionId(-1) {
+ std::thread(&TranscoderWrapper::threadLoop, this).detach();
+}
+
+void TranscoderWrapper::setCallback(const std::shared_ptr<TranscoderCallbackInterface>& cb) {
+ mCallback = cb;
+}
+
+static bool isResourceError(media_status_t err) {
+ return err == AMEDIACODEC_ERROR_RECLAIMED || err == AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE;
+}
+
+void TranscoderWrapper::reportError(ClientIdType clientId, SessionIdType sessionId,
+ media_status_t err) {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ if (isResourceError(err)) {
+ // Add a placeholder pause state to mPausedStateMap. This is required when resuming.
+ // TODO: remove this when transcoder pause/resume logic is ready. New logic will
+ // no longer use the pause states.
+ auto it = mPausedStateMap.find(SessionKeyType(clientId, sessionId));
+ if (it == mPausedStateMap.end()) {
+ mPausedStateMap.emplace(SessionKeyType(clientId, sessionId),
+ std::shared_ptr<const Parcel>());
+ }
+
+ callback->onResourceLost();
+ } else {
+ callback->onError(clientId, sessionId, toTranscodingError(err));
+ }
+ }
+}
+
+void TranscoderWrapper::start(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
+ queueEvent(Event::Start, clientId, sessionId, [=] {
+ media_status_t err = handleStart(clientId, sessionId, request, clientCb);
+
+ if (err != AMEDIA_OK) {
+ cleanup();
+ reportError(clientId, sessionId, err);
+ } else {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onStarted(clientId, sessionId);
+ }
+ }
+ });
+}
+
+void TranscoderWrapper::pause(ClientIdType clientId, SessionIdType sessionId) {
+ queueEvent(Event::Pause, clientId, sessionId, [=] {
+ media_status_t err = handlePause(clientId, sessionId);
+
+ cleanup();
+
+ if (err != AMEDIA_OK) {
+ reportError(clientId, sessionId, err);
+ } else {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onPaused(clientId, sessionId);
+ }
+ }
+ });
+}
+
+void TranscoderWrapper::resume(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
+ queueEvent(Event::Resume, clientId, sessionId, [=] {
+ media_status_t err = handleResume(clientId, sessionId, request, clientCb);
+
+ if (err != AMEDIA_OK) {
+ cleanup();
+ reportError(clientId, sessionId, err);
+ } else {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onResumed(clientId, sessionId);
+ }
+ }
+ });
+}
+
+void TranscoderWrapper::stop(ClientIdType clientId, SessionIdType sessionId) {
+ queueEvent(Event::Stop, clientId, sessionId, [=] {
+ if (mTranscoder != nullptr && clientId == mCurrentClientId &&
+ sessionId == mCurrentSessionId) {
+ // Cancelling the currently running session.
+ media_status_t err = mTranscoder->cancel();
+ if (err != AMEDIA_OK) {
+ ALOGW("failed to stop transcoder: %d", err);
+ } else {
+ ALOGI("transcoder stopped");
+ }
+ cleanup();
+ } else {
+ // For sessions that are not currently running, release any pausedState for the session.
+ mPausedStateMap.erase(SessionKeyType(clientId, sessionId));
+ }
+ // No callback needed for stop.
+ });
+}
+
+void TranscoderWrapper::onFinish(ClientIdType clientId, SessionIdType sessionId) {
+ queueEvent(Event::Finish, clientId, sessionId, [=] {
+ if (mTranscoder != nullptr && clientId == mCurrentClientId &&
+ sessionId == mCurrentSessionId) {
+ cleanup();
+ }
+
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onFinish(clientId, sessionId);
+ }
+ });
+}
+
+void TranscoderWrapper::onError(ClientIdType clientId, SessionIdType sessionId,
+ media_status_t error) {
+ queueEvent(
+ Event::Error, clientId, sessionId,
+ [=] {
+ if (mTranscoder != nullptr && clientId == mCurrentClientId &&
+ sessionId == mCurrentSessionId) {
+ cleanup();
+ }
+ reportError(clientId, sessionId, error);
+ },
+ error);
+}
+
+void TranscoderWrapper::onProgress(ClientIdType clientId, SessionIdType sessionId,
+ int32_t progress) {
+ queueEvent(
+ Event::Progress, clientId, sessionId,
+ [=] {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onProgressUpdate(clientId, sessionId, progress);
+ }
+ },
+ progress);
+}
+
+media_status_t TranscoderWrapper::setupTranscoder(
+ ClientIdType clientId, SessionIdType sessionId, const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCb,
+ const std::shared_ptr<const Parcel>& pausedState) {
+ if (clientCb == nullptr) {
+ ALOGE("client callback is null");
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (mTranscoder != nullptr) {
+ ALOGE("transcoder already running");
+ return AMEDIA_ERROR_INVALID_OPERATION;
+ }
+
+ Status status;
+ ::ndk::ScopedFileDescriptor srcFd, dstFd;
+ status = clientCb->openFileDescriptor(request.sourceFilePath, "r", &srcFd);
+ if (!status.isOk() || srcFd.get() < 0) {
+ ALOGE("failed to open source");
+ return AMEDIA_ERROR_IO;
+ }
+
+ // Open dest file with "rw", as the transcoder could potentially reuse part of it
+ // for resume case. We might want to further differentiate and open with "w" only
+ // for start.
+ status = clientCb->openFileDescriptor(request.destinationFilePath, "rw", &dstFd);
+ if (!status.isOk() || dstFd.get() < 0) {
+ ALOGE("failed to open destination");
+ return AMEDIA_ERROR_IO;
+ }
+
+ mCurrentClientId = clientId;
+ mCurrentSessionId = sessionId;
+ mTranscoderCb = std::make_shared<CallbackImpl>(shared_from_this(), clientId, sessionId);
+ mTranscoder = MediaTranscoder::create(mTranscoderCb, pausedState);
+ if (mTranscoder == nullptr) {
+ ALOGE("failed to create transcoder");
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ media_status_t err = mTranscoder->configureSource(srcFd.get());
+ if (err != AMEDIA_OK) {
+ ALOGE("failed to configure source: %d", err);
+ return err;
+ }
+
+ std::vector<std::shared_ptr<AMediaFormat>> trackFormats = mTranscoder->getTrackFormats();
+ if (trackFormats.size() == 0) {
+ ALOGE("failed to get track formats!");
+ return AMEDIA_ERROR_MALFORMED;
+ }
+
+ for (int i = 0; i < trackFormats.size(); ++i) {
+ AMediaFormat* format = nullptr;
+ const char* mime = nullptr;
+ AMediaFormat_getString(trackFormats[i].get(), AMEDIAFORMAT_KEY_MIME, &mime);
+
+ if (!strncmp(mime, "video/", 6)) {
+ format = getVideoFormat(mime, request.requestedVideoTrackFormat);
+ }
+
+ err = mTranscoder->configureTrackFormat(i, format);
+ if (format != nullptr) {
+ AMediaFormat_delete(format);
+ }
+ if (err != AMEDIA_OK) {
+ ALOGE("failed to configure track format for track %d: %d", i, err);
+ return err;
+ }
+ }
+
+ err = mTranscoder->configureDestination(dstFd.get());
+ if (err != AMEDIA_OK) {
+ ALOGE("failed to configure dest: %d", err);
+ return err;
+ }
+
+ return AMEDIA_OK;
+}
+
+media_status_t TranscoderWrapper::handleStart(
+ ClientIdType clientId, SessionIdType sessionId, const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
+ ALOGI("%s: setting up transcoder for start", __FUNCTION__);
+ media_status_t err = setupTranscoder(clientId, sessionId, request, clientCb);
+ if (err != AMEDIA_OK) {
+ ALOGI("%s: failed to setup transcoder", __FUNCTION__);
+ return err;
+ }
+
+ err = mTranscoder->start();
+ if (err != AMEDIA_OK) {
+ ALOGE("%s: failed to start transcoder: %d", __FUNCTION__, err);
+ return err;
+ }
+
+ ALOGI("%s: transcoder started", __FUNCTION__);
+ return AMEDIA_OK;
+}
+
+media_status_t TranscoderWrapper::handlePause(ClientIdType clientId, SessionIdType sessionId) {
+ if (mTranscoder == nullptr) {
+ ALOGE("%s: transcoder is not running", __FUNCTION__);
+ return AMEDIA_ERROR_INVALID_OPERATION;
+ }
+
+ if (clientId != mCurrentClientId || sessionId != mCurrentSessionId) {
+ ALOGW("%s: stopping session {%lld, %d} that's not current session {%lld, %d}", __FUNCTION__,
+ (long long)clientId, sessionId, (long long)mCurrentClientId, mCurrentSessionId);
+ }
+
+ ALOGI("%s: pausing transcoder", __FUNCTION__);
+
+ std::shared_ptr<const Parcel> pauseStates;
+ media_status_t err = mTranscoder->pause(&pauseStates);
+ if (err != AMEDIA_OK) {
+ ALOGE("%s: failed to pause transcoder: %d", __FUNCTION__, err);
+ return err;
+ }
+ mPausedStateMap[SessionKeyType(clientId, sessionId)] = pauseStates;
+
+ ALOGI("%s: transcoder paused", __FUNCTION__);
+ return AMEDIA_OK;
+}
+
+media_status_t TranscoderWrapper::handleResume(
+ ClientIdType clientId, SessionIdType sessionId, const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCb) {
+ std::shared_ptr<const Parcel> pausedState;
+ auto it = mPausedStateMap.find(SessionKeyType(clientId, sessionId));
+ if (it != mPausedStateMap.end()) {
+ pausedState = it->second;
+ mPausedStateMap.erase(it);
+ } else {
+ ALOGE("%s: can't find paused state", __FUNCTION__);
+ return AMEDIA_ERROR_INVALID_OPERATION;
+ }
+
+ ALOGI("%s: setting up transcoder for resume", __FUNCTION__);
+ media_status_t err = setupTranscoder(clientId, sessionId, request, clientCb, pausedState);
+ if (err != AMEDIA_OK) {
+ ALOGE("%s: failed to setup transcoder: %d", __FUNCTION__, err);
+ return err;
+ }
+
+ err = mTranscoder->resume();
+ if (err != AMEDIA_OK) {
+ ALOGE("%s: failed to resume transcoder: %d", __FUNCTION__, err);
+ return err;
+ }
+
+ ALOGI("%s: transcoder resumed", __FUNCTION__);
+ return AMEDIA_OK;
+}
+
+void TranscoderWrapper::cleanup() {
+ mCurrentClientId = 0;
+ mCurrentSessionId = -1;
+ mTranscoderCb = nullptr;
+ mTranscoder = nullptr;
+}
+
+void TranscoderWrapper::queueEvent(Event::Type type, ClientIdType clientId, SessionIdType sessionId,
+ const std::function<void()> runnable, int32_t arg) {
+ std::scoped_lock lock{mLock};
+
+ mQueue.push_back({type, clientId, sessionId, runnable, arg});
+ mCondition.notify_one();
+}
+
+void TranscoderWrapper::threadLoop() {
+ std::unique_lock<std::mutex> lock{mLock};
+ // TranscoderWrapper currently lives in the transcoding service, as long as
+ // MediaTranscodingService itself.
+ while (true) {
+ // Wait for the next event.
+ while (mQueue.empty()) {
+ mCondition.wait(lock);
+ }
+
+ Event event = *mQueue.begin();
+ mQueue.pop_front();
+
+ ALOGD("%s: %s", __FUNCTION__, toString(event).c_str());
+
+ lock.unlock();
+ event.runnable();
+ lock.lock();
+ }
+}
+
+} // namespace android
diff --git a/media/libmediatranscoding/TranscodingClientManager.cpp b/media/libmediatranscoding/TranscodingClientManager.cpp
index 7252437..b57baa5 100644
--- a/media/libmediatranscoding/TranscodingClientManager.cpp
+++ b/media/libmediatranscoding/TranscodingClientManager.cpp
@@ -17,129 +17,383 @@
// #define LOG_NDEBUG 0
#define LOG_TAG "TranscodingClientManager"
+#include <aidl/android/media/BnTranscodingClient.h>
+#include <aidl/android/media/IMediaTranscodingService.h>
+#include <android/binder_ibinder.h>
#include <inttypes.h>
#include <media/TranscodingClientManager.h>
+#include <media/TranscodingRequest.h>
+#include <media/TranscodingUidPolicy.h>
+#include <private/android_filesystem_config.h>
#include <utils/Log.h>
-
+#include <utils/String16.h>
namespace android {
-using Status = ::ndk::ScopedAStatus;
+static_assert(sizeof(ClientIdType) == sizeof(void*), "ClientIdType should be pointer-sized");
-// static
-TranscodingClientManager& TranscodingClientManager::getInstance() {
- static TranscodingClientManager gInstance{};
- return gInstance;
+static constexpr const char* MEDIA_PROVIDER_PKG_NAME = "com.google.android.providers.media.module";
+
+using ::aidl::android::media::BnTranscodingClient;
+using ::aidl::android::media::IMediaTranscodingService; // For service error codes
+using ::aidl::android::media::TranscodingRequestParcel;
+using ::aidl::android::media::TranscodingSessionParcel;
+using Status = ::ndk::ScopedAStatus;
+using ::ndk::SpAIBinder;
+
+//static
+std::atomic<ClientIdType> TranscodingClientManager::sCookieCounter = 0;
+//static
+std::mutex TranscodingClientManager::sCookie2ClientLock;
+//static
+std::map<ClientIdType, std::shared_ptr<TranscodingClientManager::ClientImpl>>
+ TranscodingClientManager::sCookie2Client;
+///////////////////////////////////////////////////////////////////////////////
+
+// Convenience methods for constructing binder::Status objects for error returns
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ Status::fromServiceSpecificErrorWithMessage( \
+ errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, ##__VA_ARGS__))
+
+/**
+ * ClientImpl implements a single client and contains all its information.
+ */
+struct TranscodingClientManager::ClientImpl : public BnTranscodingClient {
+ /* The remote client callback that this ClientImpl is associated with.
+ * Once the ClientInfo is created, we hold an SpAIBinder so that the binder
+ * object doesn't get created again, otherwise the binder object pointer
+ * may not be unique.
+ */
+ SpAIBinder mClientBinder;
+ std::shared_ptr<ITranscodingClientCallback> mClientCallback;
+ /* A unique id assigned to the client by the service. This number is used
+ * by the service for indexing. Here we use the binder object's pointer
+ * (cast to int64_t) as the client id.
+ */
+ ClientIdType mClientId;
+ std::string mClientName;
+ std::string mClientOpPackageName;
+
+ // Next sessionId to assign.
+ std::atomic<int32_t> mNextSessionId;
+ // Whether this client has been unregistered already.
+ std::atomic<bool> mAbandoned;
+ // Weak pointer to the client manager for this client.
+ std::weak_ptr<TranscodingClientManager> mOwner;
+
+ ClientImpl(const std::shared_ptr<ITranscodingClientCallback>& callback,
+ const std::string& clientName, const std::string& opPackageName,
+ const std::weak_ptr<TranscodingClientManager>& owner);
+
+ Status submitRequest(const TranscodingRequestParcel& /*in_request*/,
+ TranscodingSessionParcel* /*out_session*/,
+ bool* /*_aidl_return*/) override;
+
+ Status cancelSession(int32_t /*in_sessionId*/, bool* /*_aidl_return*/) override;
+
+ Status getSessionWithId(int32_t /*in_sessionId*/, TranscodingSessionParcel* /*out_session*/,
+ bool* /*_aidl_return*/) override;
+
+ Status unregister() override;
+};
+
+TranscodingClientManager::ClientImpl::ClientImpl(
+ const std::shared_ptr<ITranscodingClientCallback>& callback, const std::string& clientName,
+ const std::string& opPackageName, const std::weak_ptr<TranscodingClientManager>& owner)
+ : mClientBinder((callback != nullptr) ? callback->asBinder() : nullptr),
+ mClientCallback(callback),
+ mClientId(sCookieCounter.fetch_add(1, std::memory_order_relaxed)),
+ mClientName(clientName),
+ mClientOpPackageName(opPackageName),
+ mNextSessionId(0),
+ mAbandoned(false),
+ mOwner(owner) {}
+
+Status TranscodingClientManager::ClientImpl::submitRequest(
+ const TranscodingRequestParcel& in_request, TranscodingSessionParcel* out_session,
+ bool* _aidl_return) {
+ *_aidl_return = false;
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ if (in_request.sourceFilePath.empty() || in_request.destinationFilePath.empty()) {
+ return Status::ok();
+ }
+
+ int32_t callingPid = AIBinder_getCallingPid();
+ int32_t callingUid = AIBinder_getCallingUid();
+ int32_t in_clientUid = in_request.clientUid;
+ int32_t in_clientPid = in_request.clientPid;
+
+ // Check if we can trust clientUid. Only a privileged caller could forward the
+ // uid on app client's behalf.
+ if (in_clientUid == IMediaTranscodingService::USE_CALLING_UID) {
+ in_clientUid = callingUid;
+ } else if (in_clientUid < 0) {
+ return Status::ok();
+ } else if (in_clientUid != callingUid && !owner->isTrustedCallingUid(callingUid)) {
+ ALOGE("MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ return STATUS_ERROR_FMT(
+ IMediaTranscodingService::ERROR_PERMISSION_DENIED,
+ "MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ }
+
+ // Check if we can trust clientPid. Only a privileged caller could forward the
+ // pid on app client's behalf.
+ if (in_clientPid == IMediaTranscodingService::USE_CALLING_PID) {
+ in_clientPid = callingPid;
+ } else if (in_clientPid < 0) {
+ return Status::ok();
+ } else if (in_clientPid != callingPid && !owner->isTrustedCallingUid(callingUid)) {
+ ALOGE("MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ return STATUS_ERROR_FMT(
+ IMediaTranscodingService::ERROR_PERMISSION_DENIED,
+ "MediaTranscodingService::registerClient rejected (clientPid %d, clientUid %d) "
+ "(don't trust callingUid %d)",
+ in_clientPid, in_clientUid, callingUid);
+ }
+
+ int32_t sessionId = mNextSessionId.fetch_add(1);
+
+ *_aidl_return = owner->mSessionController->submit(mClientId, sessionId, in_clientUid,
+ in_request, mClientCallback);
+
+ if (*_aidl_return) {
+ out_session->sessionId = sessionId;
+
+ // TODO(chz): is some of this coming from SessionController?
+ *(TranscodingRequest*)&out_session->request = in_request;
+ out_session->awaitNumberOfSessions = 0;
+ }
+
+ return Status::ok();
}
+Status TranscodingClientManager::ClientImpl::cancelSession(int32_t in_sessionId,
+ bool* _aidl_return) {
+ *_aidl_return = false;
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ if (in_sessionId < 0) {
+ return Status::ok();
+ }
+
+ *_aidl_return = owner->mSessionController->cancel(mClientId, in_sessionId);
+ return Status::ok();
+}
+
+Status TranscodingClientManager::ClientImpl::getSessionWithId(int32_t in_sessionId,
+ TranscodingSessionParcel* out_session,
+ bool* _aidl_return) {
+ *_aidl_return = false;
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (mAbandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ if (in_sessionId < 0) {
+ return Status::ok();
+ }
+
+ *_aidl_return =
+ owner->mSessionController->getSession(mClientId, in_sessionId, &out_session->request);
+
+ if (*_aidl_return) {
+ out_session->sessionId = in_sessionId;
+ out_session->awaitNumberOfSessions = 0;
+ }
+ return Status::ok();
+}
+
+Status TranscodingClientManager::ClientImpl::unregister() {
+ bool abandoned = mAbandoned.exchange(true);
+
+ std::shared_ptr<TranscodingClientManager> owner;
+ if (abandoned || (owner = mOwner.lock()) == nullptr) {
+ return Status::fromServiceSpecificError(IMediaTranscodingService::ERROR_DISCONNECTED);
+ }
+
+ // Use sessionId == -1 to cancel all realtime sessions for this client with the controller.
+ owner->mSessionController->cancel(mClientId, -1);
+ owner->removeClient(mClientId);
+
+ return Status::ok();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
// static
void TranscodingClientManager::BinderDiedCallback(void* cookie) {
- int32_t clientId = static_cast<int32_t>(reinterpret_cast<intptr_t>(cookie));
- ALOGD("Client %" PRId32 " is dead", clientId);
- // Don't check for pid validity since we know it's already dead.
- TranscodingClientManager& manager = TranscodingClientManager::getInstance();
- manager.removeClient(clientId);
+ ClientIdType clientId = reinterpret_cast<ClientIdType>(cookie);
+
+ ALOGD("Client %lld is dead", (long long)clientId);
+
+ std::shared_ptr<ClientImpl> client;
+
+ {
+ std::scoped_lock lock{sCookie2ClientLock};
+
+ auto it = sCookie2Client.find(clientId);
+ if (it != sCookie2Client.end()) {
+ client = it->second;
+ }
+ }
+
+ if (client != nullptr) {
+ client->unregister();
+ }
}
-TranscodingClientManager::TranscodingClientManager()
- : mDeathRecipient(AIBinder_DeathRecipient_new(BinderDiedCallback)) {
+TranscodingClientManager::TranscodingClientManager(
+ const std::shared_ptr<ControllerClientInterface>& controller)
+ : mDeathRecipient(AIBinder_DeathRecipient_new(BinderDiedCallback)),
+ mSessionController(controller),
+ mMediaProviderUid(-1) {
ALOGD("TranscodingClientManager started");
+ uid_t mpuid;
+ if (TranscodingUidPolicy::getUidForPackage(String16(MEDIA_PROVIDER_PKG_NAME), mpuid) ==
+ NO_ERROR) {
+ ALOGI("Found MediaProvider uid: %d", mpuid);
+ mMediaProviderUid = mpuid;
+ } else {
+ ALOGW("Couldn't get uid for MediaProvider.");
+ }
}
TranscodingClientManager::~TranscodingClientManager() {
ALOGD("TranscodingClientManager exited");
}
-bool TranscodingClientManager::isClientIdRegistered(int32_t clientId) const {
- std::scoped_lock lock{mLock};
- return mClientIdToClientInfoMap.find(clientId) != mClientIdToClientInfoMap.end();
-}
-
void TranscodingClientManager::dumpAllClients(int fd, const Vector<String16>& args __unused) {
String8 result;
const size_t SIZE = 256;
char buffer[SIZE];
+ std::scoped_lock lock{mLock};
- snprintf(buffer, SIZE, " Total num of Clients: %zu\n", mClientIdToClientInfoMap.size());
- result.append(buffer);
-
- if (mClientIdToClientInfoMap.size() > 0) {
- snprintf(buffer, SIZE, "========== Dumping all clients =========\n");
+ if (mClientIdToClientMap.size() > 0) {
+ snprintf(buffer, SIZE, "\n========== Dumping all clients =========\n");
result.append(buffer);
}
- for (const auto& iter : mClientIdToClientInfoMap) {
- const std::shared_ptr<ITranscodingServiceClient> client = iter.second->mClient;
- std::string clientName;
- Status status = client->getName(&clientName);
- if (!status.isOk()) {
- ALOGE("Failed to get client: %d information", iter.first);
- continue;
- }
- snprintf(buffer, SIZE, " -- Clients: %d name: %s\n", iter.first, clientName.c_str());
+ snprintf(buffer, SIZE, " Total num of Clients: %zu\n", mClientIdToClientMap.size());
+ result.append(buffer);
+
+ for (const auto& iter : mClientIdToClientMap) {
+ snprintf(buffer, SIZE, " Client %lld: pkg: %s\n", (long long)iter.first,
+ iter.second->mClientName.c_str());
result.append(buffer);
}
write(fd, result.string(), result.size());
}
-status_t TranscodingClientManager::addClient(std::unique_ptr<ClientInfo> client) {
- // Validate the client.
- if (client == nullptr || client->mClientId < 0 || client->mClientPid < 0 ||
- client->mClientUid < 0 || client->mClientOpPackageName.empty() ||
- client->mClientOpPackageName == "") {
- ALOGE("Invalid client");
- return BAD_VALUE;
+bool TranscodingClientManager::isTrustedCallingUid(uid_t uid) {
+ if (uid > 0 && uid == mMediaProviderUid) {
+ return true;
}
+ switch (uid) {
+ case AID_ROOT: // root user
+ case AID_SYSTEM:
+ case AID_SHELL:
+ case AID_MEDIA: // mediaserver
+ return true;
+ default:
+ return false;
+ }
+}
+
+status_t TranscodingClientManager::addClient(
+ const std::shared_ptr<ITranscodingClientCallback>& callback, const std::string& clientName,
+ const std::string& opPackageName, std::shared_ptr<ITranscodingClient>* outClient) {
+ // Validate the client.
+ if (callback == nullptr || clientName.empty() || opPackageName.empty()) {
+ ALOGE("Invalid client");
+ return IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT;
+ }
+
+ SpAIBinder binder = callback->asBinder();
+
std::scoped_lock lock{mLock};
- // Check if the client already exists.
- if (mClientIdToClientInfoMap.count(client->mClientId) != 0) {
- ALOGW("Client already exists.");
- return ALREADY_EXISTS;
+ // Checks if the client is already registered.
+ if (mRegisteredCallbacks.count((uintptr_t)binder.get()) > 0) {
+ return IMediaTranscodingService::ERROR_ALREADY_EXISTS;
}
- ALOGD("Adding client id %d pid: %d uid: %d %s", client->mClientId, client->mClientPid,
- client->mClientUid, client->mClientOpPackageName.c_str());
+ // Creates the client (with the id assigned by ClientImpl).
+ std::shared_ptr<ClientImpl> client = ::ndk::SharedRefBase::make<ClientImpl>(
+ callback, clientName, opPackageName, shared_from_this());
- AIBinder_linkToDeath(client->mClient->asBinder().get(), mDeathRecipient.get(),
+ ALOGD("Adding client id %lld, name %s, package %s", (long long)client->mClientId,
+ client->mClientName.c_str(), client->mClientOpPackageName.c_str());
+
+ {
+ std::scoped_lock lock{sCookie2ClientLock};
+ sCookie2Client.emplace(std::make_pair(client->mClientId, client));
+ }
+
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(),
reinterpret_cast<void*>(client->mClientId));
// Adds the new client to the map.
- mClientIdToClientInfoMap[client->mClientId] = std::move(client);
+ mRegisteredCallbacks.insert((uintptr_t)binder.get());
+ mClientIdToClientMap[client->mClientId] = client;
+
+ *outClient = client;
return OK;
}
-status_t TranscodingClientManager::removeClient(int32_t clientId) {
- ALOGD("Removing client id %d", clientId);
+status_t TranscodingClientManager::removeClient(ClientIdType clientId) {
+ ALOGD("Removing client id %lld", (long long)clientId);
std::scoped_lock lock{mLock};
// Checks if the client is valid.
- auto it = mClientIdToClientInfoMap.find(clientId);
- if (it == mClientIdToClientInfoMap.end()) {
- ALOGE("Client id %d does not exist", clientId);
- return INVALID_OPERATION;
+ auto it = mClientIdToClientMap.find(clientId);
+ if (it == mClientIdToClientMap.end()) {
+ ALOGE("Client id %lld does not exist", (long long)clientId);
+ return IMediaTranscodingService::ERROR_INVALID_OPERATION;
}
- std::shared_ptr<ITranscodingServiceClient> client = it->second->mClient;
+ SpAIBinder binder = it->second->mClientBinder;
// Check if the client still live. If alive, unlink the death.
- if (client) {
- AIBinder_unlinkToDeath(client->asBinder().get(), mDeathRecipient.get(),
- reinterpret_cast<void*>(clientId));
+ if (binder.get() != nullptr) {
+ AIBinder_unlinkToDeath(binder.get(), mDeathRecipient.get(),
+ reinterpret_cast<void*>(it->second->mClientId));
+ }
+
+ {
+ std::scoped_lock lock{sCookie2ClientLock};
+ sCookie2Client.erase(it->second->mClientId);
}
// Erase the entry.
- mClientIdToClientInfoMap.erase(it);
+ mClientIdToClientMap.erase(it);
+ mRegisteredCallbacks.erase((uintptr_t)binder.get());
return OK;
}
size_t TranscodingClientManager::getNumOfClients() const {
std::scoped_lock lock{mLock};
- return mClientIdToClientInfoMap.size();
+ return mClientIdToClientMap.size();
}
} // namespace android
diff --git a/media/libmediatranscoding/TranscodingResourcePolicy.cpp b/media/libmediatranscoding/TranscodingResourcePolicy.cpp
new file mode 100644
index 0000000..4fd8338
--- /dev/null
+++ b/media/libmediatranscoding/TranscodingResourcePolicy.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TranscodingResourcePolicy"
+
+#include <aidl/android/media/BnResourceObserver.h>
+#include <aidl/android/media/IResourceObserverService.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <binder/IServiceManager.h>
+#include <media/TranscodingResourcePolicy.h>
+#include <utils/Log.h>
+
+namespace android {
+
+using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::BnResourceObserver;
+using ::aidl::android::media::IResourceObserverService;
+using ::aidl::android::media::MediaObservableEvent;
+using ::aidl::android::media::MediaObservableFilter;
+using ::aidl::android::media::MediaObservableParcel;
+using ::aidl::android::media::MediaObservableType;
+
+static std::string toString(const MediaObservableParcel& observable) {
+ return "{" + ::aidl::android::media::toString(observable.type) + ", " +
+ std::to_string(observable.value) + "}";
+}
+
+struct TranscodingResourcePolicy::ResourceObserver : public BnResourceObserver {
+ explicit ResourceObserver(TranscodingResourcePolicy* owner) : mOwner(owner), mPid(getpid()) {}
+
+ // IResourceObserver
+ ::ndk::ScopedAStatus onStatusChanged(
+ MediaObservableEvent event, int32_t uid, int32_t pid,
+ const std::vector<MediaObservableParcel>& observables) override {
+ ALOGD("%s: %s, uid %d, pid %d, %s", __FUNCTION__,
+ ::aidl::android::media::toString(event).c_str(), uid, pid,
+ toString(observables[0]).c_str());
+
+ // Only report kIdle event for codec resources from other processes.
+ if (((uint64_t)event & (uint64_t)MediaObservableEvent::kIdle) != 0 && (pid != mPid)) {
+ for (auto& observable : observables) {
+ if (observable.type == MediaObservableType::kVideoSecureCodec ||
+ observable.type == MediaObservableType::kVideoNonSecureCodec) {
+ mOwner->onResourceAvailable();
+ break;
+ }
+ }
+ }
+ return ::ndk::ScopedAStatus::ok();
+ }
+
+ TranscodingResourcePolicy* mOwner;
+ const pid_t mPid;
+};
+
+// static
+void TranscodingResourcePolicy::BinderDiedCallback(void* cookie) {
+ TranscodingResourcePolicy* owner = reinterpret_cast<TranscodingResourcePolicy*>(cookie);
+ if (owner != nullptr) {
+ owner->unregisterSelf();
+ }
+    // TODO(chz): retry connecting to IResourceObserverService after failure.
+    // Also need to have back-up logic if IResourceObserverService is offline for
+    // a prolonged period of time. A possible alternative could be, during periods where
+    // IResourceObserverService is not available, trigger onResourceAvailable() every time
+    // the top uid changes (in hope that'll free up some codec instances that we could
+    // reclaim).
+}
+
+TranscodingResourcePolicy::TranscodingResourcePolicy()
+ : mRegistered(false), mDeathRecipient(AIBinder_DeathRecipient_new(BinderDiedCallback)) {
+ registerSelf();
+}
+
+TranscodingResourcePolicy::~TranscodingResourcePolicy() {
+ unregisterSelf();
+}
+
+void TranscodingResourcePolicy::registerSelf() {
+ ALOGI("TranscodingResourcePolicy: registerSelf");
+
+ ::ndk::SpAIBinder binder(AServiceManager_getService("media.resource_observer"));
+
+ std::scoped_lock lock{mRegisteredLock};
+
+ if (mRegistered) {
+ return;
+ }
+
+    // TODO(chz): retry connecting to IResourceObserverService after failure.
+ mService = IResourceObserverService::fromBinder(binder);
+ if (mService == nullptr) {
+ ALOGE("Failed to get IResourceObserverService");
+ return;
+ }
+
+ // Only register filters for codec resource available.
+ mObserver = ::ndk::SharedRefBase::make<ResourceObserver>(this);
+ std::vector<MediaObservableFilter> filters = {
+ {MediaObservableType::kVideoSecureCodec, MediaObservableEvent::kIdle},
+ {MediaObservableType::kVideoNonSecureCodec, MediaObservableEvent::kIdle}};
+
+ Status status = mService->registerObserver(mObserver, filters);
+ if (!status.isOk()) {
+ ALOGE("failed to register: error %d", status.getServiceSpecificError());
+ mService = nullptr;
+ mObserver = nullptr;
+ return;
+ }
+
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), reinterpret_cast<void*>(this));
+
+ ALOGD("@@@ registered observer");
+ mRegistered = true;
+}
+
+void TranscodingResourcePolicy::unregisterSelf() {
+ ALOGI("TranscodingResourcePolicy: unregisterSelf");
+
+ std::scoped_lock lock{mRegisteredLock};
+
+ if (!mRegistered) {
+ return;
+ }
+
+ ::ndk::SpAIBinder binder = mService->asBinder();
+ if (binder.get() != nullptr) {
+ Status status = mService->unregisterObserver(mObserver);
+ AIBinder_unlinkToDeath(binder.get(), mDeathRecipient.get(), reinterpret_cast<void*>(this));
+ }
+
+ mService = nullptr;
+ mObserver = nullptr;
+ mRegistered = false;
+}
+
+void TranscodingResourcePolicy::setCallback(
+ const std::shared_ptr<ResourcePolicyCallbackInterface>& cb) {
+ std::scoped_lock lock{mCallbackLock};
+ mResourcePolicyCallback = cb;
+}
+
+void TranscodingResourcePolicy::onResourceAvailable() {
+ std::shared_ptr<ResourcePolicyCallbackInterface> cb;
+ {
+ std::scoped_lock lock{mCallbackLock};
+ cb = mResourcePolicyCallback.lock();
+ }
+
+ if (cb != nullptr) {
+ cb->onResourceAvailable();
+ }
+}
+} // namespace android
diff --git a/media/libmediatranscoding/TranscodingSessionController.cpp b/media/libmediatranscoding/TranscodingSessionController.cpp
new file mode 100644
index 0000000..2306395
--- /dev/null
+++ b/media/libmediatranscoding/TranscodingSessionController.cpp
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TranscodingSessionController"
+
+#define VALIDATE_STATE 1
+
+#include <inttypes.h>
+#include <media/TranscodingSessionController.h>
+#include <media/TranscodingUidPolicy.h>
+#include <utils/Log.h>
+
+#include <utility>
+
+namespace android {
+
+static_assert((SessionIdType)-1 < 0, "SessionIdType should be signed");
+
+constexpr static uid_t OFFLINE_UID = -1;
+
+//static
+String8 TranscodingSessionController::sessionToString(const SessionKeyType& sessionKey) {
+ return String8::format("{client:%lld, session:%d}", (long long)sessionKey.first,
+ sessionKey.second);
+}
+
+//static
+const char* TranscodingSessionController::sessionStateToString(const Session::State sessionState) {
+ switch (sessionState) {
+ case Session::State::NOT_STARTED:
+ return "NOT_STARTED";
+ case Session::State::RUNNING:
+ return "RUNNING";
+ case Session::State::PAUSED:
+ return "PAUSED";
+ default:
+ break;
+ }
+ return "(unknown)";
+}
+
+TranscodingSessionController::TranscodingSessionController(
+ const std::shared_ptr<TranscoderInterface>& transcoder,
+ const std::shared_ptr<UidPolicyInterface>& uidPolicy,
+ const std::shared_ptr<ResourcePolicyInterface>& resourcePolicy)
+ : mTranscoder(transcoder),
+ mUidPolicy(uidPolicy),
+ mResourcePolicy(resourcePolicy),
+ mCurrentSession(nullptr),
+ mResourceLost(false) {
+ // Only push empty offline queue initially. Realtime queues are added when requests come in.
+ mUidSortedList.push_back(OFFLINE_UID);
+ mOfflineUidIterator = mUidSortedList.begin();
+ mSessionQueues.emplace(OFFLINE_UID, SessionQueueType());
+}
+
+TranscodingSessionController::~TranscodingSessionController() {}
+
+void TranscodingSessionController::dumpAllSessions(int fd, const Vector<String16>& args __unused) {
+ String8 result;
+
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ std::scoped_lock lock{mLock};
+
+ snprintf(buffer, SIZE, "\n========== Dumping all sessions queues =========\n");
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Total num of Sessions: %zu\n", mSessionMap.size());
+ result.append(buffer);
+
+ std::vector<int32_t> uids(mUidSortedList.begin(), mUidSortedList.end());
+ // Exclude last uid, which is for offline queue
+ uids.pop_back();
+ std::vector<std::string> packageNames;
+ if (TranscodingUidPolicy::getNamesForUids(uids, &packageNames)) {
+ uids.push_back(OFFLINE_UID);
+ packageNames.push_back("(offline)");
+ }
+
+ for (int32_t i = 0; i < uids.size(); i++) {
+ const uid_t uid = uids[i];
+
+ if (mSessionQueues[uid].empty()) {
+ continue;
+ }
+ snprintf(buffer, SIZE, " Uid: %d, pkg: %s\n", uid,
+ packageNames.empty() ? "(unknown)" : packageNames[i].c_str());
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Num of sessions: %zu\n", mSessionQueues[uid].size());
+ result.append(buffer);
+ for (auto& sessionKey : mSessionQueues[uid]) {
+ auto sessionIt = mSessionMap.find(sessionKey);
+ if (sessionIt == mSessionMap.end()) {
+ snprintf(buffer, SIZE, "Failed to look up Session %s \n",
+ sessionToString(sessionKey).c_str());
+ result.append(buffer);
+ continue;
+ }
+ Session& session = sessionIt->second;
+ TranscodingRequestParcel& request = session.request;
+ snprintf(buffer, SIZE, " Session: %s, %s, %d%%\n",
+ sessionToString(sessionKey).c_str(), sessionStateToString(session.state),
+ session.lastProgress);
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Src: %s\n", request.sourceFilePath.c_str());
+ result.append(buffer);
+ snprintf(buffer, SIZE, " Dst: %s\n", request.destinationFilePath.c_str());
+ result.append(buffer);
+ }
+ }
+
+ write(fd, result.string(), result.size());
+}
+
+TranscodingSessionController::Session* TranscodingSessionController::getTopSession_l() {
+ if (mSessionMap.empty()) {
+ return nullptr;
+ }
+ uid_t topUid = *mUidSortedList.begin();
+ SessionKeyType topSessionKey = *mSessionQueues[topUid].begin();
+ return &mSessionMap[topSessionKey];
+}
+
+void TranscodingSessionController::updateCurrentSession_l() {
+ Session* topSession = getTopSession_l();
+ Session* curSession = mCurrentSession;
+ ALOGV("updateCurrentSession: topSession is %s, curSession is %s",
+ topSession == nullptr ? "null" : sessionToString(topSession->key).c_str(),
+ curSession == nullptr ? "null" : sessionToString(curSession->key).c_str());
+
+ // If we found a topSession that should be run, and it's not already running,
+ // take some actions to ensure it's running.
+ if (topSession != nullptr &&
+ (topSession != curSession || topSession->state != Session::RUNNING)) {
+ // If another session is currently running, pause it first.
+ if (curSession != nullptr && curSession->state == Session::RUNNING) {
+ mTranscoder->pause(curSession->key.first, curSession->key.second);
+ curSession->state = Session::PAUSED;
+ }
+ // If we are not experiencing resource loss, we can start or resume
+ // the topSession now.
+ if (!mResourceLost) {
+ if (topSession->state == Session::NOT_STARTED) {
+ mTranscoder->start(topSession->key.first, topSession->key.second,
+ topSession->request, topSession->callback.lock());
+ } else if (topSession->state == Session::PAUSED) {
+ mTranscoder->resume(topSession->key.first, topSession->key.second,
+ topSession->request, topSession->callback.lock());
+ }
+ topSession->state = Session::RUNNING;
+ }
+ }
+ mCurrentSession = topSession;
+}
+
+void TranscodingSessionController::removeSession_l(const SessionKeyType& sessionKey) {
+ ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
+
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGE("session %s doesn't exist", sessionToString(sessionKey).c_str());
+ return;
+ }
+
+ // Remove session from uid's queue.
+ const uid_t uid = mSessionMap[sessionKey].uid;
+ SessionQueueType& sessionQueue = mSessionQueues[uid];
+ auto it = std::find(sessionQueue.begin(), sessionQueue.end(), sessionKey);
+ if (it == sessionQueue.end()) {
+ ALOGE("couldn't find session %s in queue for uid %d", sessionToString(sessionKey).c_str(),
+ uid);
+ return;
+ }
+ sessionQueue.erase(it);
+
+ // If this is the last session in a real-time queue, remove this uid's queue.
+ if (uid != OFFLINE_UID && sessionQueue.empty()) {
+ mUidSortedList.remove(uid);
+ mSessionQueues.erase(uid);
+ mUidPolicy->unregisterMonitorUid(uid);
+
+ std::unordered_set<uid_t> topUids = mUidPolicy->getTopUids();
+ moveUidsToTop_l(topUids, false /*preserveTopUid*/);
+ }
+
+ // Clear current session.
+ if (mCurrentSession == &mSessionMap[sessionKey]) {
+ mCurrentSession = nullptr;
+ }
+
+ // Remove session from session map.
+ mSessionMap.erase(sessionKey);
+}
+
+/**
+ * Moves the set of uids to the front of mUidSortedList (which is used to pick
+ * the next session to run).
+ *
+ * This is called when 1) we received a onTopUidsChanged() callback from UidPolicy,
+ * or 2) we removed the session queue for a uid because it becomes empty.
+ *
+ * In case of 1), if there are multiple uids in the set, and the current front
+ * uid in mUidSortedList is still in the set, we try to keep that uid at front
+ * so that current session run is not interrupted. (This is not a concern for case 2)
+ * because the queue for a uid was just removed entirely.)
+ */
+void TranscodingSessionController::moveUidsToTop_l(const std::unordered_set<uid_t>& uids,
+ bool preserveTopUid) {
+ // If uid set is empty, nothing to do. Do not change the queue status.
+ if (uids.empty()) {
+ return;
+ }
+
+ // Save the current top uid.
+ uid_t curTopUid = *mUidSortedList.begin();
+ bool pushCurTopToFront = false;
+ int32_t numUidsMoved = 0;
+
+ // Go through the sorted uid list once, and move the ones in top set to front.
+ for (auto it = mUidSortedList.begin(); it != mUidSortedList.end();) {
+ uid_t uid = *it;
+
+ if (uid != OFFLINE_UID && uids.count(uid) > 0) {
+ it = mUidSortedList.erase(it);
+
+ // If this is the top we're preserving, don't push it here, push
+ // it after the for-loop.
+ if (uid == curTopUid && preserveTopUid) {
+ pushCurTopToFront = true;
+ } else {
+ mUidSortedList.push_front(uid);
+ }
+
+ // If we found all uids in the set, break out.
+ if (++numUidsMoved == uids.size()) {
+ break;
+ }
+ } else {
+ ++it;
+ }
+ }
+
+ if (pushCurTopToFront) {
+ mUidSortedList.push_front(curTopUid);
+ }
+}
+
+bool TranscodingSessionController::submit(
+ ClientIdType clientId, SessionIdType sessionId, uid_t uid,
+ const TranscodingRequestParcel& request,
+ const std::weak_ptr<ITranscodingClientCallback>& callback) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+    ALOGV("%s: session %s, uid %d, priority %d", __FUNCTION__, sessionToString(sessionKey).c_str(),
+ uid, (int32_t)request.priority);
+
+ std::scoped_lock lock{mLock};
+
+ if (mSessionMap.count(sessionKey) > 0) {
+ ALOGE("session %s already exists", sessionToString(sessionKey).c_str());
+ return false;
+ }
+
+ // TODO(chz): only support offline vs real-time for now. All kUnspecified sessions
+ // go to offline queue.
+ if (request.priority == TranscodingSessionPriority::kUnspecified) {
+ uid = OFFLINE_UID;
+ }
+
+ // Add session to session map.
+ mSessionMap[sessionKey].key = sessionKey;
+ mSessionMap[sessionKey].uid = uid;
+ mSessionMap[sessionKey].state = Session::NOT_STARTED;
+ mSessionMap[sessionKey].lastProgress = 0;
+ mSessionMap[sessionKey].request = request;
+ mSessionMap[sessionKey].callback = callback;
+
+ // If it's an offline session, the queue was already added in constructor.
+    // If it's a real-time session, check if a queue is already present for the uid,
+ // and add a new queue if needed.
+ if (uid != OFFLINE_UID) {
+ if (mSessionQueues.count(uid) == 0) {
+ mUidPolicy->registerMonitorUid(uid);
+ if (mUidPolicy->isUidOnTop(uid)) {
+ mUidSortedList.push_front(uid);
+ } else {
+ // Shouldn't be submitting real-time requests from non-top app,
+ // put it in front of the offline queue.
+ mUidSortedList.insert(mOfflineUidIterator, uid);
+ }
+ } else if (uid != *mUidSortedList.begin()) {
+ if (mUidPolicy->isUidOnTop(uid)) {
+ mUidSortedList.remove(uid);
+ mUidSortedList.push_front(uid);
+ }
+ }
+ }
+ // Append this session to the uid's queue.
+ mSessionQueues[uid].push_back(sessionKey);
+
+ updateCurrentSession_l();
+
+ validateState_l();
+ return true;
+}
+
+bool TranscodingSessionController::cancel(ClientIdType clientId, SessionIdType sessionId) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ ALOGV("%s: session %s", __FUNCTION__, sessionToString(sessionKey).c_str());
+
+ std::list<SessionKeyType> sessionsToRemove;
+
+ std::scoped_lock lock{mLock};
+
+ if (sessionId < 0) {
+ for (auto it = mSessionMap.begin(); it != mSessionMap.end(); ++it) {
+ if (it->first.first == clientId && it->second.uid != OFFLINE_UID) {
+ sessionsToRemove.push_back(it->first);
+ }
+ }
+ } else {
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGE("session %s doesn't exist", sessionToString(sessionKey).c_str());
+ return false;
+ }
+ sessionsToRemove.push_back(sessionKey);
+ }
+
+ for (auto it = sessionsToRemove.begin(); it != sessionsToRemove.end(); ++it) {
+ // If the session has ever been started, stop it now.
+ // Note that stop() is needed even if the session is currently paused. This instructs
+ // the transcoder to discard any states for the session, otherwise the states may
+ // never be discarded.
+ if (mSessionMap[*it].state != Session::NOT_STARTED) {
+ mTranscoder->stop(it->first, it->second);
+ }
+
+ // Remove the session.
+ removeSession_l(*it);
+ }
+
+ // Start next session.
+ updateCurrentSession_l();
+
+ validateState_l();
+ return true;
+}
+
+bool TranscodingSessionController::getSession(ClientIdType clientId, SessionIdType sessionId,
+ TranscodingRequestParcel* request) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ std::scoped_lock lock{mLock};
+
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGE("session %s doesn't exist", sessionToString(sessionKey).c_str());
+ return false;
+ }
+
+ *(TranscodingRequest*)request = mSessionMap[sessionKey].request;
+ return true;
+}
+
+void TranscodingSessionController::notifyClient(ClientIdType clientId, SessionIdType sessionId,
+ const char* reason,
+ std::function<void(const SessionKeyType&)> func) {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ std::scoped_lock lock{mLock};
+
+ if (mSessionMap.count(sessionKey) == 0) {
+ ALOGW("%s: ignoring %s for session %s that doesn't exist", __FUNCTION__, reason,
+ sessionToString(sessionKey).c_str());
+ return;
+ }
+
+ // Only ignore if session was never started. In particular, propagate the status
+ // to client if the session is paused. Transcoder could have posted finish when
+ // we're pausing it, and the finish arrived after we changed current session.
+ if (mSessionMap[sessionKey].state == Session::NOT_STARTED) {
+ ALOGW("%s: ignoring %s for session %s that was never started", __FUNCTION__, reason,
+ sessionToString(sessionKey).c_str());
+ return;
+ }
+
+ ALOGV("%s: session %s %s", __FUNCTION__, sessionToString(sessionKey).c_str(), reason);
+ func(sessionKey);
+}
+
+void TranscodingSessionController::onStarted(ClientIdType clientId, SessionIdType sessionId) {
+ notifyClient(clientId, sessionId, "started", [=](const SessionKeyType& sessionKey) {
+ auto callback = mSessionMap[sessionKey].callback.lock();
+ if (callback != nullptr) {
+ callback->onTranscodingStarted(sessionId);
+ }
+ });
+}
+
+void TranscodingSessionController::onPaused(ClientIdType clientId, SessionIdType sessionId) {
+ notifyClient(clientId, sessionId, "paused", [=](const SessionKeyType& sessionKey) {
+ auto callback = mSessionMap[sessionKey].callback.lock();
+ if (callback != nullptr) {
+ callback->onTranscodingPaused(sessionId);
+ }
+ });
+}
+
+void TranscodingSessionController::onResumed(ClientIdType clientId, SessionIdType sessionId) {
+ notifyClient(clientId, sessionId, "resumed", [=](const SessionKeyType& sessionKey) {
+ auto callback = mSessionMap[sessionKey].callback.lock();
+ if (callback != nullptr) {
+ callback->onTranscodingResumed(sessionId);
+ }
+ });
+}
+
+void TranscodingSessionController::onFinish(ClientIdType clientId, SessionIdType sessionId) {
+ notifyClient(clientId, sessionId, "finish", [=](const SessionKeyType& sessionKey) {
+ {
+ auto clientCallback = mSessionMap[sessionKey].callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingFinished(
+ sessionId, TranscodingResultParcel({sessionId, -1 /*actualBitrateBps*/,
+ std::nullopt /*sessionStats*/}));
+ }
+ }
+
+ // Remove the session.
+ removeSession_l(sessionKey);
+
+ // Start next session.
+ updateCurrentSession_l();
+
+ validateState_l();
+ });
+}
+
+void TranscodingSessionController::onError(ClientIdType clientId, SessionIdType sessionId,
+ TranscodingErrorCode err) {
+ notifyClient(clientId, sessionId, "error", [=](const SessionKeyType& sessionKey) {
+ {
+ auto clientCallback = mSessionMap[sessionKey].callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingFailed(sessionId, err);
+ }
+ }
+
+ // Remove the session.
+ removeSession_l(sessionKey);
+
+ // Start next session.
+ updateCurrentSession_l();
+
+ validateState_l();
+ });
+}
+
+void TranscodingSessionController::onProgressUpdate(ClientIdType clientId, SessionIdType sessionId,
+ int32_t progress) {
+ notifyClient(clientId, sessionId, "progress", [=](const SessionKeyType& sessionKey) {
+ auto callback = mSessionMap[sessionKey].callback.lock();
+ if (callback != nullptr) {
+ callback->onProgressUpdate(sessionId, progress);
+ }
+ mSessionMap[sessionKey].lastProgress = progress;
+ });
+}
+
+void TranscodingSessionController::onResourceLost() {
+ ALOGI("%s", __FUNCTION__);
+
+ std::scoped_lock lock{mLock};
+
+ if (mResourceLost) {
+ return;
+ }
+
+ // If we receive a resource loss event, the TranscoderLibrary already paused
+ // the transcoding, so we don't need to call onPaused to notify it to pause.
+ // Only need to update the session state here.
+ if (mCurrentSession != nullptr && mCurrentSession->state == Session::RUNNING) {
+ mCurrentSession->state = Session::PAUSED;
+ // Notify the client as a paused event.
+ auto clientCallback = mCurrentSession->callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingPaused(mCurrentSession->key.second);
+ }
+ }
+ mResourceLost = true;
+
+ validateState_l();
+}
+
+void TranscodingSessionController::onTopUidsChanged(const std::unordered_set<uid_t>& uids) {
+ if (uids.empty()) {
+ ALOGW("%s: ignoring empty uids", __FUNCTION__);
+ return;
+ }
+
+ std::string uidStr;
+ for (auto it = uids.begin(); it != uids.end(); it++) {
+ if (!uidStr.empty()) {
+ uidStr += ", ";
+ }
+ uidStr += std::to_string(*it);
+ }
+
+ ALOGD("%s: topUids: size %zu, uids: %s", __FUNCTION__, uids.size(), uidStr.c_str());
+
+ std::scoped_lock lock{mLock};
+
+ moveUidsToTop_l(uids, true /*preserveTopUid*/);
+
+ updateCurrentSession_l();
+
+ validateState_l();
+}
+
+void TranscodingSessionController::onResourceAvailable() {
+ std::scoped_lock lock{mLock};
+
+ if (!mResourceLost) {
+ return;
+ }
+
+ ALOGI("%s", __FUNCTION__);
+
+ mResourceLost = false;
+ updateCurrentSession_l();
+
+ validateState_l();
+}
+
+void TranscodingSessionController::validateState_l() {
+#ifdef VALIDATE_STATE
+ LOG_ALWAYS_FATAL_IF(mSessionQueues.count(OFFLINE_UID) != 1,
+ "mSessionQueues offline queue number is not 1");
+ LOG_ALWAYS_FATAL_IF(*mOfflineUidIterator != OFFLINE_UID,
+ "mOfflineUidIterator not pointing to offline uid");
+ LOG_ALWAYS_FATAL_IF(mUidSortedList.size() != mSessionQueues.size(),
+ "mUidList and mSessionQueues size mismatch");
+
+ int32_t totalSessions = 0;
+ for (auto uid : mUidSortedList) {
+ LOG_ALWAYS_FATAL_IF(mSessionQueues.count(uid) != 1,
+ "mSessionQueues count for uid %d is not 1", uid);
+ for (auto& sessionKey : mSessionQueues[uid]) {
+ LOG_ALWAYS_FATAL_IF(mSessionMap.count(sessionKey) != 1,
+ "mSessions count for session %s is not 1",
+ sessionToString(sessionKey).c_str());
+ }
+
+ totalSessions += mSessionQueues[uid].size();
+ }
+ LOG_ALWAYS_FATAL_IF(mSessionMap.size() != totalSessions,
+ "mSessions size doesn't match total sessions counted from uid queues");
+#endif // VALIDATE_STATE
+}
+
+} // namespace android
diff --git a/media/libmediatranscoding/TranscodingUidPolicy.cpp b/media/libmediatranscoding/TranscodingUidPolicy.cpp
new file mode 100644
index 0000000..9763921
--- /dev/null
+++ b/media/libmediatranscoding/TranscodingUidPolicy.cpp
@@ -0,0 +1,345 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TranscodingUidPolicy"
+
+#include <aidl/android/media/BnResourceManagerClient.h>
+#include <aidl/android/media/IResourceManagerService.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <android/content/pm/IPackageManagerNative.h>
+#include <binder/ActivityManager.h>
+#include <binder/IServiceManager.h>
+#include <binder/PermissionController.h>
+#include <cutils/misc.h> // FIRST_APPLICATION_UID
+#include <cutils/multiuser.h>
+#include <inttypes.h>
+#include <media/TranscodingUidPolicy.h>
+#include <utils/Log.h>
+
+#include <utility>
+
+namespace android {
+
+constexpr static uid_t OFFLINE_UID = -1;
+constexpr static const char* kTranscodingTag = "transcoding";
+
+/*
+ * The OOM score we're going to ask ResourceManager to use for our native transcoding
+ * service. ResourceManager issues reclaims based on these scores. It gets the scores
+ * from ActivityManagerService, which doesn't track native services. The values of the
+ * OOM scores are defined in:
+ * frameworks/base/services/core/java/com/android/server/am/ProcessList.java
+ * We use SERVICE_ADJ which is lower priority than an app possibly visible to the
+ * user, but higher priority than a cached app (which could be killed without disruption
+ * to the user).
+ */
+constexpr static int32_t SERVICE_ADJ = 500;
+
+using Status = ::ndk::ScopedAStatus;
+using aidl::android::media::BnResourceManagerClient;
+using aidl::android::media::IResourceManagerService;
+
+/*
+ * Placeholder ResourceManagerClient for registering process info override
+ * with the IResourceManagerService. This is only used as a token by the service
+ * to get notifications about binder death, not used for reclaiming resources.
+ */
+struct TranscodingUidPolicy::ResourceManagerClient : public BnResourceManagerClient {
+ explicit ResourceManagerClient() = default;
+
+ Status reclaimResource(bool* _aidl_return) override {
+ *_aidl_return = false;
+ return Status::ok();
+ }
+
+ Status getName(::std::string* _aidl_return) override {
+ _aidl_return->clear();
+ return Status::ok();
+ }
+
+ virtual ~ResourceManagerClient() = default;
+};
+
+struct TranscodingUidPolicy::UidObserver : public BnUidObserver,
+ public virtual IBinder::DeathRecipient {
+ explicit UidObserver(TranscodingUidPolicy* owner) : mOwner(owner) {}
+
+ // IUidObserver
+ void onUidGone(uid_t uid, bool disabled) override;
+ void onUidActive(uid_t uid) override;
+ void onUidIdle(uid_t uid, bool disabled) override;
+ void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq,
+ int32_t capability) override;
+
+ // IBinder::DeathRecipient implementation
+ void binderDied(const wp<IBinder>& who) override;
+
+ TranscodingUidPolicy* mOwner;
+};
+
+void TranscodingUidPolicy::UidObserver::onUidGone(uid_t uid __unused, bool disabled __unused) {}
+
+void TranscodingUidPolicy::UidObserver::onUidActive(uid_t uid __unused) {}
+
+void TranscodingUidPolicy::UidObserver::onUidIdle(uid_t uid __unused, bool disabled __unused) {}
+
+void TranscodingUidPolicy::UidObserver::onUidStateChanged(uid_t uid, int32_t procState,
+ int64_t procStateSeq __unused,
+ int32_t capability __unused) {
+ mOwner->onUidStateChanged(uid, procState);
+}
+
+void TranscodingUidPolicy::UidObserver::binderDied(const wp<IBinder>& /*who*/) {
+ ALOGW("TranscodingUidPolicy: ActivityManager has died");
+ // TODO(chz): this is a rare event (since if the AMS is dead, the system is
+ // probably dead as well). But we should try to reconnect.
+ mOwner->setUidObserverRegistered(false);
+}
+
+////////////////////////////////////////////////////////////////////////////
+
+//static
+bool TranscodingUidPolicy::getNamesForUids(const std::vector<int32_t>& uids,
+ std::vector<std::string>* names) {
+ names->clear();
+ sp<IServiceManager> sm(defaultServiceManager());
+ sp<IBinder> binder(sm->getService(String16("package_native")));
+ if (binder == nullptr) {
+ ALOGE("getService package_native failed");
+ return false;
+ }
+
+ sp<content::pm::IPackageManagerNative> packageMgr =
+ interface_cast<content::pm::IPackageManagerNative>(binder);
+ binder::Status status = packageMgr->getNamesForUids(uids, names);
+
+ if (!status.isOk() || names->size() != uids.size()) {
+ names->clear();
+ return false;
+ }
+ return true;
+}
+
+//static
+status_t TranscodingUidPolicy::getUidForPackage(String16 packageName, /*inout*/ uid_t& uid) {
+ PermissionController pc;
+ uid = pc.getPackageUid(packageName, 0);
+ if (uid <= 0) {
+ ALOGE("Unknown package: '%s'", String8(packageName).string());
+ return BAD_VALUE;
+ }
+
+ uid = multiuser_get_uid(0 /*userId*/, uid);
+ return NO_ERROR;
+}
+
+TranscodingUidPolicy::TranscodingUidPolicy()
+ : mAm(std::make_shared<ActivityManager>()),
+ mUidObserver(new UidObserver(this)),
+ mRegistered(false),
+ mTopUidState(ActivityManager::PROCESS_STATE_UNKNOWN) {
+ registerSelf();
+ setProcessInfoOverride();
+}
+
+TranscodingUidPolicy::~TranscodingUidPolicy() {
+ unregisterSelf();
+}
+
+void TranscodingUidPolicy::registerSelf() {
+ status_t res = mAm->linkToDeath(mUidObserver.get());
+ mAm->registerUidObserver(
+ mUidObserver.get(),
+ ActivityManager::UID_OBSERVER_GONE | ActivityManager::UID_OBSERVER_IDLE |
+ ActivityManager::UID_OBSERVER_ACTIVE | ActivityManager::UID_OBSERVER_PROCSTATE,
+ ActivityManager::PROCESS_STATE_UNKNOWN, String16(kTranscodingTag));
+
+ if (res == OK) {
+ Mutex::Autolock _l(mUidLock);
+
+ mRegistered = true;
+ ALOGI("TranscodingUidPolicy: Registered with ActivityManager");
+ } else {
+ mAm->unregisterUidObserver(mUidObserver.get());
+ }
+}
+
+void TranscodingUidPolicy::unregisterSelf() {
+ mAm->unregisterUidObserver(mUidObserver.get());
+ mAm->unlinkToDeath(mUidObserver.get());
+
+ Mutex::Autolock _l(mUidLock);
+
+ mRegistered = false;
+
+ ALOGI("TranscodingUidPolicy: Unregistered with ActivityManager");
+}
+
+void TranscodingUidPolicy::setProcessInfoOverride() {
+ ::ndk::SpAIBinder binder(AServiceManager_getService("media.resource_manager"));
+ std::shared_ptr<IResourceManagerService> service = IResourceManagerService::fromBinder(binder);
+ if (service == nullptr) {
+ ALOGE("Failed to get IResourceManagerService");
+ return;
+ }
+
+ mProcInfoOverrideClient = ::ndk::SharedRefBase::make<ResourceManagerClient>();
+ Status status = service->overrideProcessInfo(
+ mProcInfoOverrideClient, getpid(), ActivityManager::PROCESS_STATE_SERVICE, SERVICE_ADJ);
+ if (!status.isOk()) {
+ ALOGW("Failed to setProcessInfoOverride.");
+ }
+}
+
+void TranscodingUidPolicy::setUidObserverRegistered(bool registered) {
+ Mutex::Autolock _l(mUidLock);
+
+ mRegistered = registered;
+}
+
+void TranscodingUidPolicy::setCallback(const std::shared_ptr<UidPolicyCallbackInterface>& cb) {
+ mUidPolicyCallback = cb;
+}
+
+void TranscodingUidPolicy::registerMonitorUid(uid_t uid) {
+ Mutex::Autolock _l(mUidLock);
+ if (uid == OFFLINE_UID) {
+ ALOGW("Ignoring the offline uid");
+ return;
+ }
+ if (mUidStateMap.find(uid) != mUidStateMap.end()) {
+ ALOGE("%s: Trying to register uid: %d which is already monitored!", __FUNCTION__, uid);
+ return;
+ }
+
+ int32_t state = ActivityManager::PROCESS_STATE_UNKNOWN;
+ if (mRegistered && mAm->isUidActive(uid, String16(kTranscodingTag))) {
+ state = mAm->getUidProcessState(uid, String16(kTranscodingTag));
+ }
+
+ ALOGV("%s: inserting new uid: %u, procState %d", __FUNCTION__, uid, state);
+
+ mUidStateMap.emplace(std::pair<uid_t, int32_t>(uid, state));
+ mStateUidMap[state].insert(uid);
+
+ updateTopUid_l();
+}
+
+void TranscodingUidPolicy::unregisterMonitorUid(uid_t uid) {
+ Mutex::Autolock _l(mUidLock);
+
+ auto it = mUidStateMap.find(uid);
+ if (it == mUidStateMap.end()) {
+ ALOGE("%s: Trying to unregister uid: %d which is not monitored!", __FUNCTION__, uid);
+ return;
+ }
+
+ auto stateIt = mStateUidMap.find(it->second);
+ if (stateIt != mStateUidMap.end()) {
+ stateIt->second.erase(uid);
+ if (stateIt->second.empty()) {
+ mStateUidMap.erase(stateIt);
+ }
+ }
+ mUidStateMap.erase(it);
+
+ updateTopUid_l();
+}
+
+bool TranscodingUidPolicy::isUidOnTop(uid_t uid) {
+ Mutex::Autolock _l(mUidLock);
+
+ return mTopUidState != ActivityManager::PROCESS_STATE_UNKNOWN &&
+ mTopUidState == getProcState_l(uid);
+}
+
+std::unordered_set<uid_t> TranscodingUidPolicy::getTopUids() const {
+ Mutex::Autolock _l(mUidLock);
+
+ if (mTopUidState == ActivityManager::PROCESS_STATE_UNKNOWN) {
+ return std::unordered_set<uid_t>();
+ }
+
+ return mStateUidMap.at(mTopUidState);
+}
+
+void TranscodingUidPolicy::onUidStateChanged(uid_t uid, int32_t procState) {
+ ALOGV("onUidStateChanged: %u, procState %d", uid, procState);
+
+ bool topUidSetChanged = false;
+ std::unordered_set<uid_t> topUids;
+ {
+ Mutex::Autolock _l(mUidLock);
+ auto it = mUidStateMap.find(uid);
+ if (it != mUidStateMap.end() && it->second != procState) {
+ // Top set changed if 1) the uid is in the current top uid set, or 2) the
+ // new procState is at least the same priority as the current top uid state.
+ bool isUidCurrentTop = mTopUidState != ActivityManager::PROCESS_STATE_UNKNOWN &&
+ mStateUidMap[mTopUidState].count(uid) > 0;
+ bool isNewStateHigherThanTop = procState != ActivityManager::PROCESS_STATE_UNKNOWN &&
+ (procState <= mTopUidState ||
+ mTopUidState == ActivityManager::PROCESS_STATE_UNKNOWN);
+ topUidSetChanged = (isUidCurrentTop || isNewStateHigherThanTop);
+
+ // Move uid to the new procState.
+ mStateUidMap[it->second].erase(uid);
+ mStateUidMap[procState].insert(uid);
+ it->second = procState;
+
+ if (topUidSetChanged) {
+ updateTopUid_l();
+
+ // Make a copy of the uid set for callback.
+ topUids = mStateUidMap[mTopUidState];
+ }
+ }
+ }
+
+ ALOGV("topUidSetChanged: %d", topUidSetChanged);
+
+ if (topUidSetChanged) {
+ auto callback = mUidPolicyCallback.lock();
+ if (callback != nullptr) {
+ callback->onTopUidsChanged(topUids);
+ }
+ }
+}
+
+void TranscodingUidPolicy::updateTopUid_l() {
+ mTopUidState = ActivityManager::PROCESS_STATE_UNKNOWN;
+
+ // Find the lowest uid state (ignoring PROCESS_STATE_UNKNOWN) with some monitored uids.
+ for (auto stateIt = mStateUidMap.begin(); stateIt != mStateUidMap.end(); stateIt++) {
+ if (stateIt->first != ActivityManager::PROCESS_STATE_UNKNOWN && !stateIt->second.empty()) {
+ mTopUidState = stateIt->first;
+ break;
+ }
+ }
+
+ ALOGV("%s: top uid state is %d", __FUNCTION__, mTopUidState);
+}
+
+int32_t TranscodingUidPolicy::getProcState_l(uid_t uid) {
+ auto it = mUidStateMap.find(uid);
+ if (it != mUidStateMap.end()) {
+ return it->second;
+ }
+ return ActivityManager::PROCESS_STATE_UNKNOWN;
+}
+
+} // namespace android
diff --git a/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl b/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl
index 07b6c1a..ad2358e 100644
--- a/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl
+++ b/media/libmediatranscoding/aidl/android/media/IMediaTranscodingService.aidl
@@ -16,9 +16,10 @@
package android.media;
-import android.media.TranscodingJobParcel;
+import android.media.ITranscodingClient;
+import android.media.ITranscodingClientCallback;
+import android.media.TranscodingSessionParcel;
import android.media.TranscodingRequestParcel;
-import android.media.ITranscodingServiceClient;
/**
* Binder interface for MediaTranscodingService.
@@ -48,64 +49,25 @@
/**
* Register the client with the MediaTranscodingService.
*
- * Client must call this function to register itself with the service in order to perform
- * transcoding. This function will return a unique positive Id assigned by the service.
- * Client should save this Id and use it for all the transaction with the service.
+ * Client must call this function to register itself with the service in
+ * order to perform transcoding tasks. This function will return an
+ * ITranscodingClient interface object. The client should save and use it
+ * for all future transactions with the service.
*
- * @param client interface for the MediaTranscodingService to call the client.
+ * @param callback client interface for the MediaTranscodingService to call
+ * the client.
+ * @param clientName name of the client.
* @param opPackageName op package name of the client.
- * @param clientUid user id of the client.
- * @param clientPid process id of the client.
- * @return a unique positive Id assigned to the client by the service, -1 means failed to
- * register.
+ * @return an ITranscodingClient interface object, with nullptr indicating
+ * failure to register.
*/
- int registerClient(in ITranscodingServiceClient client,
- in String opPackageName,
- in int clientUid,
- in int clientPid);
-
- /**
- * Unregister the client with the MediaTranscodingService.
- *
- * Client will not be able to perform any more transcoding after unregister.
- *
- * @param clientId assigned Id of the client.
- * @return true if succeeds, false otherwise.
- */
- boolean unregisterClient(in int clientId);
+ ITranscodingClient registerClient(
+ in ITranscodingClientCallback callback,
+ in String clientName,
+ in String opPackageName);
/**
* Returns the number of clients. This is used for debugging.
*/
int getNumOfClients();
-
- /**
- * Submits a transcoding request to MediaTranscodingService.
- *
- * @param clientId assigned Id of the client.
- * @param request a TranscodingRequest contains transcoding configuration.
- * @param job(output variable) a TranscodingJob generated by the MediaTranscodingService.
- * @return a unique positive jobId generated by the MediaTranscodingService, -1 means failure.
- */
- int submitRequest(in int clientId,
- in TranscodingRequestParcel request,
- out TranscodingJobParcel job);
-
- /**
- * Cancels a transcoding job.
- *
- * @param clientId assigned id of the client.
- * @param jobId a TranscodingJob generated by the MediaTranscodingService.
- * @return true if succeeds, false otherwise.
- */
- boolean cancelJob(in int clientId, in int jobId);
-
- /**
- * Queries the job detail associated with a jobId.
- *
- * @param jobId a TranscodingJob generated by the MediaTranscodingService.
- * @param job(output variable) the TranscodingJob associated with the jobId.
- * @return true if succeeds, false otherwise.
- */
- boolean getJobWithId(in int jobId, out TranscodingJobParcel job);
}
diff --git a/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
new file mode 100644
index 0000000..151e3d0
--- /dev/null
+++ b/media/libmediatranscoding/aidl/android/media/ITranscodingClient.aidl
@@ -0,0 +1,63 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.TranscodingSessionParcel;
+import android.media.TranscodingRequestParcel;
+
+/**
+ * ITranscodingClient
+ *
+ * Interface for a client to communicate with MediaTranscodingService.
+ *
+ * {@hide}
+ */
+interface ITranscodingClient {
+ /**
+ * Submits a transcoding request to MediaTranscodingService.
+ *
+ * @param request a TranscodingRequest contains transcoding configuration.
+ * @param session(output variable) a TranscodingSession generated by MediaTranscodingService.
+ * @return true if success, false otherwise.
+ */
+ boolean submitRequest(in TranscodingRequestParcel request,
+ out TranscodingSessionParcel session);
+
+ /**
+ * Cancels a transcoding session.
+ *
+ * @param sessionId a TranscodingSession generated by the MediaTranscodingService.
+ * @return true if succeeds, false otherwise.
+ */
+ boolean cancelSession(in int sessionId);
+
+ /**
+ * Queries the session detail associated with a sessionId.
+ *
+ * @param sessionId a TranscodingSession generated by the MediaTranscodingService.
+ * @param session(output variable) the TranscodingSession associated with the sessionId.
+ * @return true if succeeds, false otherwise.
+ */
+ boolean getSessionWithId(in int sessionId, out TranscodingSessionParcel session);
+
+ /**
+ * Unregister the client with the MediaTranscodingService.
+ *
+ * Client will not be able to perform any more transcoding after unregister.
+ */
+ void unregister();
+}
diff --git a/media/libmediatranscoding/aidl/android/media/ITranscodingClientCallback.aidl b/media/libmediatranscoding/aidl/android/media/ITranscodingClientCallback.aidl
new file mode 100644
index 0000000..d7d9b6f
--- /dev/null
+++ b/media/libmediatranscoding/aidl/android/media/ITranscodingClientCallback.aidl
@@ -0,0 +1,107 @@
+/**
+ * Copyright (c) 2019, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.TranscodingErrorCode;
+import android.media.TranscodingSessionParcel;
+import android.media.TranscodingResultParcel;
+import android.os.ParcelFileDescriptor;
+
+/**
+ * ITranscodingClientCallback
+ *
+ * Interface for the MediaTranscodingService to communicate with the client.
+ *
+ * {@hide}
+ */
+interface ITranscodingClientCallback {
+ /**
+ * Called to open a raw file descriptor to access data under a URI
+ *
+     * @param fileUri The URI of the file to open.
+     * @param mode The file mode to use. Must be one of ("r", "w", "rw")
+ * @return ParcelFileDescriptor if open the file successfully, null otherwise.
+ */
+ ParcelFileDescriptor openFileDescriptor(in @utf8InCpp String fileUri,
+ in @utf8InCpp String mode);
+
+    /**
+     * Called when the transcoding associated with the sessionId is started.
+     * This will only be called if the client requested updates on the status of the session.
+ *
+ * @param sessionId sessionId assigned by the MediaTranscodingService upon receiving request.
+ */
+ oneway void onTranscodingStarted(in int sessionId);
+
+ /**
+     * Called when the transcoding associated with the sessionId is paused.
+     * This will only be called if the client set requestSessionEventUpdate in the TranscodingRequest.
+ *
+ * @param sessionId sessionId assigned by the MediaTranscodingService upon receiving request.
+ */
+ oneway void onTranscodingPaused(in int sessionId);
+
+ /**
+     * Called when the transcoding associated with the sessionId is resumed.
+     * This will only be called if the client set requestSessionEventUpdate in the TranscodingRequest.
+ *
+ * @param sessionId sessionId assigned by the MediaTranscodingService upon receiving request.
+ */
+ oneway void onTranscodingResumed(in int sessionId);
+
+ /**
+ * Called when the transcoding associated with the sessionId finished.
+ *
+ * @param sessionId sessionId assigned by the MediaTranscodingService upon receiving request.
+ * @param result contains the transcoded file stats and other transcoding metrics if requested.
+ */
+ oneway void onTranscodingFinished(in int sessionId, in TranscodingResultParcel result);
+
+ /**
+ * Called when the transcoding associated with the sessionId failed.
+ *
+ * @param sessionId sessionId assigned by the MediaTranscodingService upon receiving request.
+ * @param errorCode error code that indicates the error.
+ */
+ oneway void onTranscodingFailed(in int sessionId, in TranscodingErrorCode errorCode);
+
+ /**
+ * Called when the transcoding configuration associated with the sessionId gets updated, i.e. wait
+ * number in the session queue.
+ *
+     * <p> This will only be called if client set requestProgressUpdate to be true in the
+ * submitted to the MediaTranscodingService.
+ *
+ * @param sessionId sessionId assigned by the MediaTranscodingService upon receiving request.
+ * @param oldAwaitNumber previous number of sessions ahead of current session.
+ * @param newAwaitNumber updated number of sessions ahead of current session.
+ */
+ oneway void onAwaitNumberOfSessionsChanged(in int sessionId,
+ in int oldAwaitNumber,
+ in int newAwaitNumber);
+
+ /**
+ * Called when there is an update on the progress of the TranscodingSession.
+ *
+     * <p> This will only be called if client set requestProgressUpdate to be true in the
+ * submitted to the MediaTranscodingService.
+ *
+ * @param sessionId sessionId assigned by the MediaTranscodingService upon receiving request.
+ * @param progress an integer number ranging from 0 ~ 100 inclusive.
+ */
+ oneway void onProgressUpdate(in int sessionId, in int progress);
+}
diff --git a/media/libmediatranscoding/aidl/android/media/ITranscodingServiceClient.aidl b/media/libmediatranscoding/aidl/android/media/ITranscodingServiceClient.aidl
deleted file mode 100644
index e23c833..0000000
--- a/media/libmediatranscoding/aidl/android/media/ITranscodingServiceClient.aidl
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Copyright (c) 2019, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.TranscodingErrorCode;
-import android.media.TranscodingJobParcel;
-import android.media.TranscodingResultParcel;
-
-/**
- * ITranscodingServiceClient interface for the MediaTranscodingervice to communicate with the
- * client.
- *
- * {@hide}
- */
-//TODO(hkuang): Implement the interface.
-interface ITranscodingServiceClient {
- /**
- * Retrieves the name of the client.
- */
- @utf8InCpp String getName();
-
- /**
- * Called when the transcoding associated with the jobId finished.
- *
- * @param jobId jobId assigned by the MediaTranscodingService upon receiving request.
- * @param result contains the transcoded file stats and other transcoding metrics if requested.
- */
- oneway void onTranscodingFinished(in int jobId, in TranscodingResultParcel result);
-
- /**
- * Called when the transcoding associated with the jobId failed.
- *
- * @param jobId jobId assigned by the MediaTranscodingService upon receiving request.
- * @param errorCode error code that indicates the error.
- */
- oneway void onTranscodingFailed(in int jobId, in TranscodingErrorCode errorCode);
-
- /**
- * Called when the transcoding configuration associated with the jobId gets updated, i.e. wait
- * number in the job queue.
- *
- * <p> This will only be called if client set requestUpdate to be true in the TranscodingRequest
- * submitted to the MediaTranscodingService.
- *
- * @param jobId jobId assigned by the MediaTranscodingService upon receiving request.
- * @param oldAwaitNumber previous number of jobs ahead of current job.
- * @param newAwaitNumber updated number of jobs ahead of current job.
- */
- oneway void onAwaitNumberOfJobsChanged(in int jobId,
- in int oldAwaitNumber,
- in int newAwaitNumber);
-
- /**
- * Called when there is an update on the progress of the TranscodingJob.
- *
- * <p> This will only be called if client set requestUpdate to be true in the TranscodingRequest
- * submitted to the MediaTranscodingService.
- *
- * @param jobId jobId assigned by the MediaTranscodingService upon receiving request.
- * @param progress an integer number ranging from 0 ~ 100 inclusive.
- */
- oneway void onProgressUpdate(in int jobId, in int progress);
-}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
index 7f47fdc..b044d41 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingErrorCode.aidl
@@ -23,11 +23,12 @@
*/
@Backing(type = "int")
enum TranscodingErrorCode {
- kUnknown = 0,
- kUnsupported = 1,
- kDecoderError = 2,
- kEncoderError = 3,
- kExtractorError = 4,
- kMuxerError = 5,
- kInvalidBitstream = 6
+ kNoError = 0,
+ kUnknown = 1,
+ kMalformed = 2,
+ kUnsupported = 3,
+ kInvalidParameter = 4,
+ kInvalidOperation = 5,
+ kErrorIO = 6,
+ kInsufficientResources = 7,
}
\ No newline at end of file
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingJobParcel.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingJobParcel.aidl
deleted file mode 100644
index d912c38..0000000
--- a/media/libmediatranscoding/aidl/android/media/TranscodingJobParcel.aidl
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-import android.media.TranscodingRequestParcel;
-
-/**
- * TranscodingJob is generated by the MediaTranscodingService upon receiving a TranscodingRequest.
- * It contains all the necessary configuration generated by the MediaTranscodingService for the
- * TranscodingRequest.
- *
- * {@hide}
- */
-//TODO(hkuang): Implement the parcelable.
-parcelable TranscodingJobParcel {
- /**
- * A unique positive Id generated by the MediaTranscodingService.
- */
- int jobId;
-
- /**
- * The request associated with the TranscodingJob.
- */
- TranscodingRequestParcel request;
-
- /**
- * Current number of jobs ahead of this job. The service schedules the job based on the priority
- * passed from the client. Client could specify whether to receive updates when the
- * awaitNumberOfJobs changes through setting requestProgressUpdate in the TranscodingRequest.
- */
- int awaitNumberOfJobs;
-}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl
index 7b7986d..03c24f0 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingRequestParcel.aidl
@@ -16,8 +16,10 @@
package android.media;
-import android.media.TranscodingJobPriority;
+import android.media.TranscodingSessionPriority;
+import android.media.TranscodingTestConfig;
import android.media.TranscodingType;
+import android.media.TranscodingVideoTrackFormat;
/**
* TranscodingRequest contains the desired configuration for the transcoding.
@@ -27,9 +29,28 @@
//TODO(hkuang): Implement the parcelable.
parcelable TranscodingRequestParcel {
/**
- * Name of file to be transcoded.
+ * The absolute file path of the source file.
*/
- @utf8InCpp String fileName;
+ @utf8InCpp String sourceFilePath;
+
+ /**
+ * The absolute file path of the destination file.
+ */
+ @utf8InCpp String destinationFilePath;
+
+ /**
+ * The UID of the client that this transcoding request is for. Only privileged caller could
+ * set this Uid as only they could do the transcoding on behalf of the client.
+ * -1 means not available.
+ */
+ int clientUid = -1;
+
+    /**
+     * The PID of the client that this transcoding request is for. Only privileged caller could
+     * set this Pid as only they could do the transcoding on behalf of the client.
+ * -1 means not available.
+ */
+ int clientPid = -1;
/**
* Type of the transcoding.
@@ -37,22 +58,44 @@
TranscodingType transcodingType;
/**
- * Input source file descriptor.
+ * Requested video track format for the transcoding.
+ * Note that the transcoding service will try to fulfill the requested format as much as
+ * possbile, while subject to hardware and software limitation. The final video track format
+ * will be available in the TranscodingSessionParcel when the session is finished.
*/
- ParcelFileDescriptor inFd;
-
- /**
- * Output transcoded file descriptor.
- */
- ParcelFileDescriptor outFd;
+ @nullable TranscodingVideoTrackFormat requestedVideoTrackFormat;
/**
* Priority of this transcoding. Service will schedule the transcoding based on the priority.
*/
- TranscodingJobPriority priority;
+ TranscodingSessionPriority priority;
/**
- * Whether to receive update on progress and change of awaitNumJobs.
+ * Whether to receive update on progress and change of awaitNumSessions.
+ * Default to false.
*/
- boolean requestUpdate;
+ boolean requestProgressUpdate = false;
+
+ /**
+ * Whether to receive update on session's start/stop/pause/resume.
+ * Default to false.
+ */
+ boolean requestSessionEventUpdate = false;
+
+ /**
+ * Whether this request is for testing.
+ */
+ boolean isForTesting = false;
+
+ /**
+ * Test configuration. This will be available only when isForTesting is set to true.
+ */
+ @nullable TranscodingTestConfig testConfig;
+
+ /**
+ * Whether to get the stats of the transcoding.
+ * If this is enabled, the TranscodingSessionStats will be returned in TranscodingResultParcel
+ * upon transcoding finishes.
+ */
+ boolean enableStats = false;
}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingResultParcel.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingResultParcel.aidl
index 65c49e7..7826e25 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingResultParcel.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingResultParcel.aidl
@@ -16,6 +16,8 @@
package android.media;
+import android.media.TranscodingSessionStats;
+
/**
* Result of the transcoding.
*
@@ -24,9 +26,9 @@
//TODO(hkuang): Implement the parcelable.
parcelable TranscodingResultParcel {
/**
- * The jobId associated with the TranscodingResult.
+ * The sessionId associated with the TranscodingResult.
*/
- int jobId;
+ int sessionId;
/**
* Actual bitrate of the transcoded video in bits per second. This will only present for video
@@ -34,5 +36,9 @@
*/
int actualBitrateBps;
- // TODO(hkuang): Add more fields.
+ /**
+ * Stats of the transcoding session. This will only be available when client requests to get the
+ * stats in TranscodingRequestParcel.
+ */
+ @nullable TranscodingSessionStats sessionStats;
}
\ No newline at end of file
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingSessionParcel.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingSessionParcel.aidl
new file mode 100644
index 0000000..3a4a500
--- /dev/null
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingSessionParcel.aidl
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.TranscodingRequestParcel;
+import android.media.TranscodingVideoTrackFormat;
+
+/**
+ * TranscodingSession is generated by the MediaTranscodingService upon receiving a
+ * TranscodingRequest. It contains all the necessary configuration generated by the
+ * MediaTranscodingService for the TranscodingRequest.
+ *
+ * {@hide}
+ */
+//TODO(hkuang): Implement the parcelable.
+parcelable TranscodingSessionParcel {
+ /**
+ * A unique positive Id generated by the MediaTranscodingService.
+ */
+ int sessionId;
+
+ /**
+ * The request associated with the TranscodingSession.
+ */
+ TranscodingRequestParcel request;
+
+ /**
+     * Output video track's format. This will only be available for video transcoding and it will
+     * be available when the session is finished.
+ */
+ @nullable TranscodingVideoTrackFormat videoTrackFormat;
+
+ /**
+ * Current number of sessions ahead of this session. The service schedules the session based on
+ * the priority passed from the client. Client could specify whether to receive updates when the
+ * awaitNumberOfSessions changes through setting requestProgressUpdate in the TranscodingRequest.
+ */
+ int awaitNumberOfSessions;
+}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingSessionPriority.aidl
similarity index 92%
rename from media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
rename to media/libmediatranscoding/aidl/android/media/TranscodingSessionPriority.aidl
index 1a5d81a..f001484 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingSessionPriority.aidl
@@ -17,12 +17,12 @@
package android.media;
/**
- * Priority of a transcoding job.
+ * Priority of a transcoding session.
*
* {@hide}
*/
@Backing(type="int")
-enum TranscodingJobPriority {
+enum TranscodingSessionPriority {
// TODO(hkuang): define what each priority level actually mean.
kUnspecified = 0,
kLow = 1,
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingSessionStats.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingSessionStats.aidl
new file mode 100644
index 0000000..b3e7eea
--- /dev/null
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingSessionStats.aidl
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * TranscodingSessionStats encapsulates the stats of a TranscodingSession.
+ *
+ * {@hide}
+ */
+parcelable TranscodingSessionStats {
+ /**
+ * System time of when the session is created.
+ */
+ long sessionCreatedTimeUs;
+
+ /**
+ * System time of when the session is finished.
+ */
+ long sessionFinishedTimeUs;
+
+ /**
+     * Total time spent on transcoding, excluding the time in pause.
+ */
+ long totalProcessingTimeUs;
+
+ /**
+     * Total time spent on handling the session, including the time in pause.
+     * The totalTimeUs is actually the same as sessionFinishedTimeUs - sessionCreatedTimeUs.
+ */
+ long totalTimeUs;
+}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingTestConfig.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingTestConfig.aidl
new file mode 100644
index 0000000..12e0e94
--- /dev/null
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingTestConfig.aidl
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ package android.media;
+
+ /**
+ * TranscodingTestConfig contains the test configuration used in testing.
+ *
+ * {@hide}
+ */
+parcelable TranscodingTestConfig {
+ /**
+ * Whether to use SimulatedTranscoder for testing. Note that SimulatedTranscoder does not send
+ * transcoding sessions to real MediaTranscoder.
+ */
+ boolean useSimulatedTranscoder = false;
+
+ /**
+ * Passthrough mode used for testing. The transcoding service will assume the destination
+ * path already contains the transcoding of the source file and return it to client directly.
+ */
+ boolean passThroughMode = false;
+
+ /**
+ * Time of processing the session in milliseconds. Service will return the session result at
+ * least after processingTotalTimeMs from the time it starts to process the session. Note that
+ * if service uses real MediaTranscoder to do transcoding, the time spent on transcoding may be
+ * more than that.
+ */
+ int processingTotalTimeMs = 0;
+}
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingVideoTrackFormat.aidl b/media/libmediatranscoding/aidl/android/media/TranscodingVideoTrackFormat.aidl
new file mode 100644
index 0000000..8ed241a
--- /dev/null
+++ b/media/libmediatranscoding/aidl/android/media/TranscodingVideoTrackFormat.aidl
@@ -0,0 +1,84 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.TranscodingVideoCodecType;
+
+/**
+ * TranscodingVideoTrackFormat contains the video track format of a video.
+ *
+ * TODO(hkuang): Switch to PersistableBundle when b/156428735 is fixed or after we remove
+ * aidl_interface
+ *
+ * Note that TranscodingVideoTrackFormat is used in TranscodingRequestParcel for the client to
+ * specify the desired transcoded video format, and is also used in TranscodingSessionParcel for the
+ * service to notify client of the final video format for transcoding.
+ * When used as input in TranscodingRequestParcel, the client only needs to specify the config that
+ * they want to change, e.g. codec or resolution, and all the missing configs will be extracted
+ * from the source video and applied to the destination video.
+ * When used as output in TranscodingSessionParcel, all the configs will be populated to indicate
+ * the final encoder configs used for transcoding.
+ *
+ * {@hide}
+ */
+parcelable TranscodingVideoTrackFormat {
+ /**
+ * Video Codec type.
+ */
+ TranscodingVideoCodecType codecType; // TranscodingVideoCodecType::kUnspecified;
+
+ /**
+ * Width of the video in pixels. -1 means unavailable.
+ */
+ int width = -1;
+
+ /**
+ * Height of the video in pixels. -1 means unavailable.
+ */
+ int height = -1;
+
+ /**
+ * Bitrate in bits per second. -1 means unavailable.
+ */
+ int bitrateBps = -1;
+
+ /**
+ * Codec profile. This must be the same constant as used in MediaCodecInfo.CodecProfileLevel.
+ * -1 means unavailable.
+ */
+ int profile = -1;
+
+ /**
+ * Codec level. This must be the same constant as used in MediaCodecInfo.CodecProfileLevel.
+ * -1 means unavailable.
+ */
+ int level = -1;
+
+ /**
+ * Decoder operating rate. This is used to work around the fact that vendor does not boost the
+ * hardware to maximum speed in transcoding usage case. This operating rate will be applied
+ * to decoder inside MediaTranscoder. -1 means unavailable.
+ */
+ int decoderOperatingRate = -1;
+
+ /**
+ * Encoder operating rate. This is used to work around the fact that vendor does not boost the
+ * hardware to maximum speed in transcoding usage case. This operating rate will be applied
+ * to encoder inside MediaTranscoder. -1 means unavailable.
+ */
+ int encoderOperatingRate = -1;
+}
diff --git a/media/libmediatranscoding/build_and_run_all_unit_tests.sh b/media/libmediatranscoding/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..388e2ea
--- /dev/null
+++ b/media/libmediatranscoding/build_and_run_all_unit_tests.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Script to run all transcoding related tests from subfolders.
+# Run script from this folder.
+#
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+mm
+
+echo "waiting for device"
+
+adb root && adb wait-for-device remount && adb sync
+SYNC_FINISHED=true
+
+# Run the transcoding service tests.
+pushd tests
+. build_and_run_all_unit_tests.sh
+popd
+
+# Run the transcoder tests.
+pushd transcoder/tests/
+. build_and_run_all_unit_tests.sh
+popd
+
diff --git a/media/libmediatranscoding/include/media/AdjustableMaxPriorityQueue.h b/media/libmediatranscoding/include/media/AdjustableMaxPriorityQueue.h
index 0e8dcfd..5ba1ee2 100644
--- a/media/libmediatranscoding/include/media/AdjustableMaxPriorityQueue.h
+++ b/media/libmediatranscoding/include/media/AdjustableMaxPriorityQueue.h
@@ -26,7 +26,7 @@
namespace android {
/*
- * AdjustableMaxPriorityQueue is a custom max priority queue that helps managing jobs for
+ * AdjustableMaxPriorityQueue is a custom max priority queue that helps managing sessions for
* MediaTranscodingService.
*
* AdjustableMaxPriorityQueue is a wrapper template around the STL's *_heap() functions.
@@ -38,7 +38,7 @@
*/
template <class T, class Comparator = std::less<T>>
class AdjustableMaxPriorityQueue {
- public:
+public:
typedef typename std::vector<T>::iterator iterator;
typedef typename std::vector<T>::const_iterator const_iterator;
@@ -104,7 +104,7 @@
/* Return the backbone storage of this PriorityQueue. Mainly used for debugging. */
const std::vector<T>& getStorage() const { return mHeap; };
- private:
+private:
std::vector<T> mHeap;
/* Implementation shared by both public push() methods. */
diff --git a/media/libmediatranscoding/include/media/ControllerClientInterface.h b/media/libmediatranscoding/include/media/ControllerClientInterface.h
new file mode 100644
index 0000000..3fd4f0c
--- /dev/null
+++ b/media/libmediatranscoding/include/media/ControllerClientInterface.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_CONTROLLER_CLIENT_INTERFACE_H
+#define ANDROID_MEDIA_CONTROLLER_CLIENT_INTERFACE_H
+
+#include <aidl/android/media/ITranscodingClientCallback.h>
+#include <aidl/android/media/TranscodingRequestParcel.h>
+#include <media/TranscodingDefs.h>
+
+namespace android {
+
+using ::aidl::android::media::ITranscodingClientCallback;
+using ::aidl::android::media::TranscodingRequestParcel;
+
+// Interface for a client to call the controller to schedule or retrieve
+// the status of a session.
+class ControllerClientInterface {
+public:
+ /**
+ * Submits one request to the controller.
+ *
+     * Returns true on success and false on failure. This call will fail if a session identified
+ * by <clientId, sessionId> already exists.
+ */
+ virtual bool submit(ClientIdType clientId, SessionIdType sessionId, uid_t uid,
+ const TranscodingRequestParcel& request,
+ const std::weak_ptr<ITranscodingClientCallback>& clientCallback) = 0;
+
+ /**
+ * Cancels a session identified by <clientId, sessionId>.
+ *
+ * If sessionId is negative (<0), all sessions with a specified priority (that's not
+ * TranscodingSessionPriority::kUnspecified) will be cancelled. Otherwise, only the single
+ * session <clientId, sessionId> will be cancelled.
+ *
+ * Returns false if a single session is being cancelled but it doesn't exist. Returns
+ * true otherwise.
+ */
+ virtual bool cancel(ClientIdType clientId, SessionIdType sessionId) = 0;
+
+ /**
+ * Retrieves information about a session.
+ *
+ * Returns true and the session if it exists, and false otherwise.
+ */
+ virtual bool getSession(ClientIdType clientId, SessionIdType sessionId,
+ TranscodingRequestParcel* request) = 0;
+
+protected:
+ virtual ~ControllerClientInterface() = default;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_CONTROLLER_CLIENT_INTERFACE_H
diff --git a/media/libmediatranscoding/include/media/ResourcePolicyInterface.h b/media/libmediatranscoding/include/media/ResourcePolicyInterface.h
new file mode 100644
index 0000000..4a92af8
--- /dev/null
+++ b/media/libmediatranscoding/include/media/ResourcePolicyInterface.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_RESOURCE_POLICY_INTERFACE_H
+#define ANDROID_MEDIA_RESOURCE_POLICY_INTERFACE_H
+#include <memory>
+namespace android {
+
+class ResourcePolicyCallbackInterface;
+
+// Interface for the SessionController to control the resource status updates.
+class ResourcePolicyInterface {
+public:
+ // Set the associated callback interface to send the events when resource
+ // status changes. (Set to nullptr will stop the updates.)
+ virtual void setCallback(const std::shared_ptr<ResourcePolicyCallbackInterface>& cb) = 0;
+
+protected:
+ virtual ~ResourcePolicyInterface() = default;
+};
+
+// Interface for notifying the SessionController of a change in resource status.
+class ResourcePolicyCallbackInterface {
+public:
+ // Called when codec resources become available. The controller may use this
+ // as a signal to attempt restart transcoding sessions that were previously
+ // paused due to temporary resource loss.
+ virtual void onResourceAvailable() = 0;
+
+protected:
+ virtual ~ResourcePolicyCallbackInterface() = default;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_RESOURCE_POLICY_INTERFACE_H
diff --git a/media/libmediatranscoding/include/media/TranscoderInterface.h b/media/libmediatranscoding/include/media/TranscoderInterface.h
new file mode 100644
index 0000000..e17cd5a
--- /dev/null
+++ b/media/libmediatranscoding/include/media/TranscoderInterface.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODER_INTERFACE_H
+#define ANDROID_MEDIA_TRANSCODER_INTERFACE_H
+
+#include <aidl/android/media/ITranscodingClientCallback.h>
+#include <aidl/android/media/TranscodingErrorCode.h>
+#include <aidl/android/media/TranscodingRequestParcel.h>
+#include <media/TranscodingDefs.h>
+
+namespace android {
+
+using ::aidl::android::media::ITranscodingClientCallback;
+using ::aidl::android::media::TranscodingErrorCode;
+using ::aidl::android::media::TranscodingRequestParcel;
+class TranscoderCallbackInterface;
+
+// Interface for the controller to call the transcoder to take actions.
+class TranscoderInterface {
+public:
+ virtual void setCallback(const std::shared_ptr<TranscoderCallbackInterface>& cb) = 0;
+ virtual void start(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCallback) = 0;
+ virtual void pause(ClientIdType clientId, SessionIdType sessionId) = 0;
+ virtual void resume(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCallback) = 0;
+ virtual void stop(ClientIdType clientId, SessionIdType sessionId) = 0;
+
+protected:
+ virtual ~TranscoderInterface() = default;
+};
+
+// Interface for the transcoder to notify the controller of the status of
+// the currently running session, or temporary loss of transcoding resources.
+class TranscoderCallbackInterface {
+public:
+ // TODO(chz): determine what parameters are needed here.
+ virtual void onStarted(ClientIdType clientId, SessionIdType sessionId) = 0;
+ virtual void onPaused(ClientIdType clientId, SessionIdType sessionId) = 0;
+ virtual void onResumed(ClientIdType clientId, SessionIdType sessionId) = 0;
+ virtual void onFinish(ClientIdType clientId, SessionIdType sessionId) = 0;
+ virtual void onError(ClientIdType clientId, SessionIdType sessionId,
+ TranscodingErrorCode err) = 0;
+ virtual void onProgressUpdate(ClientIdType clientId, SessionIdType sessionId,
+ int32_t progress) = 0;
+
+ // Called when transcoding becomes temporarily inaccessible due to loss of resource.
+ // If there is any session currently running, it will be paused. When resource contention
+ // is solved, the controller should call TranscoderInterface's to either start a new session,
+ // or resume a paused session.
+ virtual void onResourceLost() = 0;
+
+protected:
+ virtual ~TranscoderCallbackInterface() = default;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRANSCODER_INTERFACE_H
diff --git a/media/libmediatranscoding/include/media/TranscoderWrapper.h b/media/libmediatranscoding/include/media/TranscoderWrapper.h
new file mode 100644
index 0000000..6bf6b56
--- /dev/null
+++ b/media/libmediatranscoding/include/media/TranscoderWrapper.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_TRANSCODER_WRAPPER_H
+#define ANDROID_TRANSCODER_WRAPPER_H
+
+#include <android-base/thread_annotations.h>
+#include <media/NdkMediaError.h>
+#include <media/TranscoderInterface.h>
+
+#include <list>
+#include <map>
+#include <mutex>
+
+namespace android {
+
+class MediaTranscoder;
+class Parcelable;
+
+/*
+ * Wrapper class around MediaTranscoder.
+ * Implements TranscoderInterface for TranscodingSessionController to use.
+ */
+class TranscoderWrapper : public TranscoderInterface,
+ public std::enable_shared_from_this<TranscoderWrapper> {
+public:
+ TranscoderWrapper();
+
+ virtual void setCallback(const std::shared_ptr<TranscoderCallbackInterface>& cb) override;
+ virtual void start(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCallback) override;
+ virtual void pause(ClientIdType clientId, SessionIdType sessionId) override;
+ virtual void resume(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCallback) override;
+ virtual void stop(ClientIdType clientId, SessionIdType sessionId) override;
+
+private:
+ class CallbackImpl;
+ struct Event {
+ enum Type { NoEvent, Start, Pause, Resume, Stop, Finish, Error, Progress } type;
+ ClientIdType clientId;
+ SessionIdType sessionId;
+ std::function<void()> runnable;
+ int32_t arg;
+ };
+ using SessionKeyType = std::pair<ClientIdType, SessionIdType>;
+
+ std::shared_ptr<CallbackImpl> mTranscoderCb;
+ std::shared_ptr<MediaTranscoder> mTranscoder;
+ std::weak_ptr<TranscoderCallbackInterface> mCallback;
+ std::mutex mLock;
+ std::condition_variable mCondition;
+ std::list<Event> mQueue; // GUARDED_BY(mLock);
+ std::map<SessionKeyType, std::shared_ptr<const Parcel>> mPausedStateMap;
+ ClientIdType mCurrentClientId;
+ SessionIdType mCurrentSessionId;
+
+ static std::string toString(const Event& event);
+ void onFinish(ClientIdType clientId, SessionIdType sessionId);
+ void onError(ClientIdType clientId, SessionIdType sessionId, media_status_t status);
+ void onProgress(ClientIdType clientId, SessionIdType sessionId, int32_t progress);
+
+ media_status_t handleStart(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& callback);
+ media_status_t handlePause(ClientIdType clientId, SessionIdType sessionId);
+ media_status_t handleResume(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& callback);
+ media_status_t setupTranscoder(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& callback,
+ const std::shared_ptr<const Parcel>& pausedState = nullptr);
+
+ void cleanup();
+ void reportError(ClientIdType clientId, SessionIdType sessionId, media_status_t err);
+ void queueEvent(Event::Type type, ClientIdType clientId, SessionIdType sessionId,
+ const std::function<void()> runnable, int32_t arg = 0);
+ void threadLoop();
+};
+
+} // namespace android
+#endif // ANDROID_TRANSCODER_WRAPPER_H
diff --git a/media/libmediatranscoding/include/media/TranscodingClientManager.h b/media/libmediatranscoding/include/media/TranscodingClientManager.h
index eec120a..5feeae9 100644
--- a/media/libmediatranscoding/include/media/TranscodingClientManager.h
+++ b/media/libmediatranscoding/include/media/TranscodingClientManager.h
@@ -17,73 +17,77 @@
#ifndef ANDROID_MEDIA_TRANSCODING_CLIENT_MANAGER_H
#define ANDROID_MEDIA_TRANSCODING_CLIENT_MANAGER_H
-#include <aidl/android/media/BnTranscodingServiceClient.h>
-#include <android/binder_ibinder.h>
+#include <aidl/android/media/ITranscodingClient.h>
+#include <aidl/android/media/ITranscodingClientCallback.h>
#include <sys/types.h>
#include <utils/Condition.h>
-#include <utils/RefBase.h>
#include <utils/String8.h>
#include <utils/Vector.h>
+#include <map>
#include <mutex>
#include <unordered_map>
+#include <unordered_set>
+
+#include "ControllerClientInterface.h"
namespace android {
-using ::aidl::android::media::ITranscodingServiceClient;
-
-class MediaTranscodingService;
+using ::aidl::android::media::ITranscodingClient;
+using ::aidl::android::media::ITranscodingClientCallback;
/*
* TranscodingClientManager manages all the transcoding clients across different processes.
*
- * TranscodingClientManager is a global singleton that could only acquired by
- * MediaTranscodingService. It manages all the clients's registration/unregistration and clients'
- * information. It also bookkeeps all the clients' information. It also monitors to the death of the
+ * TranscodingClientManager manages all the clients' registration/unregistration and clients'
+ * information. It also bookkeeps all the clients' information. It also monitors the death of the
* clients. Upon client's death, it will remove the client from it.
*
* TODO(hkuang): Hook up with ResourceManager for resource management.
* TODO(hkuang): Hook up with MediaMetrics to log all the transactions.
*/
-class TranscodingClientManager {
- public:
+class TranscodingClientManager : public std::enable_shared_from_this<TranscodingClientManager> {
+public:
virtual ~TranscodingClientManager();
/**
- * ClientInfo contains a single client's information.
- */
- struct ClientInfo {
- /* The remote client that this ClientInfo is associated with. */
- std::shared_ptr<ITranscodingServiceClient> mClient;
- /* A unique positive Id assigned to the client by the service. */
- int32_t mClientId;
- /* Process id of the client */
- int32_t mClientPid;
- /* User id of the client. */
- int32_t mClientUid;
- /* Package name of the client. */
- std::string mClientOpPackageName;
-
- ClientInfo(const std::shared_ptr<ITranscodingServiceClient>& client, int64_t clientId,
- int32_t pid, int32_t uid, const std::string& opPackageName)
- : mClient(client),
- mClientId(clientId),
- mClientPid(pid),
- mClientUid(uid),
- mClientOpPackageName(opPackageName) {}
- };
-
- /**
* Adds a new client to the manager.
*
- * The client must have valid clientId, pid, uid and opPackageName, otherwise, this will return
- * a non-zero errorcode. If the client has already been added, it will also return non-zero
- * errorcode.
+ * The client must have valid callback, pid, uid, clientName and opPackageName.
+ * Otherwise, this will return a non-zero errorcode. If the client callback has
+ * already been added, it will also return non-zero errorcode.
*
- * @param client to be added to the manager.
+ * @param callback client callback for the service to call this client.
+ * @param clientName client's name.
+ * @param opPackageName client's package name.
+ * @param client output holding the ITranscodingClient interface for the client
+ * to use for subsequent communications with the service.
* @return 0 if client is added successfully, non-zero errorcode otherwise.
*/
- status_t addClient(std::unique_ptr<ClientInfo> client);
+ status_t addClient(const std::shared_ptr<ITranscodingClientCallback>& callback,
+ const std::string& clientName, const std::string& opPackageName,
+ std::shared_ptr<ITranscodingClient>* client);
+
+ /**
+ * Gets the number of clients.
+ */
+ size_t getNumOfClients() const;
+
+ /**
+ * Dump all the client information to the fd.
+ */
+ void dumpAllClients(int fd, const Vector<String16>& args);
+
+private:
+ friend class MediaTranscodingService;
+ friend class TranscodingClientManagerTest;
+ struct ClientImpl;
+
+ // Only allow MediaTranscodingService and unit tests to instantiate.
+ TranscodingClientManager(const std::shared_ptr<ControllerClientInterface>& controller);
+
+ // Checks if a user is trusted (and allowed to submit sessions on behalf of other uids)
+ bool isTrustedCallingUid(uid_t uid);
/**
* Removes an existing client from the manager.
@@ -93,39 +97,24 @@
* @param clientId id of the client to be removed..
* @return 0 if client is removed successfully, non-zero errorcode otherwise.
*/
- status_t removeClient(int32_t clientId);
-
- /**
- * Gets the number of clients.
- */
- size_t getNumOfClients() const;
-
- /**
- * Checks if a client with clientId is already registered.
- */
- bool isClientIdRegistered(int32_t clientId) const;
-
- /**
- * Dump all the client information to the fd.
- */
- void dumpAllClients(int fd, const Vector<String16>& args);
-
- private:
- friend class MediaTranscodingService;
- friend class TranscodingClientManagerTest;
-
- /** Get the singleton instance of the TranscodingClientManager. */
- static TranscodingClientManager& getInstance();
-
- TranscodingClientManager();
+ status_t removeClient(ClientIdType clientId);
static void BinderDiedCallback(void* cookie);
mutable std::mutex mLock;
- std::unordered_map<int32_t, std::unique_ptr<ClientInfo>> mClientIdToClientInfoMap
+ std::unordered_map<ClientIdType, std::shared_ptr<ClientImpl>> mClientIdToClientMap
GUARDED_BY(mLock);
+ std::unordered_set<uintptr_t> mRegisteredCallbacks GUARDED_BY(mLock);
::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+
+ std::shared_ptr<ControllerClientInterface> mSessionController;
+ uid_t mMediaProviderUid;
+
+ static std::atomic<ClientIdType> sCookieCounter;
+ static std::mutex sCookie2ClientLock;
+ static std::map<ClientIdType, std::shared_ptr<ClientImpl>> sCookie2Client
+ GUARDED_BY(sCookie2ClientLock);
};
} // namespace android
diff --git a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl b/media/libmediatranscoding/include/media/TranscodingDefs.h
similarity index 61%
copy from media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
copy to media/libmediatranscoding/include/media/TranscodingDefs.h
index 1a5d81a..8e02dd2 100644
--- a/media/libmediatranscoding/aidl/android/media/TranscodingJobPriority.aidl
+++ b/media/libmediatranscoding/include/media/TranscodingDefs.h
@@ -14,24 +14,16 @@
* limitations under the License.
*/
-package android.media;
+#ifndef ANDROID_MEDIA_TRANSCODING_DEFS_H
+#define ANDROID_MEDIA_TRANSCODING_DEFS_H
-/**
- * Priority of a transcoding job.
- *
- * {@hide}
- */
-@Backing(type="int")
-enum TranscodingJobPriority {
- // TODO(hkuang): define what each priority level actually mean.
- kUnspecified = 0,
- kLow = 1,
- /**
- * 2 ~ 20 is reserved for future use.
- */
- kNormal = 21,
- /**
- * 22 ~ 30 is reserved for future use.
- */
- kHigh = 31,
-}
\ No newline at end of file
+#include <aidl/android/media/ITranscodingClientCallback.h>
+#include <aidl/android/media/TranscodingRequestParcel.h>
+
+namespace android {
+
+using ClientIdType = uintptr_t;
+using SessionIdType = int32_t;
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRANSCODING_DEFS_H
diff --git a/media/libmediatranscoding/include/media/TranscodingRequest.h b/media/libmediatranscoding/include/media/TranscodingRequest.h
new file mode 100644
index 0000000..aae621f
--- /dev/null
+++ b/media/libmediatranscoding/include/media/TranscodingRequest.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODING_REQUEST_H
+#define ANDROID_MEDIA_TRANSCODING_REQUEST_H
+
+#include <aidl/android/media/TranscodingRequestParcel.h>
+
+namespace android {
+
+using ::aidl::android::media::TranscodingRequestParcel;
+
+// Helper class for duplicating a TranscodingRequestParcel
+class TranscodingRequest : public TranscodingRequestParcel {
+public:
+ TranscodingRequest() = default;
+ TranscodingRequest(const TranscodingRequestParcel& parcel) { setTo(parcel); }
+ TranscodingRequest& operator=(const TranscodingRequest& request) {
+ setTo(request);
+ return *this;
+ }
+
+private:
+ void setTo(const TranscodingRequestParcel& parcel) {
+ sourceFilePath = parcel.sourceFilePath;
+ destinationFilePath = parcel.destinationFilePath;
+ clientUid = parcel.clientUid;
+ clientPid = parcel.clientPid;
+ transcodingType = parcel.transcodingType;
+ requestedVideoTrackFormat = parcel.requestedVideoTrackFormat;
+ priority = parcel.priority;
+ requestProgressUpdate = parcel.requestProgressUpdate;
+ requestSessionEventUpdate = parcel.requestSessionEventUpdate;
+ isForTesting = parcel.isForTesting;
+ testConfig = parcel.testConfig;
+ }
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRANSCODING_REQUEST_H
diff --git a/media/libmediatranscoding/include/media/TranscodingResourcePolicy.h b/media/libmediatranscoding/include/media/TranscodingResourcePolicy.h
new file mode 100644
index 0000000..0836eda
--- /dev/null
+++ b/media/libmediatranscoding/include/media/TranscodingResourcePolicy.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODING_RESOURCE_POLICY_H
+#define ANDROID_MEDIA_TRANSCODING_RESOURCE_POLICY_H
+
+#include <android/binder_auto_utils.h>
+#include <media/ResourcePolicyInterface.h>
+#include <utils/Condition.h>
+
+#include <mutex>
+namespace aidl {
+namespace android {
+namespace media {
+class IResourceObserverService;
+}
+} // namespace android
+} // namespace aidl
+
+namespace android {
+
+using ::aidl::android::media::IResourceObserverService;
+
+class TranscodingResourcePolicy : public ResourcePolicyInterface {
+public:
+ explicit TranscodingResourcePolicy();
+ ~TranscodingResourcePolicy();
+
+ void setCallback(const std::shared_ptr<ResourcePolicyCallbackInterface>& cb) override;
+
+private:
+ struct ResourceObserver;
+ mutable std::mutex mRegisteredLock;
+ bool mRegistered GUARDED_BY(mRegisteredLock);
+ std::shared_ptr<IResourceObserverService> mService GUARDED_BY(mRegisteredLock);
+ std::shared_ptr<ResourceObserver> mObserver;
+
+ mutable std::mutex mCallbackLock;
+ std::weak_ptr<ResourcePolicyCallbackInterface> mResourcePolicyCallback
+ GUARDED_BY(mCallbackLock);
+
+ ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+
+ static void BinderDiedCallback(void* cookie);
+
+ void registerSelf();
+ void unregisterSelf();
+ void onResourceAvailable();
+};  // class TranscodingResourcePolicy
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRANSCODING_RESOURCE_POLICY_H
diff --git a/media/libmediatranscoding/include/media/TranscodingSessionController.h b/media/libmediatranscoding/include/media/TranscodingSessionController.h
new file mode 100644
index 0000000..9ab3518
--- /dev/null
+++ b/media/libmediatranscoding/include/media/TranscodingSessionController.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODING_SESSION_CONTROLLER_H
+#define ANDROID_MEDIA_TRANSCODING_SESSION_CONTROLLER_H
+
+#include <aidl/android/media/TranscodingSessionPriority.h>
+#include <media/ControllerClientInterface.h>
+#include <media/ResourcePolicyInterface.h>
+#include <media/TranscoderInterface.h>
+#include <media/TranscodingRequest.h>
+#include <media/UidPolicyInterface.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+#include <list>
+#include <map>
+#include <mutex>
+
+namespace android {
+using ::aidl::android::media::TranscodingResultParcel;
+using ::aidl::android::media::TranscodingSessionPriority;
+
+class TranscodingSessionController : public UidPolicyCallbackInterface,
+ public ControllerClientInterface,
+ public TranscoderCallbackInterface,
+ public ResourcePolicyCallbackInterface {
+public:
+ virtual ~TranscodingSessionController();
+
+ // ControllerClientInterface
+ bool submit(ClientIdType clientId, SessionIdType sessionId, uid_t uid,
+ const TranscodingRequestParcel& request,
+ const std::weak_ptr<ITranscodingClientCallback>& clientCallback) override;
+ bool cancel(ClientIdType clientId, SessionIdType sessionId) override;
+ bool getSession(ClientIdType clientId, SessionIdType sessionId,
+ TranscodingRequestParcel* request) override;
+ // ~ControllerClientInterface
+
+ // TranscoderCallbackInterface
+ void onStarted(ClientIdType clientId, SessionIdType sessionId) override;
+ void onPaused(ClientIdType clientId, SessionIdType sessionId) override;
+ void onResumed(ClientIdType clientId, SessionIdType sessionId) override;
+ void onFinish(ClientIdType clientId, SessionIdType sessionId) override;
+ void onError(ClientIdType clientId, SessionIdType sessionId, TranscodingErrorCode err) override;
+ void onProgressUpdate(ClientIdType clientId, SessionIdType sessionId,
+ int32_t progress) override;
+ void onResourceLost() override;
+ // ~TranscoderCallbackInterface
+
+ // UidPolicyCallbackInterface
+ void onTopUidsChanged(const std::unordered_set<uid_t>& uids) override;
+ // ~UidPolicyCallbackInterface
+
+ // ResourcePolicyCallbackInterface
+ void onResourceAvailable() override;
+ // ~ResourcePolicyCallbackInterface
+
+ /**
+ * Dump all the session information to the fd.
+ */
+ void dumpAllSessions(int fd, const Vector<String16>& args);
+
+private:
+ friend class MediaTranscodingService;
+ friend class TranscodingSessionControllerTest;
+
+ using SessionKeyType = std::pair<ClientIdType, SessionIdType>;
+ using SessionQueueType = std::list<SessionKeyType>;
+
+ struct Session {
+ SessionKeyType key;
+ uid_t uid;
+ enum State {
+ NOT_STARTED,
+ RUNNING,
+ PAUSED,
+ } state;
+ int32_t lastProgress;
+ TranscodingRequest request;
+ std::weak_ptr<ITranscodingClientCallback> callback;
+ };
+
+ // TODO(chz): call transcoder without global lock.
+ // Use mLock for all entrypoints for now.
+ mutable std::mutex mLock;
+
+ std::map<SessionKeyType, Session> mSessionMap;
+
+ // uid->SessionQueue map (uid == -1: offline queue)
+ std::map<uid_t, SessionQueueType> mSessionQueues;
+
+ // uids, with the head being the most-recently-top app, 2nd item is the
+ // previous top app, etc.
+ std::list<uid_t> mUidSortedList;
+ std::list<uid_t>::iterator mOfflineUidIterator;
+
+ std::shared_ptr<TranscoderInterface> mTranscoder;
+ std::shared_ptr<UidPolicyInterface> mUidPolicy;
+ std::shared_ptr<ResourcePolicyInterface> mResourcePolicy;
+
+ Session* mCurrentSession;
+ bool mResourceLost;
+
+ // Only allow MediaTranscodingService and unit tests to instantiate.
+ TranscodingSessionController(const std::shared_ptr<TranscoderInterface>& transcoder,
+ const std::shared_ptr<UidPolicyInterface>& uidPolicy,
+ const std::shared_ptr<ResourcePolicyInterface>& resourcePolicy);
+
+ Session* getTopSession_l();
+ void updateCurrentSession_l();
+ void removeSession_l(const SessionKeyType& sessionKey);
+ void moveUidsToTop_l(const std::unordered_set<uid_t>& uids, bool preserveTopUid);
+ void notifyClient(ClientIdType clientId, SessionIdType sessionId, const char* reason,
+ std::function<void(const SessionKeyType&)> func);
+ // Internal state verifier (debug only)
+ void validateState_l();
+
+ static String8 sessionToString(const SessionKeyType& sessionKey);
+ static const char* sessionStateToString(const Session::State sessionState);
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRANSCODING_SESSION_CONTROLLER_H
diff --git a/media/libmediatranscoding/include/media/TranscodingUidPolicy.h b/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
new file mode 100644
index 0000000..946770c
--- /dev/null
+++ b/media/libmediatranscoding/include/media/TranscodingUidPolicy.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODING_UID_POLICY_H
+#define ANDROID_MEDIA_TRANSCODING_UID_POLICY_H
+
+#include <aidl/android/media/ITranscodingClient.h>
+#include <aidl/android/media/ITranscodingClientCallback.h>
+#include <media/UidPolicyInterface.h>
+#include <sys/types.h>
+#include <utils/Condition.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+
+#include <map>
+#include <mutex>
+#include <unordered_map>
+#include <unordered_set>
+
+namespace android {
+
+class ActivityManager;
+// Observer for UID lifecycle that provides information about a uid's app
+// priority, used by the session controller.
+class TranscodingUidPolicy : public UidPolicyInterface {
+public:
+ explicit TranscodingUidPolicy();
+ ~TranscodingUidPolicy();
+
+ // UidPolicyInterface
+ bool isUidOnTop(uid_t uid) override;
+ void registerMonitorUid(uid_t uid) override;
+ void unregisterMonitorUid(uid_t uid) override;
+ std::unordered_set<uid_t> getTopUids() const override;
+ void setCallback(const std::shared_ptr<UidPolicyCallbackInterface>& cb) override;
+ // ~UidPolicyInterface
+
+ static bool getNamesForUids(const std::vector<int32_t>& uids, std::vector<std::string>* names);
+ static status_t getUidForPackage(String16 packageName, /*inout*/ uid_t& uid);
+
+private:
+ void onUidStateChanged(uid_t uid, int32_t procState);
+ void setUidObserverRegistered(bool registerd);
+ void registerSelf();
+ void unregisterSelf();
+ void setProcessInfoOverride();
+ int32_t getProcState_l(uid_t uid) NO_THREAD_SAFETY_ANALYSIS;
+ void updateTopUid_l() NO_THREAD_SAFETY_ANALYSIS;
+
+ struct UidObserver;
+ struct ResourceManagerClient;
+ mutable Mutex mUidLock;
+ std::shared_ptr<ActivityManager> mAm;
+ sp<UidObserver> mUidObserver;
+ bool mRegistered GUARDED_BY(mUidLock);
+ int32_t mTopUidState GUARDED_BY(mUidLock);
+ std::unordered_map<uid_t, int32_t> mUidStateMap GUARDED_BY(mUidLock);
+ std::map<int32_t, std::unordered_set<uid_t>> mStateUidMap GUARDED_BY(mUidLock);
+ std::weak_ptr<UidPolicyCallbackInterface> mUidPolicyCallback;
+ std::shared_ptr<ResourceManagerClient> mProcInfoOverrideClient;
+}; // class TranscodingUidPolicy
+
+} // namespace android
+#endif  // ANDROID_MEDIA_TRANSCODING_UID_POLICY_H
diff --git a/media/libmediatranscoding/include/media/UidPolicyInterface.h b/media/libmediatranscoding/include/media/UidPolicyInterface.h
new file mode 100644
index 0000000..05d8db0
--- /dev/null
+++ b/media/libmediatranscoding/include/media/UidPolicyInterface.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_UID_POLICY_INTERFACE_H
+#define ANDROID_MEDIA_UID_POLICY_INTERFACE_H
+
+#include <unordered_set>
+
+namespace android {
+
+class UidPolicyCallbackInterface;
+
+// Interface for the controller to query a uid's info.
+class UidPolicyInterface {
+public:
+ // Instruct the uid policy to start monitoring a uid.
+ virtual void registerMonitorUid(uid_t uid) = 0;
+ // Instruct the uid policy to stop monitoring a uid.
+ virtual void unregisterMonitorUid(uid_t uid) = 0;
+ // Whether a uid is among the set of uids that's currently top priority.
+ virtual bool isUidOnTop(uid_t uid) = 0;
+ // Retrieves the set of uids that's currently top priority.
+ virtual std::unordered_set<uid_t> getTopUids() const = 0;
+ // Set the associated callback interface to send the events when uid states change.
+ virtual void setCallback(const std::shared_ptr<UidPolicyCallbackInterface>& cb) = 0;
+
+protected:
+ virtual ~UidPolicyInterface() = default;
+};
+
+// Interface for notifying the controller of a change in uid states.
+class UidPolicyCallbackInterface {
+public:
+ // Called when the set of uids that's top priority among the uids of interest
+ // has changed. The receiver of this callback should adjust accordingly.
+ virtual void onTopUidsChanged(const std::unordered_set<uid_t>& uids) = 0;
+
+protected:
+ virtual ~UidPolicyCallbackInterface() = default;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_UID_POLICY_INTERFACE_H
diff --git a/media/libmediatranscoding/tests/AdjustableMaxPriorityQueue_tests.cpp b/media/libmediatranscoding/tests/AdjustableMaxPriorityQueue_tests.cpp
index d58af4e..a35ca53 100644
--- a/media/libmediatranscoding/tests/AdjustableMaxPriorityQueue_tests.cpp
+++ b/media/libmediatranscoding/tests/AdjustableMaxPriorityQueue_tests.cpp
@@ -36,7 +36,7 @@
namespace android {
class IntUniquePtrComp {
- public:
+public:
bool operator()(const std::unique_ptr<int>& lhs, const std::unique_ptr<int>& rhs) const {
return *lhs < *rhs;
}
@@ -223,19 +223,19 @@
}
// Test the heap property and make sure it is the same as std::priority_queue.
-TEST(AdjustableMaxPriorityQueueTest, TranscodingJobTest) {
- // Test data structure that mimics the Transcoding job.
- struct TranscodingJob {
+TEST(AdjustableMaxPriorityQueueTest, TranscodingSessionTest) {
+ // Test data structure that mimics the Transcoding session.
+ struct TranscodingSession {
int32_t priority;
int64_t createTimeUs;
};
- // The job is arranging according to priority with highest priority comes first.
- // For the job with the same priority, the job with early createTime will come first.
- class TranscodingJobComp {
- public:
- bool operator()(const std::unique_ptr<TranscodingJob>& lhs,
- const std::unique_ptr<TranscodingJob>& rhs) const {
+    // The sessions are arranged according to priority, with the highest priority coming first.
+    // For sessions with the same priority, the session with the earlier createTime comes first.
+ class TranscodingSessionComp {
+ public:
+ bool operator()(const std::unique_ptr<TranscodingSession>& lhs,
+ const std::unique_ptr<TranscodingSession>& rhs) const {
if (lhs->priority != rhs->priority) {
return lhs->priority < rhs->priority;
}
@@ -244,46 +244,47 @@
};
// Map to save each value's position in the heap.
- std::unordered_map<int, TranscodingJob*> jobIdToJobMap;
+ std::unordered_map<int, TranscodingSession*> sessionIdToSessionMap;
- TranscodingJob testJobs[] = {
- {1 /*priority*/, 66 /*createTimeUs*/}, // First job,
- {2 /*priority*/, 67 /*createTimeUs*/}, // Second job,
- {2 /*priority*/, 66 /*createTimeUs*/}, // Third job,
- {3 /*priority*/, 68 /*createTimeUs*/}, // Fourth job.
+ TranscodingSession testSessions[] = {
+ {1 /*priority*/, 66 /*createTimeUs*/}, // First session,
+ {2 /*priority*/, 67 /*createTimeUs*/}, // Second session,
+ {2 /*priority*/, 66 /*createTimeUs*/}, // Third session,
+ {3 /*priority*/, 68 /*createTimeUs*/}, // Fourth session.
};
- AdjustableMaxPriorityQueue<std::unique_ptr<TranscodingJob>, TranscodingJobComp> jobQueue;
+ AdjustableMaxPriorityQueue<std::unique_ptr<TranscodingSession>, TranscodingSessionComp>
+ sessionQueue;
- // Pushes all the jobs into the heap.
- for (int jobId = 0; jobId < 4; ++jobId) {
- auto newJob = std::make_unique<TranscodingJob>(testJobs[jobId]);
- jobIdToJobMap[jobId] = newJob.get();
- EXPECT_TRUE(jobQueue.push(std::move(newJob)));
+ // Pushes all the sessions into the heap.
+ for (int sessionId = 0; sessionId < 4; ++sessionId) {
+ auto newSession = std::make_unique<TranscodingSession>(testSessions[sessionId]);
+ sessionIdToSessionMap[sessionId] = newSession.get();
+ EXPECT_TRUE(sessionQueue.push(std::move(newSession)));
}
- // Check the job queue size.
- EXPECT_EQ(4, jobQueue.size());
+ // Check the session queue size.
+ EXPECT_EQ(4, sessionQueue.size());
- // Check the top and it should be Forth job: (3, 68)
- const std::unique_ptr<TranscodingJob>& topJob = jobQueue.top();
- EXPECT_EQ(3, topJob->priority);
- EXPECT_EQ(68, topJob->createTimeUs);
+    // Check the top and it should be Fourth session: (3, 68)
+ const std::unique_ptr<TranscodingSession>& topSession = sessionQueue.top();
+ EXPECT_EQ(3, topSession->priority);
+ EXPECT_EQ(68, topSession->createTimeUs);
// Consume the top.
- std::unique_ptr<TranscodingJob> consumeJob = jobQueue.consume_top();
+ std::unique_ptr<TranscodingSession> consumeSession = sessionQueue.consume_top();
- // Check the top and it should be Third Job (2, 66)
- const std::unique_ptr<TranscodingJob>& topJob2 = jobQueue.top();
- EXPECT_EQ(2, topJob2->priority);
- EXPECT_EQ(66, topJob2->createTimeUs);
+ // Check the top and it should be Third Session (2, 66)
+ const std::unique_ptr<TranscodingSession>& topSession2 = sessionQueue.top();
+ EXPECT_EQ(2, topSession2->priority);
+ EXPECT_EQ(66, topSession2->createTimeUs);
- // Change the Second job's priority to 4 from (2, 67) -> (4, 67). It should becomes top of the
- // queue.
- jobIdToJobMap[1]->priority = 4;
- jobQueue.rebuild();
- const std::unique_ptr<TranscodingJob>& topJob3 = jobQueue.top();
- EXPECT_EQ(4, topJob3->priority);
- EXPECT_EQ(67, topJob3->createTimeUs);
+    // Change the Second session's priority to 4 from (2, 67) -> (4, 67). It should become
+    // top of the queue.
+ sessionIdToSessionMap[1]->priority = 4;
+ sessionQueue.rebuild();
+ const std::unique_ptr<TranscodingSession>& topSession3 = sessionQueue.top();
+ EXPECT_EQ(4, topSession3->priority);
+ EXPECT_EQ(67, topSession3->createTimeUs);
}
} // namespace android
\ No newline at end of file
diff --git a/media/libmediatranscoding/tests/Android.bp b/media/libmediatranscoding/tests/Android.bp
index 8191b00..7b15b1b 100644
--- a/media/libmediatranscoding/tests/Android.bp
+++ b/media/libmediatranscoding/tests/Android.bp
@@ -38,6 +38,16 @@
}
//
+// TranscodingSessionController unit test
+//
+cc_test {
+ name: "TranscodingSessionController_tests",
+ defaults: ["libmediatranscoding_test_defaults"],
+
+ srcs: ["TranscodingSessionController_tests.cpp"],
+}
+
+//
// AdjustableMaxPriorityQueue unit test
//
cc_test {
@@ -45,4 +55,4 @@
defaults: ["libmediatranscoding_test_defaults"],
srcs: ["AdjustableMaxPriorityQueue_tests.cpp"],
-}
\ No newline at end of file
+}
diff --git a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
index 5d2419d..1a50923 100644
--- a/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
+++ b/media/libmediatranscoding/tests/TranscodingClientManager_tests.cpp
@@ -19,262 +19,522 @@
// #define LOG_NDEBUG 0
#define LOG_TAG "TranscodingClientManagerTest"
-#include <aidl/android/media/BnTranscodingServiceClient.h>
+#include <aidl/android/media/BnTranscodingClientCallback.h>
#include <aidl/android/media/IMediaTranscodingService.h>
-#include <aidl/android/media/ITranscodingServiceClient.h>
#include <android-base/logging.h>
#include <android/binder_manager.h>
#include <android/binder_process.h>
#include <gtest/gtest.h>
+#include <media/ControllerClientInterface.h>
#include <media/TranscodingClientManager.h>
+#include <media/TranscodingRequest.h>
#include <utils/Log.h>
+#include <list>
+
namespace android {
using Status = ::ndk::ScopedAStatus;
-using aidl::android::media::BnTranscodingServiceClient;
-using aidl::android::media::IMediaTranscodingService;
-using aidl::android::media::ITranscodingServiceClient;
+using ::aidl::android::media::BnTranscodingClientCallback;
+using ::aidl::android::media::IMediaTranscodingService;
+using ::aidl::android::media::TranscodingErrorCode;
+using ::aidl::android::media::TranscodingRequestParcel;
+using ::aidl::android::media::TranscodingResultParcel;
+using ::aidl::android::media::TranscodingSessionParcel;
+using ::aidl::android::media::TranscodingSessionPriority;
-constexpr int32_t kInvalidClientId = -1;
-constexpr int32_t kInvalidClientPid = -1;
-constexpr int32_t kInvalidClientUid = -1;
-constexpr const char* kInvalidClientOpPackageName = "";
+constexpr pid_t kInvalidClientPid = -5;
+constexpr pid_t kInvalidClientUid = -10;
+constexpr const char* kInvalidClientName = "";
+constexpr const char* kInvalidClientPackage = "";
-constexpr int32_t kClientId = 1;
-constexpr int32_t kClientPid = 2;
-constexpr int32_t kClientUid = 3;
-constexpr const char* kClientOpPackageName = "TestClient";
+constexpr const char* kClientName = "TestClientName";
+constexpr const char* kClientPackage = "TestClientPackage";
-struct TestClient : public BnTranscodingServiceClient {
- TestClient(const std::shared_ptr<IMediaTranscodingService>& service) : mService(service) {
- ALOGD("TestClient Created");
- }
+#define SESSION(n) (n)
- Status getName(std::string* _aidl_return) override {
- *_aidl_return = "test_client";
+struct TestClientCallback : public BnTranscodingClientCallback {
+ TestClientCallback() { ALOGI("TestClientCallback Created"); }
+
+ virtual ~TestClientCallback() { ALOGI("TestClientCallback destroyed"); };
+
+ Status openFileDescriptor(const std::string& /*in_fileUri*/, const std::string& /*in_mode*/,
+ ::ndk::ScopedFileDescriptor* /*_aidl_return*/) override {
return Status::ok();
}
- Status onTranscodingFinished(
- int32_t /* in_jobId */,
- const ::aidl::android::media::TranscodingResultParcel& /* in_result */) override {
+ Status onTranscodingStarted(int32_t /*in_sessionId*/) override { return Status::ok(); }
+
+ Status onTranscodingPaused(int32_t /*in_sessionId*/) override { return Status::ok(); }
+
+ Status onTranscodingResumed(int32_t /*in_sessionId*/) override { return Status::ok(); }
+
+ Status onTranscodingFinished(int32_t in_sessionId,
+ const TranscodingResultParcel& in_result) override {
+ EXPECT_EQ(in_sessionId, in_result.sessionId);
+ mEventQueue.push_back(Finished(in_sessionId));
return Status::ok();
}
- Status onTranscodingFailed(
- int32_t /* in_jobId */,
- ::aidl::android::media::TranscodingErrorCode /*in_errorCode */) override {
+ Status onTranscodingFailed(int32_t in_sessionId,
+ TranscodingErrorCode /*in_errorCode */) override {
+ mEventQueue.push_back(Failed(in_sessionId));
return Status::ok();
}
- Status onAwaitNumberOfJobsChanged(int32_t /* in_jobId */, int32_t /* in_oldAwaitNumber */,
- int32_t /* in_newAwaitNumber */) override {
+ Status onAwaitNumberOfSessionsChanged(int32_t /* in_sessionId */,
+ int32_t /* in_oldAwaitNumber */,
+ int32_t /* in_newAwaitNumber */) override {
return Status::ok();
}
- Status onProgressUpdate(int32_t /* in_jobId */, int32_t /* in_progress */) override {
+ Status onProgressUpdate(int32_t /* in_sessionId */, int32_t /* in_progress */) override {
return Status::ok();
}
- virtual ~TestClient() { ALOGI("TestClient destroyed"); };
+ struct Event {
+ enum {
+ NoEvent,
+ Finished,
+ Failed,
+ } type;
+ SessionIdType sessionId;
+ };
- private:
- std::shared_ptr<IMediaTranscodingService> mService;
- TestClient(const TestClient&) = delete;
- TestClient& operator=(const TestClient&) = delete;
+ static constexpr Event NoEvent = {Event::NoEvent, 0};
+#define DECLARE_EVENT(action) \
+ static Event action(SessionIdType sessionId) { return {Event::action, sessionId}; }
+
+ DECLARE_EVENT(Finished);
+ DECLARE_EVENT(Failed);
+
+ const Event& popEvent() {
+ if (mEventQueue.empty()) {
+ mPoppedEvent = NoEvent;
+ } else {
+ mPoppedEvent = *mEventQueue.begin();
+ mEventQueue.pop_front();
+ }
+ return mPoppedEvent;
+ }
+
+private:
+ Event mPoppedEvent;
+ std::list<Event> mEventQueue;
+
+ TestClientCallback(const TestClientCallback&) = delete;
+ TestClientCallback& operator=(const TestClientCallback&) = delete;
+};
+
+bool operator==(const TestClientCallback::Event& lhs, const TestClientCallback::Event& rhs) {
+ return lhs.type == rhs.type && lhs.sessionId == rhs.sessionId;
+}
+
+struct TestController : public ControllerClientInterface {
+ TestController() { ALOGI("TestController Created"); }
+
+ virtual ~TestController() { ALOGI("TestController Destroyed"); }
+
+ bool submit(ClientIdType clientId, SessionIdType sessionId, uid_t /*uid*/,
+ const TranscodingRequestParcel& request,
+ const std::weak_ptr<ITranscodingClientCallback>& clientCallback) override {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+ if (mSessions.count(sessionKey) > 0) {
+ return false;
+ }
+
+ // This is the secret name we'll check, to test error propagation from
+ // the controller back to client.
+ if (request.sourceFilePath == "bad_source_file") {
+ return false;
+ }
+
+ mSessions[sessionKey].request = request;
+ mSessions[sessionKey].callback = clientCallback;
+
+ mLastSession = sessionKey;
+ return true;
+ }
+
+ bool cancel(ClientIdType clientId, SessionIdType sessionId) override {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+
+ if (mSessions.count(sessionKey) == 0) {
+ return false;
+ }
+ mSessions.erase(sessionKey);
+ return true;
+ }
+
+ bool getSession(ClientIdType clientId, SessionIdType sessionId,
+ TranscodingRequestParcel* request) override {
+ SessionKeyType sessionKey = std::make_pair(clientId, sessionId);
+ if (mSessions.count(sessionKey) == 0) {
+ return false;
+ }
+
+ *(TranscodingRequest*)request = mSessions[sessionKey].request;
+ return true;
+ }
+
+ void finishLastSession() {
+ auto it = mSessions.find(mLastSession);
+ if (it == mSessions.end()) {
+ return;
+ }
+ {
+ auto clientCallback = it->second.callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingFinished(
+ mLastSession.second,
+ TranscodingResultParcel({mLastSession.second, 0, std::nullopt}));
+ }
+ }
+ mSessions.erase(it);
+ }
+
+ void abortLastSession() {
+ auto it = mSessions.find(mLastSession);
+ if (it == mSessions.end()) {
+ return;
+ }
+ {
+ auto clientCallback = it->second.callback.lock();
+ if (clientCallback != nullptr) {
+ clientCallback->onTranscodingFailed(mLastSession.second,
+ TranscodingErrorCode::kUnknown);
+ }
+ }
+ mSessions.erase(it);
+ }
+
+ struct Session {
+ TranscodingRequest request;
+ std::weak_ptr<ITranscodingClientCallback> callback;
+ };
+
+ typedef std::pair<ClientIdType, SessionIdType> SessionKeyType;
+ std::map<SessionKeyType, Session> mSessions;
+ SessionKeyType mLastSession;
};
class TranscodingClientManagerTest : public ::testing::Test {
- public:
- TranscodingClientManagerTest() : mClientManager(TranscodingClientManager::getInstance()) {
+public:
+ TranscodingClientManagerTest()
+ : mController(new TestController()),
+ mClientManager(new TranscodingClientManager(mController)) {
ALOGD("TranscodingClientManagerTest created");
}
void SetUp() override {
- ::ndk::SpAIBinder binder(AServiceManager_getService("media.transcoding"));
- mService = IMediaTranscodingService::fromBinder(binder);
- if (mService == nullptr) {
- ALOGE("Failed to connect to the media.trascoding service.");
- return;
- }
-
- mTestClient = ::ndk::SharedRefBase::make<TestClient>(mService);
+ mClientCallback1 = ::ndk::SharedRefBase::make<TestClientCallback>();
+ mClientCallback2 = ::ndk::SharedRefBase::make<TestClientCallback>();
+ mClientCallback3 = ::ndk::SharedRefBase::make<TestClientCallback>();
}
- void TearDown() override {
- ALOGI("TranscodingClientManagerTest tear down");
- mService = nullptr;
- }
+ void TearDown() override { ALOGI("TranscodingClientManagerTest tear down"); }
~TranscodingClientManagerTest() { ALOGD("TranscodingClientManagerTest destroyed"); }
- TranscodingClientManager& mClientManager;
- std::shared_ptr<ITranscodingServiceClient> mTestClient = nullptr;
- std::shared_ptr<IMediaTranscodingService> mService = nullptr;
+ void addMultipleClients() {
+ EXPECT_EQ(
+ mClientManager->addClient(mClientCallback1, kClientName, kClientPackage, &mClient1),
+ OK);
+ EXPECT_NE(mClient1, nullptr);
+
+ EXPECT_EQ(
+ mClientManager->addClient(mClientCallback2, kClientName, kClientPackage, &mClient2),
+ OK);
+ EXPECT_NE(mClient2, nullptr);
+
+ EXPECT_EQ(
+ mClientManager->addClient(mClientCallback3, kClientName, kClientPackage, &mClient3),
+ OK);
+ EXPECT_NE(mClient3, nullptr);
+
+ EXPECT_EQ(mClientManager->getNumOfClients(), 3);
+ }
+
+ void unregisterMultipleClients() {
+ EXPECT_TRUE(mClient1->unregister().isOk());
+ EXPECT_TRUE(mClient2->unregister().isOk());
+ EXPECT_TRUE(mClient3->unregister().isOk());
+ EXPECT_EQ(mClientManager->getNumOfClients(), 0);
+ }
+
+ std::shared_ptr<TestController> mController;
+ std::shared_ptr<TranscodingClientManager> mClientManager;
+ std::shared_ptr<ITranscodingClient> mClient1;
+ std::shared_ptr<ITranscodingClient> mClient2;
+ std::shared_ptr<ITranscodingClient> mClient3;
+ std::shared_ptr<TestClientCallback> mClientCallback1;
+ std::shared_ptr<TestClientCallback> mClientCallback2;
+ std::shared_ptr<TestClientCallback> mClientCallback3;
};
-TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientId) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
-
- // Create a client with invalid client id.
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client, kInvalidClientId, kClientPid, kClientUid, kClientOpPackageName);
-
- // Add the client to the manager and expect failure.
- status_t err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err != OK);
+TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientCallback) {
+ // Add a client with null callback and expect failure.
+ std::shared_ptr<ITranscodingClient> client;
+ status_t err = mClientManager->addClient(nullptr, kClientName, kClientPackage, &client);
+ EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
}
+//
+//TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientPid) {
+// // Add a client with invalid Pid and expect failure.
+// std::shared_ptr<ITranscodingClient> client;
+// status_t err = mClientManager->addClient(mClientCallback1,
+// kClientName, kClientPackage, &client);
+// EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
+//}
-TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientPid) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
-
- // Create a client with invalid Pid.
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client, kClientId, kInvalidClientPid, kClientUid, kClientOpPackageName);
-
- // Add the client to the manager and expect failure.
- status_t err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err != OK);
-}
-
-TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientUid) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
-
- // Create a client with invalid Uid.
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client, kClientId, kClientPid, kInvalidClientUid, kClientOpPackageName);
-
- // Add the client to the manager and expect failure.
- status_t err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err != OK);
+TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientName) {
+ // Add a client with invalid name and expect failure.
+ std::shared_ptr<ITranscodingClient> client;
+ status_t err = mClientManager->addClient(mClientCallback1, kInvalidClientName, kClientPackage,
+ &client);
+ EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
}
TEST_F(TranscodingClientManagerTest, TestAddingWithInvalidClientPackageName) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
-
- // Create a client with invalid packagename.
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client, kClientId, kClientPid, kClientUid, kInvalidClientOpPackageName);
-
- // Add the client to the manager and expect failure.
- status_t err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err != OK);
+ // Add a client with invalid packagename and expect failure.
+ std::shared_ptr<ITranscodingClient> client;
+ status_t err = mClientManager->addClient(mClientCallback1, kClientName, kInvalidClientPackage,
+ &client);
+ EXPECT_EQ(err, IMediaTranscodingService::ERROR_ILLEGAL_ARGUMENT);
}
TEST_F(TranscodingClientManagerTest, TestAddingValidClient) {
- std::shared_ptr<ITranscodingServiceClient> client1 =
- ::ndk::SharedRefBase::make<TestClient>(mService);
+ // Add a valid client, should succeed.
+ std::shared_ptr<ITranscodingClient> client;
+ status_t err =
+ mClientManager->addClient(mClientCallback1, kClientName, kClientPackage, &client);
+ EXPECT_EQ(err, OK);
+ EXPECT_NE(client.get(), nullptr);
+ EXPECT_EQ(mClientManager->getNumOfClients(), 1);
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client1, kClientId, kClientPid, kClientUid, kClientOpPackageName);
-
- status_t err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err == OK);
-
- size_t numOfClients = mClientManager.getNumOfClients();
- EXPECT_EQ(numOfClients, 1);
-
- err = mClientManager.removeClient(kClientId);
- EXPECT_TRUE(err == OK);
+ // Unregister client, should succeed.
+ Status status = client->unregister();
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(mClientManager->getNumOfClients(), 0);
}
TEST_F(TranscodingClientManagerTest, TestAddingDupliacteClient) {
- std::shared_ptr<ITranscodingServiceClient> client1 =
- ::ndk::SharedRefBase::make<TestClient>(mService);
+ std::shared_ptr<ITranscodingClient> client;
+ status_t err =
+ mClientManager->addClient(mClientCallback1, kClientName, kClientPackage, &client);
+ EXPECT_EQ(err, OK);
+ EXPECT_NE(client.get(), nullptr);
+ EXPECT_EQ(mClientManager->getNumOfClients(), 1);
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client1, kClientId, kClientPid, kClientUid, kClientOpPackageName);
+ std::shared_ptr<ITranscodingClient> dupClient;
+ err = mClientManager->addClient(mClientCallback1, "dupClient", "dupPackage", &dupClient);
+ EXPECT_EQ(err, IMediaTranscodingService::ERROR_ALREADY_EXISTS);
+ EXPECT_EQ(dupClient.get(), nullptr);
+ EXPECT_EQ(mClientManager->getNumOfClients(), 1);
- status_t err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err == OK);
+ Status status = client->unregister();
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(mClientManager->getNumOfClients(), 0);
- err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err != OK);
+ err = mClientManager->addClient(mClientCallback1, "dupClient", "dupPackage", &dupClient);
+ EXPECT_EQ(err, OK);
+ EXPECT_NE(dupClient.get(), nullptr);
+ EXPECT_EQ(mClientManager->getNumOfClients(), 1);
- err = mClientManager.removeClient(kClientId);
- EXPECT_TRUE(err == OK);
+ status = dupClient->unregister();
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(mClientManager->getNumOfClients(), 0);
}
TEST_F(TranscodingClientManagerTest, TestAddingMultipleClient) {
- std::shared_ptr<ITranscodingServiceClient> client1 =
- ::ndk::SharedRefBase::make<TestClient>(mService);
-
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo1 =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client1, kClientId, kClientPid, kClientUid, kClientOpPackageName);
-
- status_t err = mClientManager.addClient(std::move(clientInfo1));
- EXPECT_TRUE(err == OK);
-
- std::shared_ptr<ITranscodingServiceClient> client2 =
- ::ndk::SharedRefBase::make<TestClient>(mService);
-
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo2 =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client2, kClientId + 1, kClientPid, kClientUid, kClientOpPackageName);
-
- err = mClientManager.addClient(std::move(clientInfo2));
- EXPECT_TRUE(err == OK);
-
- std::shared_ptr<ITranscodingServiceClient> client3 =
- ::ndk::SharedRefBase::make<TestClient>(mService);
-
- // Create a client with invalid packagename.
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo3 =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client3, kClientId + 2, kClientPid, kClientUid, kClientOpPackageName);
-
- err = mClientManager.addClient(std::move(clientInfo3));
- EXPECT_TRUE(err == OK);
-
- size_t numOfClients = mClientManager.getNumOfClients();
- EXPECT_EQ(numOfClients, 3);
-
- err = mClientManager.removeClient(kClientId);
- EXPECT_TRUE(err == OK);
-
- err = mClientManager.removeClient(kClientId + 1);
- EXPECT_TRUE(err == OK);
-
- err = mClientManager.removeClient(kClientId + 2);
- EXPECT_TRUE(err == OK);
+ addMultipleClients();
+ unregisterMultipleClients();
}
-TEST_F(TranscodingClientManagerTest, TestRemovingNonExistClient) {
- status_t err = mClientManager.removeClient(kInvalidClientId);
- EXPECT_TRUE(err != OK);
+TEST_F(TranscodingClientManagerTest, TestSubmitCancelGetSessions) {
+ addMultipleClients();
- err = mClientManager.removeClient(1000 /* clientId */);
- EXPECT_TRUE(err != OK);
+ // Test sessionId assignment.
+ TranscodingRequestParcel request;
+ request.sourceFilePath = "test_source_file_0";
+ request.destinationFilePath = "test_desintaion_file_0";
+ TranscodingSessionParcel session;
+ bool result;
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(0));
+
+ request.sourceFilePath = "test_source_file_1";
+ request.destinationFilePath = "test_desintaion_file_1";
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(1));
+
+ request.sourceFilePath = "test_source_file_2";
+ request.destinationFilePath = "test_desintaion_file_2";
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(2));
+
+ // Test submit bad request (no valid sourceFilePath) fails.
+ TranscodingRequestParcel badRequest;
+ badRequest.sourceFilePath = "bad_source_file";
+ badRequest.destinationFilePath = "bad_destination_file";
+ EXPECT_TRUE(mClient1->submitRequest(badRequest, &session, &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Test submit with bad pid/uid.
+ badRequest.sourceFilePath = "test_source_file_3";
+ badRequest.destinationFilePath = "test_desintaion_file_3";
+ badRequest.clientPid = kInvalidClientPid;
+ badRequest.clientUid = kInvalidClientUid;
+ EXPECT_TRUE(mClient1->submitRequest(badRequest, &session, &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Test get sessions by id.
+ EXPECT_TRUE(mClient1->getSessionWithId(SESSION(2), &session, &result).isOk());
+ EXPECT_EQ(session.sessionId, SESSION(2));
+ EXPECT_EQ(session.request.sourceFilePath, "test_source_file_2");
+ EXPECT_TRUE(result);
+
+ // Test get sessions by invalid id fails.
+ EXPECT_TRUE(mClient1->getSessionWithId(SESSION(100), &session, &result).isOk());
+ EXPECT_FALSE(result);
+
+    // Test that cancelling a non-existent session fails.
+ EXPECT_TRUE(mClient2->cancelSession(SESSION(100), &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Test cancel valid sessionId in arbitrary order.
+ EXPECT_TRUE(mClient1->cancelSession(SESSION(2), &result).isOk());
+ EXPECT_TRUE(result);
+
+ EXPECT_TRUE(mClient1->cancelSession(SESSION(0), &result).isOk());
+ EXPECT_TRUE(result);
+
+ EXPECT_TRUE(mClient1->cancelSession(SESSION(1), &result).isOk());
+ EXPECT_TRUE(result);
+
+ // Test cancel session again fails.
+ EXPECT_TRUE(mClient1->cancelSession(SESSION(1), &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Test get session after cancel fails.
+ EXPECT_TRUE(mClient1->getSessionWithId(SESSION(2), &session, &result).isOk());
+ EXPECT_FALSE(result);
+
+ // Test sessionId independence for each client.
+ EXPECT_TRUE(mClient2->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(0));
+
+ EXPECT_TRUE(mClient2->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(1));
+
+ unregisterMultipleClients();
}
-TEST_F(TranscodingClientManagerTest, TestCheckClientWithClientId) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
+TEST_F(TranscodingClientManagerTest, TestClientCallback) {
+ addMultipleClients();
- std::unique_ptr<TranscodingClientManager::ClientInfo> clientInfo =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- client, kClientId, kClientPid, kClientUid, kClientOpPackageName);
+ TranscodingRequestParcel request;
+ request.sourceFilePath = "test_source_file_name";
+ request.destinationFilePath = "test_destination_file_name";
+ TranscodingSessionParcel session;
+ bool result;
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(0));
- status_t err = mClientManager.addClient(std::move(clientInfo));
- EXPECT_TRUE(err == OK);
+ mController->finishLastSession();
+ EXPECT_EQ(mClientCallback1->popEvent(), TestClientCallback::Finished(session.sessionId));
- bool res = mClientManager.isClientIdRegistered(kClientId);
- EXPECT_TRUE(res);
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(1));
- res = mClientManager.isClientIdRegistered(kInvalidClientId);
- EXPECT_FALSE(res);
+ mController->abortLastSession();
+ EXPECT_EQ(mClientCallback1->popEvent(), TestClientCallback::Failed(session.sessionId));
+
+ EXPECT_TRUE(mClient1->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(2));
+
+ EXPECT_TRUE(mClient2->submitRequest(request, &session, &result).isOk());
+ EXPECT_TRUE(result);
+ EXPECT_EQ(session.sessionId, SESSION(0));
+
+ mController->finishLastSession();
+ EXPECT_EQ(mClientCallback2->popEvent(), TestClientCallback::Finished(session.sessionId));
+
+ unregisterMultipleClients();
}
-} // namespace android
\ No newline at end of file
+TEST_F(TranscodingClientManagerTest, TestUseAfterUnregister) {
+ // Add a client.
+ std::shared_ptr<ITranscodingClient> client;
+ status_t err =
+ mClientManager->addClient(mClientCallback1, kClientName, kClientPackage, &client);
+ EXPECT_EQ(err, OK);
+ EXPECT_NE(client.get(), nullptr);
+
+ // Submit 2 requests, 1 offline and 1 realtime.
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+ bool result;
+
+ request.sourceFilePath = "test_source_file_0";
+ request.destinationFilePath = "test_destination_file_0";
+ request.priority = TranscodingSessionPriority::kUnspecified;
+ EXPECT_TRUE(client->submitRequest(request, &session, &result).isOk() && result);
+ EXPECT_EQ(session.sessionId, SESSION(0));
+
+ request.sourceFilePath = "test_source_file_1";
+ request.destinationFilePath = "test_destination_file_1";
+ request.priority = TranscodingSessionPriority::kNormal;
+ EXPECT_TRUE(client->submitRequest(request, &session, &result).isOk() && result);
+ EXPECT_EQ(session.sessionId, SESSION(1));
+
+ // Unregister client, should succeed.
+ Status status = client->unregister();
+ EXPECT_TRUE(status.isOk());
+
+ // Test submit new request after unregister, should fail with ERROR_DISCONNECTED.
+ request.sourceFilePath = "test_source_file_2";
+ request.destinationFilePath = "test_destination_file_2";
+ request.priority = TranscodingSessionPriority::kNormal;
+ status = client->submitRequest(request, &session, &result);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ // Test cancel sessions after unregister, should fail with ERROR_DISCONNECTED
+ // regardless of realtime or offline session, or whether the sessionId is valid.
+ status = client->cancelSession(SESSION(0), &result);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ status = client->cancelSession(SESSION(1), &result);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ status = client->cancelSession(SESSION(2), &result);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ // Test get sessions, should fail with ERROR_DISCONNECTED regardless of realtime
+ // or offline session, or whether the sessionId is valid.
+ status = client->getSessionWithId(SESSION(0), &session, &result);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ status = client->getSessionWithId(SESSION(1), &session, &result);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ status = client->getSessionWithId(SESSION(2), &session, &result);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+}
+
+} // namespace android
diff --git a/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
new file mode 100644
index 0000000..4809d7a
--- /dev/null
+++ b/media/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for TranscodingSessionController
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "TranscodingSessionControllerTest"
+
+#include <aidl/android/media/BnTranscodingClientCallback.h>
+#include <aidl/android/media/IMediaTranscodingService.h>
+#include <aidl/android/media/ITranscodingClient.h>
+#include <aidl/android/media/ITranscodingClientCallback.h>
+#include <android-base/logging.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <gtest/gtest.h>
+#include <media/TranscodingClientManager.h>
+#include <media/TranscodingSessionController.h>
+#include <utils/Log.h>
+
+#include <unordered_set>
+
+namespace android {
+
+using Status = ::ndk::ScopedAStatus;
+using aidl::android::media::BnTranscodingClientCallback;
+using aidl::android::media::IMediaTranscodingService;
+using aidl::android::media::ITranscodingClient;
+using aidl::android::media::TranscodingRequestParcel;
+
+constexpr ClientIdType kClientId = 1000;
+constexpr SessionIdType kClientSessionId = 0;
+constexpr uid_t kClientUid = 5000;
+constexpr uid_t kInvalidUid = (uid_t)-1;
+
+#define CLIENT(n) (kClientId + (n))
+#define SESSION(n) (kClientSessionId + (n))
+#define UID(n) (kClientUid + (n))
+
+class TestUidPolicy : public UidPolicyInterface {
+public:
+ TestUidPolicy() = default;
+ virtual ~TestUidPolicy() = default;
+
+ // UidPolicyInterface
+ void registerMonitorUid(uid_t /*uid*/) override {}
+ void unregisterMonitorUid(uid_t /*uid*/) override {}
+ bool isUidOnTop(uid_t uid) override { return mTopUids.count(uid) > 0; }
+ std::unordered_set<uid_t> getTopUids() const override { return mTopUids; }
+ void setCallback(const std::shared_ptr<UidPolicyCallbackInterface>& cb) override {
+ mUidPolicyCallback = cb;
+ }
+ void setTop(uid_t uid) {
+ std::unordered_set<uid_t> uids = {uid};
+ setTop(uids);
+ }
+ void setTop(const std::unordered_set<uid_t>& uids) {
+ mTopUids = uids;
+ auto uidPolicyCb = mUidPolicyCallback.lock();
+ if (uidPolicyCb != nullptr) {
+ uidPolicyCb->onTopUidsChanged(mTopUids);
+ }
+ }
+
+ std::unordered_set<uid_t> mTopUids;
+ std::weak_ptr<UidPolicyCallbackInterface> mUidPolicyCallback;
+};
+
+class TestTranscoder : public TranscoderInterface {
+public:
+ TestTranscoder() : mLastError(TranscodingErrorCode::kUnknown) {}
+ virtual ~TestTranscoder() {}
+
+ // TranscoderInterface
+ void setCallback(const std::shared_ptr<TranscoderCallbackInterface>& /*cb*/) override {}
+
+ void start(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& /*request*/,
+ const std::shared_ptr<ITranscodingClientCallback>& /*clientCallback*/) override {
+ mEventQueue.push_back(Start(clientId, sessionId));
+ }
+ void pause(ClientIdType clientId, SessionIdType sessionId) override {
+ mEventQueue.push_back(Pause(clientId, sessionId));
+ }
+ void resume(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& /*request*/,
+ const std::shared_ptr<ITranscodingClientCallback>& /*clientCallback*/) override {
+ mEventQueue.push_back(Resume(clientId, sessionId));
+ }
+ void stop(ClientIdType clientId, SessionIdType sessionId) override {
+ mEventQueue.push_back(Stop(clientId, sessionId));
+ }
+
+ void onFinished(ClientIdType clientId, SessionIdType sessionId) {
+ mEventQueue.push_back(Finished(clientId, sessionId));
+ }
+
+ void onFailed(ClientIdType clientId, SessionIdType sessionId, TranscodingErrorCode err) {
+ mLastError = err;
+ mEventQueue.push_back(Failed(clientId, sessionId));
+ }
+
+ TranscodingErrorCode getLastError() {
+ TranscodingErrorCode result = mLastError;
+ mLastError = TranscodingErrorCode::kUnknown;
+ return result;
+ }
+
+ struct Event {
+ enum { NoEvent, Start, Pause, Resume, Stop, Finished, Failed } type;
+ ClientIdType clientId;
+ SessionIdType sessionId;
+ };
+
+ static constexpr Event NoEvent = {Event::NoEvent, 0, 0};
+
+#define DECLARE_EVENT(action) \
+ static Event action(ClientIdType clientId, SessionIdType sessionId) { \
+ return {Event::action, clientId, sessionId}; \
+ }
+
+ DECLARE_EVENT(Start);
+ DECLARE_EVENT(Pause);
+ DECLARE_EVENT(Resume);
+ DECLARE_EVENT(Stop);
+ DECLARE_EVENT(Finished);
+ DECLARE_EVENT(Failed);
+
+ const Event& popEvent() {
+ if (mEventQueue.empty()) {
+ mPoppedEvent = NoEvent;
+ } else {
+ mPoppedEvent = *mEventQueue.begin();
+ mEventQueue.pop_front();
+ }
+ return mPoppedEvent;
+ }
+
+private:
+ Event mPoppedEvent;
+ std::list<Event> mEventQueue;
+ TranscodingErrorCode mLastError;
+};
+
+bool operator==(const TestTranscoder::Event& lhs, const TestTranscoder::Event& rhs) {
+ return lhs.type == rhs.type && lhs.clientId == rhs.clientId && lhs.sessionId == rhs.sessionId;
+}
+
+struct TestClientCallback : public BnTranscodingClientCallback {
+ TestClientCallback(TestTranscoder* owner, int64_t clientId)
+ : mOwner(owner), mClientId(clientId) {
+ ALOGD("TestClient Created");
+ }
+
+ Status openFileDescriptor(const std::string& /*in_fileUri*/, const std::string& /*in_mode*/,
+ ::ndk::ScopedFileDescriptor* /*_aidl_return*/) override {
+ return Status::ok();
+ }
+
+ Status onTranscodingStarted(int32_t /*in_sessionId*/) override { return Status::ok(); }
+
+ Status onTranscodingPaused(int32_t /*in_sessionId*/) override { return Status::ok(); }
+
+ Status onTranscodingResumed(int32_t /*in_sessionId*/) override { return Status::ok(); }
+
+ Status onTranscodingFinished(int32_t in_sessionId,
+ const TranscodingResultParcel& in_result) override {
+ EXPECT_EQ(in_sessionId, in_result.sessionId);
+ ALOGD("TestClientCallback: received onTranscodingFinished");
+ mOwner->onFinished(mClientId, in_sessionId);
+ return Status::ok();
+ }
+
+ Status onTranscodingFailed(int32_t in_sessionId, TranscodingErrorCode in_errorCode) override {
+ mOwner->onFailed(mClientId, in_sessionId, in_errorCode);
+ return Status::ok();
+ }
+
+ Status onAwaitNumberOfSessionsChanged(int32_t /* in_sessionId */,
+ int32_t /* in_oldAwaitNumber */,
+ int32_t /* in_newAwaitNumber */) override {
+ return Status::ok();
+ }
+
+ Status onProgressUpdate(int32_t /* in_sessionId */, int32_t /* in_progress */) override {
+ return Status::ok();
+ }
+
+ virtual ~TestClientCallback() { ALOGI("TestClient destroyed"); };
+
+private:
+ TestTranscoder* mOwner;
+ int64_t mClientId;
+ TestClientCallback(const TestClientCallback&) = delete;
+ TestClientCallback& operator=(const TestClientCallback&) = delete;
+};
+
+class TranscodingSessionControllerTest : public ::testing::Test {
+public:
+ TranscodingSessionControllerTest() { ALOGI("TranscodingSessionControllerTest created"); }
+
+ void SetUp() override {
+ ALOGI("TranscodingSessionControllerTest set up");
+ mTranscoder.reset(new TestTranscoder());
+ mUidPolicy.reset(new TestUidPolicy());
+ mController.reset(new TranscodingSessionController(mTranscoder, mUidPolicy,
+ nullptr /*resourcePolicy*/));
+ mUidPolicy->setCallback(mController);
+
+ // Set priority only, ignore other fields for now.
+ mOfflineRequest.priority = TranscodingSessionPriority::kUnspecified;
+ mRealtimeRequest.priority = TranscodingSessionPriority::kHigh;
+ mClientCallback0 =
+ ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(0));
+ mClientCallback1 =
+ ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(1));
+ mClientCallback2 =
+ ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(2));
+ mClientCallback3 =
+ ::ndk::SharedRefBase::make<TestClientCallback>(mTranscoder.get(), CLIENT(3));
+ }
+
+ void TearDown() override { ALOGI("TranscodingSessionControllerTest tear down"); }
+
+ ~TranscodingSessionControllerTest() { ALOGD("TranscodingSessionControllerTest destroyed"); }
+
+ std::shared_ptr<TestTranscoder> mTranscoder;
+ std::shared_ptr<TestUidPolicy> mUidPolicy;
+ std::shared_ptr<TranscodingSessionController> mController;
+ TranscodingRequestParcel mOfflineRequest;
+ TranscodingRequestParcel mRealtimeRequest;
+ std::shared_ptr<TestClientCallback> mClientCallback0;
+ std::shared_ptr<TestClientCallback> mClientCallback1;
+ std::shared_ptr<TestClientCallback> mClientCallback2;
+ std::shared_ptr<TestClientCallback> mClientCallback3;
+};
+
+TEST_F(TranscodingSessionControllerTest, TestSubmitSession) {
+ ALOGD("TestSubmitSession");
+
+ // Start with UID(1) on top.
+ mUidPolicy->setTop(UID(1));
+
+ // Submit offline session to CLIENT(0) in UID(0).
+ // Should start immediately (because this is the only session).
+ mController->submit(CLIENT(0), SESSION(0), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), 0));
+
+ // Submit real-time session to CLIENT(0).
+ // Should pause offline session and start new session, even if UID(0) is not on top.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Submit real-time session to CLIENT(0), should be queued after the previous session.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+    // Submit real-time session to CLIENT(1) in the same UID, should be queued after the previous
+ // session.
+ mController->submit(CLIENT(1), SESSION(0), UID(0), mRealtimeRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit real-time session to CLIENT(2) in UID(1).
+ // Should pause previous session and start new session, because UID(1) is (has been) top.
+ mController->submit(CLIENT(2), SESSION(0), UID(1), mRealtimeRequest, mClientCallback2);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(2), SESSION(0)));
+
+ // Submit offline session, shouldn't generate any event.
+ mController->submit(CLIENT(2), SESSION(1), UID(1), mOfflineRequest, mClientCallback2);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(0) to top.
+ mUidPolicy->setTop(UID(0));
+ // Should pause current session, and resume last session in UID(0).
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(2), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(1)));
+}
+
+TEST_F(TranscodingSessionControllerTest, TestCancelSession) {
+ ALOGD("TestCancelSession");
+
+ // Submit real-time session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should not start.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit offline session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Cancel queued real-time session.
+ // Cancel real-time session SESSION(1), should be cancelled.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(1)));
+
+ // Cancel queued offline session.
+ // Cancel offline session SESSION(2), should be cancelled.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(2)));
+
+ // Submit offline session SESSION(3), shouldn't cause any event.
+ mController->submit(CLIENT(0), SESSION(3), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Cancel running real-time session SESSION(0).
+ // - Should be stopped first then cancelled.
+    // - Should also start offline session SESSION(3) because real-time queue is empty.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(3)));
+
+ // Submit real-time session SESSION(4), offline SESSION(3) should pause and SESSION(4)
+ // should start.
+ mController->submit(CLIENT(0), SESSION(4), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(3)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(4)));
+
+ // Cancel paused SESSION(3). SESSION(3) should be stopped.
+ EXPECT_TRUE(mController->cancel(CLIENT(0), SESSION(3)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Stop(CLIENT(0), SESSION(3)));
+}
+
+TEST_F(TranscodingSessionControllerTest, TestFinishSession) {
+ ALOGD("TestFinishSession");
+
+ // Start with unspecified top UID.
+ // Finish without any sessions submitted, should be ignored.
+ mController->onFinish(CLIENT(0), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit offline session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should pause offline session and start immediately.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Submit real-time session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Finish when the session never started, should be ignored.
+ mController->onFinish(CLIENT(0), SESSION(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+ // Submit real-time session to CLIENT(1) in UID(1), should pause previous session and start
+ // new session.
+ mController->submit(CLIENT(1), SESSION(0), UID(1), mRealtimeRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+
+ // Simulate Finish that arrived late, after pause issued by controller.
+ // Should still be propagated to client, but shouldn't trigger any new start.
+ mController->onFinish(CLIENT(0), SESSION(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(1)));
+
+ // Finish running real-time session, should start next real-time session in queue.
+ mController->onFinish(CLIENT(1), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(1), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+
+ // Finish running real-time session, should resume next session (offline session) in queue.
+ mController->onFinish(CLIENT(0), SESSION(2));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+
+ // Finish running offline session.
+ mController->onFinish(CLIENT(0), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(0)));
+
+ // Duplicate finish for last session, should be ignored.
+ mController->onFinish(CLIENT(0), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+}
+
+TEST_F(TranscodingSessionControllerTest, TestFailSession) {
+ ALOGD("TestFailSession");
+
+ // Start with unspecified top UID.
+ // Fail without any sessions submitted, should be ignored.
+ mController->onError(CLIENT(0), SESSION(0), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit offline session SESSION(0), should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), mOfflineRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+ // Submit real-time session SESSION(1), should pause offline session and start immediately.
+ mController->submit(CLIENT(0), SESSION(1), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(1)));
+
+ // Submit real-time session SESSION(2), should not start.
+ mController->submit(CLIENT(0), SESSION(2), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Fail when the session never started, should be ignored.
+ mController->onError(CLIENT(0), SESSION(2), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // UID(1) moves to top.
+ mUidPolicy->setTop(UID(1));
+ // Submit real-time session to CLIENT(1) in UID(1), should pause previous session and start
+ // new session.
+ mController->submit(CLIENT(1), SESSION(0), UID(1), mRealtimeRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(1)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+
+ // Simulate Fail that arrived late, after pause issued by controller.
+ // Should still be propagated to client, but shouldn't trigger any new start.
+ mController->onError(CLIENT(0), SESSION(1), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(1)));
+
+ // Fail running real-time session, should start next real-time session in queue.
+ mController->onError(CLIENT(1), SESSION(0), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(1), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(2)));
+
+ // Fail running real-time session, should resume next session (offline session) in queue.
+ mController->onError(CLIENT(0), SESSION(2), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(2)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+
+ // Fail running offline session, and test error code propagation.
+ mController->onError(CLIENT(0), SESSION(0), TranscodingErrorCode::kInvalidOperation);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Failed(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->getLastError(), TranscodingErrorCode::kInvalidOperation);
+
+ // Duplicate fail for last session, should be ignored.
+ mController->onError(CLIENT(0), SESSION(0), TranscodingErrorCode::kUnknown);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+}
+
+TEST_F(TranscodingSessionControllerTest, TestTopUidChanged) {
+ ALOGD("TestTopUidChanged");
+
+ // Start with unspecified top UID.
+ // Submit real-time session to CLIENT(0), session should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+    // Submit offline session to CLIENT(1), should not start.
+ mController->submit(CLIENT(1), SESSION(0), UID(0), mOfflineRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Move UID(1) to top.
+ mUidPolicy->setTop(UID(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit real-time session to CLIENT(2) in different uid UID(1).
+ // Should pause previous session and start new session.
+ mController->submit(CLIENT(2), SESSION(0), UID(1), mRealtimeRequest, mClientCallback2);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(2), SESSION(0)));
+
+ // Bring UID(0) back to top.
+ mUidPolicy->setTop(UID(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(2), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+
+ // Bring invalid uid to top.
+ mUidPolicy->setTop(kInvalidUid);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Finish session, next real-time session should resume.
+ mController->onFinish(CLIENT(0), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(2), SESSION(0)));
+
+ // Finish session, offline session should start.
+ mController->onFinish(CLIENT(2), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(2), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+}
+
+TEST_F(TranscodingSessionControllerTest, TestTopUidSetChanged) {
+ ALOGD("TestTopUidChanged_MultipleUids");
+
+ // Start with unspecified top UID.
+ // Submit real-time session to CLIENT(0), session should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+    // Submit offline session to CLIENT(1), should not start.
+ mController->submit(CLIENT(1), SESSION(0), UID(0), mOfflineRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Set UID(0), UID(1) to top set.
+ // UID(0) should continue to run.
+ mUidPolicy->setTop({UID(0), UID(1)});
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit real-time session to CLIENT(2) in different uid UID(1).
+ // UID(0) should pause and UID(1) should start.
+ mController->submit(CLIENT(2), SESSION(0), UID(1), mRealtimeRequest, mClientCallback2);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(2), SESSION(0)));
+
+ // Remove UID(0) from top set, and only leave UID(1) in the set.
+ // UID(1) should continue to run.
+ mUidPolicy->setTop(UID(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+    // Set UID(1), UID(2) to top set.
+ // UID(1) should continue to run.
+ mUidPolicy->setTop({UID(1), UID(2)});
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Bring UID(0) back to top.
+ mUidPolicy->setTop(UID(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(2), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+
+ // Bring invalid uid to top.
+ mUidPolicy->setTop(kInvalidUid);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Finish session, next real-time session from UID(1) should resume, even if UID(1)
+ // no longer top.
+ mController->onFinish(CLIENT(0), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(2), SESSION(0)));
+
+ // Finish session, offline session should start.
+ mController->onFinish(CLIENT(2), SESSION(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Finished(CLIENT(2), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(1), SESSION(0)));
+}
+
+TEST_F(TranscodingSessionControllerTest, TestResourceLost) {
+ ALOGD("TestResourceLost");
+
+ // Start with unspecified top UID.
+ // Submit real-time session to CLIENT(0), session should start immediately.
+ mController->submit(CLIENT(0), SESSION(0), UID(0), mRealtimeRequest, mClientCallback0);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(0), SESSION(0)));
+
+    // Submit offline session to CLIENT(1), should not start.
+ mController->submit(CLIENT(1), SESSION(0), UID(0), mOfflineRequest, mClientCallback1);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Move UID(1) to top.
+ mUidPolicy->setTop(UID(1));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Submit real-time session to CLIENT(2) in different uid UID(1).
+ // Should pause previous session and start new session.
+ mController->submit(CLIENT(2), SESSION(0), UID(1), mRealtimeRequest, mClientCallback2);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Pause(CLIENT(0), SESSION(0)));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(2), SESSION(0)));
+
+ // Test 1: No queue change during resource loss.
+ // Signal resource lost.
+ mController->onResourceLost();
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Signal resource available, CLIENT(2) should resume.
+ mController->onResourceAvailable();
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(2), SESSION(0)));
+
+ // Test 2: Change of queue order during resource loss.
+ // Signal resource lost.
+ mController->onResourceLost();
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Move UID(0) back to top, should have no resume due to no resource.
+ mUidPolicy->setTop(UID(0));
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Signal resource available, CLIENT(0) should resume.
+ mController->onResourceAvailable();
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Resume(CLIENT(0), SESSION(0)));
+
+ // Test 3: Adding new queue during resource loss.
+ // Signal resource lost.
+ mController->onResourceLost();
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Move UID(2) to top.
+ mUidPolicy->setTop(UID(2));
+
+ // Submit real-time session to CLIENT(3) in UID(2), session shouldn't start due to no resource.
+ mController->submit(CLIENT(3), SESSION(0), UID(2), mRealtimeRequest, mClientCallback3);
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::NoEvent);
+
+ // Signal resource available, CLIENT(3)'s session should start.
+ mController->onResourceAvailable();
+ EXPECT_EQ(mTranscoder->popEvent(), TestTranscoder::Start(CLIENT(3), SESSION(0)));
+}
+
+} // namespace android
diff --git a/media/libmediatranscoding/tests/assets/backyard_hevc_1920x1080_20Mbps.mp4 b/media/libmediatranscoding/tests/assets/backyard_hevc_1920x1080_20Mbps.mp4
new file mode 100644
index 0000000..80d1ec3
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/backyard_hevc_1920x1080_20Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/cubicle_avc_480x240_aac_24KHz.mp4 b/media/libmediatranscoding/tests/assets/cubicle_avc_480x240_aac_24KHz.mp4
new file mode 100644
index 0000000..ef7e1b7
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/cubicle_avc_480x240_aac_24KHz.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4 b/media/libmediatranscoding/tests/assets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4
new file mode 100644
index 0000000..df42a15
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/jets_hevc_1280x720_20Mbps.mp4 b/media/libmediatranscoding/tests/assets/jets_hevc_1280x720_20Mbps.mp4
new file mode 100644
index 0000000..7794b99
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/jets_hevc_1280x720_20Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/longtest_15s.mp4 b/media/libmediatranscoding/tests/assets/longtest_15s.mp4
new file mode 100644
index 0000000..b50d8e4
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/longtest_15s.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_12Mbps.mp4 b/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_12Mbps.mp4
new file mode 100644
index 0000000..92dda3b
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_12Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_20Mbps.mp4 b/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_20Mbps.mp4
new file mode 100644
index 0000000..2fe37bd
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/plex_hevc_3840x2160_20Mbps.mp4
Binary files differ
diff --git a/media/libmediatranscoding/tests/assets/push_assets.sh b/media/libmediatranscoding/tests/assets/push_assets.sh
new file mode 100755
index 0000000..8afc947
--- /dev/null
+++ b/media/libmediatranscoding/tests/assets/push_assets.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Pushes the assets to /data/local/tmp.
+#
+
+if [ "$SYNC_FINISHED" != true ]; then
+ if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+ fi
+
+ # ensure we have mm
+ . $ANDROID_BUILD_TOP/build/envsetup.sh
+
+ mm
+
+ echo "waiting for device"
+
+ adb root && adb wait-for-device remount
+fi
+
+echo "Copying files to device"
+
+adb shell mkdir -p /data/local/tmp/TranscodingTestAssets
+
+FILES=$ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/assets/*
+for file in $FILES
+do
+adb push --sync $file /data/local/tmp/TranscodingTestAssets
+done
+
+echo "Copy done"
diff --git a/media/libmediatranscoding/tests/build_and_run_all_unit_tests.sh b/media/libmediatranscoding/tests/build_and_run_all_unit_tests.sh
index d8e4830..5db9258 100644
--- a/media/libmediatranscoding/tests/build_and_run_all_unit_tests.sh
+++ b/media/libmediatranscoding/tests/build_and_run_all_unit_tests.sh
@@ -3,24 +3,32 @@
# Run tests in this directory.
#
-if [ -z "$ANDROID_BUILD_TOP" ]; then
- echo "Android build environment not set"
- exit -1
+if [ "$SYNC_FINISHED" != true ]; then
+ if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+ fi
+
+ # ensure we have mm
+ . $ANDROID_BUILD_TOP/build/envsetup.sh
+
+ mm
+
+ echo "waiting for device"
+
+ adb root && adb wait-for-device remount && adb sync
fi
-# ensure we have mm
-. $ANDROID_BUILD_TOP/build/envsetup.sh
-
-mm
-
-echo "waiting for device"
-
-adb root && adb wait-for-device remount && adb sync
-
echo "========================================"
echo "testing TranscodingClientManager"
-adb shell /data/nativetest64/TranscodingClientManager_tests/TranscodingClientManager_tests
+#adb shell /data/nativetest64/TranscodingClientManager_tests/TranscodingClientManager_tests
+adb shell /data/nativetest/TranscodingClientManager_tests/TranscodingClientManager_tests
echo "testing AdjustableMaxPriorityQueue"
-adb shell /data/nativetest64/AdjustableMaxPriorityQueue_tests/AdjustableMaxPriorityQueue_tests
+#adb shell /data/nativetest64/AdjustableMaxPriorityQueue_tests/AdjustableMaxPriorityQueue_tests
+adb shell /data/nativetest/AdjustableMaxPriorityQueue_tests/AdjustableMaxPriorityQueue_tests
+
+echo "testing TranscodingSessionController"
+#adb shell /data/nativetest64/TranscodingSessionController_tests/TranscodingSessionController_tests
+adb shell /data/nativetest/TranscodingSessionController_tests/TranscodingSessionController_tests
diff --git a/media/libmediatranscoding/transcoder/Android.bp b/media/libmediatranscoding/transcoder/Android.bp
new file mode 100644
index 0000000..258ed9a
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/Android.bp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_defaults {
+ name: "mediatranscoder_defaults",
+
+ srcs: [
+ "MediaSampleQueue.cpp",
+ "MediaSampleReaderNDK.cpp",
+ "MediaSampleWriter.cpp",
+ "MediaTrackTranscoder.cpp",
+ "MediaTranscoder.cpp",
+ "NdkCommon.cpp",
+ "PassthroughTrackTranscoder.cpp",
+ "VideoTrackTranscoder.cpp",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libmediandk",
+ "libnativewindow",
+ "libutils",
+ // TODO: Use libbinder_ndk
+ "libbinder",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wformat",
+ "-Wno-error=deprecated-declarations",
+ "-Wthread-safety",
+ "-Wunused",
+ "-Wunreachable-code",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ },
+}
+
+cc_library_shared {
+ name: "libmediatranscoder",
+ defaults: ["mediatranscoder_defaults"],
+}
+
+cc_library_shared {
+ name: "libmediatranscoder_asan",
+ defaults: ["mediatranscoder_defaults"],
+
+ sanitize: {
+ address: true,
+ },
+}
diff --git a/media/libmediatranscoding/transcoder/MediaSampleQueue.cpp b/media/libmediatranscoding/transcoder/MediaSampleQueue.cpp
new file mode 100644
index 0000000..b085c98
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/MediaSampleQueue.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaSampleQueue"
+
+#include <android-base/logging.h>
+#include <media/MediaSampleQueue.h>
+
+namespace android {
+
+bool MediaSampleQueue::enqueue(const std::shared_ptr<MediaSample>& sample) {
+ std::scoped_lock<std::mutex> lock(mMutex);
+ if (!mAborted) {
+ mSampleQueue.push(sample);
+ mCondition.notify_one();
+ }
+ return mAborted;
+}
+
+// Unfortunately std::unique_lock is incompatible with -Wthread-safety
+bool MediaSampleQueue::dequeue(std::shared_ptr<MediaSample>* sample) NO_THREAD_SAFETY_ANALYSIS {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (mSampleQueue.empty() && !mAborted) {
+ mCondition.wait(lock);
+ }
+
+ if (!mAborted) {
+ if (sample != nullptr) {
+ *sample = mSampleQueue.front();
+ }
+ mSampleQueue.pop();
+ }
+ return mAborted;
+}
+
+bool MediaSampleQueue::isEmpty() {
+ std::scoped_lock<std::mutex> lock(mMutex);
+ return mSampleQueue.empty();
+}
+
+void MediaSampleQueue::abort() {
+ std::scoped_lock<std::mutex> lock(mMutex);
+ // Clear the queue and notify consumers.
+ std::queue<std::shared_ptr<MediaSample>> empty = {};
+ std::swap(mSampleQueue, empty);
+ mAborted = true;
+ mCondition.notify_all();
+}
+}  // namespace android
diff --git a/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp b/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
new file mode 100644
index 0000000..53d567e
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/MediaSampleReaderNDK.cpp
@@ -0,0 +1,437 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaSampleReader"
+
+#include <android-base/logging.h>
+#include <media/MediaSampleReaderNDK.h>
+
+#include <algorithm>
+#include <cmath>
+
+namespace android {
+
+// Check that the extractor sample flags have the expected NDK meaning.
+static_assert(SAMPLE_FLAG_SYNC_SAMPLE == AMEDIAEXTRACTOR_SAMPLE_FLAG_SYNC,
+ "Sample flag mismatch: SYNC_SAMPLE");
+
+// static
+std::shared_ptr<MediaSampleReader> MediaSampleReaderNDK::createFromFd(int fd, size_t offset,
+ size_t size) {
+ AMediaExtractor* extractor = AMediaExtractor_new();
+ if (extractor == nullptr) {
+ LOG(ERROR) << "Unable to allocate AMediaExtractor";
+ return nullptr;
+ }
+
+ media_status_t status = AMediaExtractor_setDataSourceFd(extractor, fd, offset, size);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "AMediaExtractor_setDataSourceFd returned error: " << status;
+ AMediaExtractor_delete(extractor);
+ return nullptr;
+ }
+
+ auto sampleReader = std::shared_ptr<MediaSampleReaderNDK>(new MediaSampleReaderNDK(extractor));
+ return sampleReader;
+}
+
+MediaSampleReaderNDK::MediaSampleReaderNDK(AMediaExtractor* extractor)
+ : mExtractor(extractor), mTrackCount(AMediaExtractor_getTrackCount(mExtractor)) {
+ if (mTrackCount > 0) {
+ mTrackCursors.resize(mTrackCount);
+ }
+}
+
+MediaSampleReaderNDK::~MediaSampleReaderNDK() {
+ if (mExtractor != nullptr) {
+ AMediaExtractor_delete(mExtractor);
+ }
+}
+
+void MediaSampleReaderNDK::advanceTrack_l(int trackIndex) {
+ if (!mEnforceSequentialAccess) {
+ // Note: Positioning the extractor before advancing the track is needed for two reasons:
+ // 1. To enable multiple advances without explicitly letting the extractor catch up.
+ // 2. To prevent the extractor from being farther than "next".
+ (void)moveToTrack_l(trackIndex);
+ }
+
+ SampleCursor& cursor = mTrackCursors[trackIndex];
+ cursor.previous = cursor.current;
+ cursor.current = cursor.next;
+ cursor.next.reset();
+
+ if (mEnforceSequentialAccess && trackIndex == mExtractorTrackIndex) {
+ while (advanceExtractor_l()) {
+ SampleCursor& cursor = mTrackCursors[mExtractorTrackIndex];
+ if (cursor.current.isSet && cursor.current.index == mExtractorSampleIndex) {
+ if (mExtractorTrackIndex != trackIndex) {
+ mTrackSignals[mExtractorTrackIndex].notify_all();
+ }
+ break;
+ }
+ }
+ }
+ return;
+}
+
+bool MediaSampleReaderNDK::advanceExtractor_l() {
+ // Reset the "next" sample time whenever the extractor advances past a sample that is current,
+ // to ensure that "next" is appropriately updated when the extractor advances over the next
+ // sample of that track.
+ if (mTrackCursors[mExtractorTrackIndex].current.isSet &&
+ mTrackCursors[mExtractorTrackIndex].current.index == mExtractorSampleIndex) {
+ mTrackCursors[mExtractorTrackIndex].next.reset();
+ }
+
+ if (!AMediaExtractor_advance(mExtractor)) {
+ mEosReached = true;
+ for (auto it = mTrackSignals.begin(); it != mTrackSignals.end(); ++it) {
+ it->second.notify_all();
+ }
+ return false;
+ }
+
+ mExtractorTrackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
+ mExtractorSampleIndex++;
+
+ SampleCursor& cursor = mTrackCursors[mExtractorTrackIndex];
+ if (mExtractorSampleIndex > cursor.previous.index) {
+ if (!cursor.current.isSet) {
+ cursor.current.set(mExtractorSampleIndex, AMediaExtractor_getSampleTime(mExtractor));
+ } else if (!cursor.next.isSet && mExtractorSampleIndex > cursor.current.index) {
+ cursor.next.set(mExtractorSampleIndex, AMediaExtractor_getSampleTime(mExtractor));
+ }
+ }
+
+ return true;
+}
+
+media_status_t MediaSampleReaderNDK::seekExtractorBackwards_l(int64_t targetTimeUs,
+ int targetTrackIndex,
+ uint64_t targetSampleIndex) {
+ if (targetSampleIndex > mExtractorSampleIndex) {
+ LOG(ERROR) << "Error: Forward seek is not supported";
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+
+ // AMediaExtractor supports reading negative timestamps but does not support seeking to them.
+ const int64_t seekToTimeUs = std::max(targetTimeUs, (int64_t)0);
+ media_status_t status =
+ AMediaExtractor_seekTo(mExtractor, seekToTimeUs, AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "Unable to seek to " << seekToTimeUs << ", target " << targetTimeUs;
+ return status;
+ }
+ mExtractorTrackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
+ int64_t sampleTimeUs = AMediaExtractor_getSampleTime(mExtractor);
+
+ while (sampleTimeUs != targetTimeUs || mExtractorTrackIndex != targetTrackIndex) {
+ if (!AMediaExtractor_advance(mExtractor)) {
+ return AMEDIA_ERROR_END_OF_STREAM;
+ }
+ mExtractorTrackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
+ sampleTimeUs = AMediaExtractor_getSampleTime(mExtractor);
+ }
+ mExtractorSampleIndex = targetSampleIndex;
+ return AMEDIA_OK;
+}
+
+media_status_t MediaSampleReaderNDK::moveToSample_l(SamplePosition& pos, int trackIndex) {
+ // Seek backwards if the extractor is ahead of the sample.
+ if (pos.isSet && mExtractorSampleIndex > pos.index) {
+ media_status_t status = seekExtractorBackwards_l(pos.timeStampUs, trackIndex, pos.index);
+ if (status != AMEDIA_OK) return status;
+ }
+
+ // Advance until extractor points to the sample.
+ while (!(pos.isSet && pos.index == mExtractorSampleIndex)) {
+ if (!advanceExtractor_l()) {
+ return AMEDIA_ERROR_END_OF_STREAM;
+ }
+ }
+
+ return AMEDIA_OK;
+}
+
+media_status_t MediaSampleReaderNDK::moveToTrack_l(int trackIndex) {
+ return moveToSample_l(mTrackCursors[trackIndex].current, trackIndex);
+}
+
+media_status_t MediaSampleReaderNDK::waitForTrack_l(int trackIndex,
+ std::unique_lock<std::mutex>& lockHeld) {
+ while (trackIndex != mExtractorTrackIndex && !mEosReached && mEnforceSequentialAccess) {
+ mTrackSignals[trackIndex].wait(lockHeld);
+ }
+
+ if (mEosReached) {
+ return AMEDIA_ERROR_END_OF_STREAM;
+ }
+ return AMEDIA_OK;
+}
+
+media_status_t MediaSampleReaderNDK::primeExtractorForTrack_l(
+ int trackIndex, std::unique_lock<std::mutex>& lockHeld) {
+ if (mExtractorTrackIndex < 0) {
+ mExtractorTrackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
+ if (mExtractorTrackIndex < 0) {
+ return AMEDIA_ERROR_END_OF_STREAM;
+ }
+ mTrackCursors[mExtractorTrackIndex].current.set(mExtractorSampleIndex,
+ AMediaExtractor_getSampleTime(mExtractor));
+ }
+
+ if (mEnforceSequentialAccess) {
+ return waitForTrack_l(trackIndex, lockHeld);
+ } else {
+ return moveToTrack_l(trackIndex);
+ }
+}
+
+media_status_t MediaSampleReaderNDK::selectTrack(int trackIndex) {
+ std::scoped_lock lock(mExtractorMutex);
+
+ if (trackIndex < 0 || trackIndex >= mTrackCount) {
+ LOG(ERROR) << "Invalid trackIndex " << trackIndex << " for trackCount " << mTrackCount;
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ } else if (mTrackSignals.find(trackIndex) != mTrackSignals.end()) {
+ LOG(ERROR) << "TrackIndex " << trackIndex << " already selected";
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ } else if (mExtractorTrackIndex >= 0) {
+ LOG(ERROR) << "Tracks must be selected before sample reading begins.";
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+
+ media_status_t status = AMediaExtractor_selectTrack(mExtractor, trackIndex);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "AMediaExtractor_selectTrack returned error: " << status;
+ return status;
+ }
+
+ mTrackSignals.emplace(std::piecewise_construct, std::forward_as_tuple(trackIndex),
+ std::forward_as_tuple());
+ return AMEDIA_OK;
+}
+
+media_status_t MediaSampleReaderNDK::setEnforceSequentialAccess(bool enforce) {
+ std::scoped_lock lock(mExtractorMutex);
+
+ if (mEnforceSequentialAccess && !enforce) {
+ // If switching from enforcing to not enforcing sequential access there may be threads
+    // waiting that need to be woken up.
+ for (auto it = mTrackSignals.begin(); it != mTrackSignals.end(); ++it) {
+ it->second.notify_all();
+ }
+ } else if (!mEnforceSequentialAccess && enforce && mExtractorTrackIndex >= 0) {
+ // If switching from not enforcing to enforcing sequential access the extractor needs to be
+ // positioned for the track farthest behind so that it won't get stuck waiting.
+ struct {
+ SamplePosition* pos = nullptr;
+ int trackIndex = -1;
+ } earliestSample;
+
+ for (int trackIndex = 0; trackIndex < mTrackCount; ++trackIndex) {
+ SamplePosition& lastKnownTrackPosition = mTrackCursors[trackIndex].current.isSet
+ ? mTrackCursors[trackIndex].current
+ : mTrackCursors[trackIndex].previous;
+
+ if (lastKnownTrackPosition.isSet) {
+ if (earliestSample.pos == nullptr ||
+ earliestSample.pos->index > lastKnownTrackPosition.index) {
+ earliestSample.pos = &lastKnownTrackPosition;
+ earliestSample.trackIndex = trackIndex;
+ }
+ }
+ }
+
+ if (earliestSample.pos == nullptr) {
+ LOG(ERROR) << "No known sample position found";
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ media_status_t status = moveToSample_l(*earliestSample.pos, earliestSample.trackIndex);
+ if (status != AMEDIA_OK) return status;
+
+ while (!(mTrackCursors[mExtractorTrackIndex].current.isSet &&
+ mTrackCursors[mExtractorTrackIndex].current.index == mExtractorSampleIndex)) {
+ if (!advanceExtractor_l()) {
+ return AMEDIA_ERROR_END_OF_STREAM;
+ }
+ }
+ }
+
+ mEnforceSequentialAccess = enforce;
+ return AMEDIA_OK;
+}
+
// Estimates a track's bitrate by scanning up to the first 10 seconds of its samples,
// then rewinds the extractor. Must be called before any sample reading begins since it
// moves the shared extractor position.
media_status_t MediaSampleReaderNDK::getEstimatedBitrateForTrack(int trackIndex, int32_t* bitrate) {
    std::scoped_lock lock(mExtractorMutex);
    media_status_t status = AMEDIA_OK;

    if (mTrackSignals.find(trackIndex) == mTrackSignals.end()) {
        LOG(ERROR) << "Track is not selected.";
        return AMEDIA_ERROR_INVALID_PARAMETER;
    } else if (bitrate == nullptr) {
        LOG(ERROR) << "bitrate pointer is NULL.";
        return AMEDIA_ERROR_INVALID_PARAMETER;
    } else if (mExtractorTrackIndex >= 0) {
        LOG(ERROR) << "getEstimatedBitrateForTrack must be called before sample reading begins.";
        return AMEDIA_ERROR_UNSUPPORTED;
    }

    // Sample the track.
    static constexpr int64_t kSamplingDurationUs = 10 * 1000 * 1000;  // 10 seconds
    size_t lastSampleSize = 0;
    size_t totalSampleSize = 0;
    int64_t firstSampleTimeUs = 0;
    int64_t lastSampleTimeUs = 0;

    // Walk the container, accumulating the size of samples belonging to this track only.
    do {
        if (AMediaExtractor_getSampleTrackIndex(mExtractor) == trackIndex) {
            lastSampleTimeUs = AMediaExtractor_getSampleTime(mExtractor);
            if (totalSampleSize == 0) {
                firstSampleTimeUs = lastSampleTimeUs;
            }

            lastSampleSize = AMediaExtractor_getSampleSize(mExtractor);
            totalSampleSize += lastSampleSize;
        }
    } while ((lastSampleTimeUs - firstSampleTimeUs) < kSamplingDurationUs &&
             AMediaExtractor_advance(mExtractor));

    // Reset the extractor to the beginning.
    status = AMediaExtractor_seekTo(mExtractor, 0, AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC);
    if (status != AMEDIA_OK) {
        LOG(ERROR) << "Unable to reset extractor: " << status;
        return status;
    }

    int64_t durationUs = 0;
    const int64_t sampledDurationUs = lastSampleTimeUs - firstSampleTimeUs;

    if (sampledDurationUs < kSamplingDurationUs) {
        // Track is shorter than the sampling duration so use the full track duration to get better
        // accuracy (i.e. don't skip the last sample).
        AMediaFormat* trackFormat = getTrackFormat(trackIndex);
        if (!AMediaFormat_getInt64(trackFormat, AMEDIAFORMAT_KEY_DURATION, &durationUs)) {
            durationUs = 0;
        }
        AMediaFormat_delete(trackFormat);
    }

    if (durationUs == 0) {
        // The sampled duration does not account for the last sample's duration so its size should
        // not be included either.
        totalSampleSize -= lastSampleSize;
        durationUs = sampledDurationUs;
    }

    if (totalSampleSize == 0 || durationUs <= 0) {
        LOG(ERROR) << "Unable to estimate track bitrate";
        return AMEDIA_ERROR_MALFORMED;
    }

    // bits / seconds, rounded to the nearest integer.
    *bitrate = roundf((float)totalSampleSize * 8 * 1000000 / durationUs);
    return AMEDIA_OK;
}
+
+media_status_t MediaSampleReaderNDK::getSampleInfoForTrack(int trackIndex, MediaSampleInfo* info) {
+ std::unique_lock<std::mutex> lock(mExtractorMutex);
+
+ if (mTrackSignals.find(trackIndex) == mTrackSignals.end()) {
+ LOG(ERROR) << "Track not selected.";
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ } else if (info == nullptr) {
+ LOG(ERROR) << "MediaSampleInfo pointer is NULL.";
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ media_status_t status = primeExtractorForTrack_l(trackIndex, lock);
+ if (status == AMEDIA_OK) {
+ info->presentationTimeUs = AMediaExtractor_getSampleTime(mExtractor);
+ info->flags = AMediaExtractor_getSampleFlags(mExtractor);
+ info->size = AMediaExtractor_getSampleSize(mExtractor);
+ } else if (status == AMEDIA_ERROR_END_OF_STREAM) {
+ info->presentationTimeUs = 0;
+ info->flags = SAMPLE_FLAG_END_OF_STREAM;
+ info->size = 0;
+ }
+ return status;
+}
+
+media_status_t MediaSampleReaderNDK::readSampleDataForTrack(int trackIndex, uint8_t* buffer,
+ size_t bufferSize) {
+ std::unique_lock<std::mutex> lock(mExtractorMutex);
+
+ if (mTrackSignals.find(trackIndex) == mTrackSignals.end()) {
+ LOG(ERROR) << "Track not selected.";
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ } else if (buffer == nullptr) {
+ LOG(ERROR) << "buffer pointer is NULL";
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ media_status_t status = primeExtractorForTrack_l(trackIndex, lock);
+ if (status != AMEDIA_OK) {
+ return status;
+ }
+
+ ssize_t sampleSize = AMediaExtractor_getSampleSize(mExtractor);
+ if (bufferSize < sampleSize) {
+ LOG(ERROR) << "Buffer is too small for sample, " << bufferSize << " vs " << sampleSize;
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ ssize_t bytesRead = AMediaExtractor_readSampleData(mExtractor, buffer, bufferSize);
+ if (bytesRead < sampleSize) {
+ LOG(ERROR) << "Unable to read full sample, " << bytesRead << " vs " << sampleSize;
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ advanceTrack_l(trackIndex);
+
+ return AMEDIA_OK;
+}
+
+void MediaSampleReaderNDK::advanceTrack(int trackIndex) {
+ std::scoped_lock lock(mExtractorMutex);
+
+ if (mTrackSignals.find(trackIndex) != mTrackSignals.end()) {
+ advanceTrack_l(trackIndex);
+ } else {
+ LOG(ERROR) << "Trying to advance a track that is not selected (#" << trackIndex << ")";
+ }
+}
+
// Returns the container-level format. The caller takes ownership and must release it
// with AMediaFormat_delete.
AMediaFormat* MediaSampleReaderNDK::getFileFormat() {
    return AMediaExtractor_getFileFormat(mExtractor);
}

// Number of tracks in the source container, cached at construction.
size_t MediaSampleReaderNDK::getTrackCount() const {
    return mTrackCount;
}
+
// Returns the format of the given track, or a newly-created empty format if the index is
// out of range. In both cases the caller takes ownership and must release the returned
// format with AMediaFormat_delete.
AMediaFormat* MediaSampleReaderNDK::getTrackFormat(int trackIndex) {
    if (trackIndex < 0 || trackIndex >= mTrackCount) {
        LOG(ERROR) << "Invalid trackIndex " << trackIndex << " for trackCount " << mTrackCount;
        return AMediaFormat_new();
    }

    return AMediaExtractor_getTrackFormat(mExtractor, trackIndex);
}
+
+} // namespace android
diff --git a/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp b/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
new file mode 100644
index 0000000..afa5021
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/MediaSampleWriter.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaSampleWriter"
+
+#include <android-base/logging.h>
+#include <media/MediaSampleWriter.h>
+#include <media/NdkMediaMuxer.h>
+
+namespace android {
+
+class DefaultMuxer : public MediaSampleWriterMuxerInterface {
+public:
+ // MediaSampleWriterMuxerInterface
+ ssize_t addTrack(AMediaFormat* trackFormat) override {
+ // If the track format has rotation, need to call AMediaMuxer_setOrientationHint
+ // to set the rotation. Muxer doesn't take rotation specified on the track.
+ const char* mime;
+ if (AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime) &&
+ strncmp(mime, "video/", 6) == 0) {
+ int32_t rotation;
+ if (AMediaFormat_getInt32(trackFormat, AMEDIAFORMAT_KEY_ROTATION, &rotation) &&
+ (rotation != 0)) {
+ AMediaMuxer_setOrientationHint(mMuxer, rotation);
+ }
+ }
+
+ return AMediaMuxer_addTrack(mMuxer, trackFormat);
+ }
+ media_status_t start() override { return AMediaMuxer_start(mMuxer); }
+ media_status_t writeSampleData(size_t trackIndex, const uint8_t* data,
+ const AMediaCodecBufferInfo* info) override {
+ return AMediaMuxer_writeSampleData(mMuxer, trackIndex, data, info);
+ }
+ media_status_t stop() override { return AMediaMuxer_stop(mMuxer); }
+ // ~MediaSampleWriterMuxerInterface
+
+ static std::shared_ptr<DefaultMuxer> create(int fd) {
+ AMediaMuxer* ndkMuxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
+ if (ndkMuxer == nullptr) {
+ LOG(ERROR) << "Unable to create AMediaMuxer";
+ return nullptr;
+ }
+
+ return std::make_shared<DefaultMuxer>(ndkMuxer);
+ }
+
+ ~DefaultMuxer() {
+ if (mMuxer != nullptr) {
+ AMediaMuxer_delete(mMuxer);
+ }
+ }
+
+ DefaultMuxer(AMediaMuxer* muxer) : mMuxer(muxer){};
+ DefaultMuxer() = delete;
+
+private:
+ AMediaMuxer* mMuxer;
+};
+
+// static
+std::shared_ptr<MediaSampleWriter> MediaSampleWriter::Create() {
+ return std::shared_ptr<MediaSampleWriter>(new MediaSampleWriter());
+}
+
+MediaSampleWriter::~MediaSampleWriter() {
+ if (mState == STARTED) {
+ stop(); // Join thread.
+ }
+}
+
+bool MediaSampleWriter::init(int fd, const std::weak_ptr<CallbackInterface>& callbacks) {
+ return init(DefaultMuxer::create(fd), callbacks);
+}
+
+bool MediaSampleWriter::init(const std::shared_ptr<MediaSampleWriterMuxerInterface>& muxer,
+ const std::weak_ptr<CallbackInterface>& callbacks) {
+ if (callbacks.lock() == nullptr) {
+ LOG(ERROR) << "Callback object cannot be null";
+ return false;
+ } else if (muxer == nullptr) {
+ LOG(ERROR) << "Muxer cannot be null";
+ return false;
+ }
+
+ std::scoped_lock lock(mMutex);
+ if (mState != UNINITIALIZED) {
+ LOG(ERROR) << "Sample writer is already initialized";
+ return false;
+ }
+
+ mState = INITIALIZED;
+ mMuxer = muxer;
+ mCallbacks = callbacks;
+ return true;
+}
+
+MediaSampleWriter::MediaSampleConsumerFunction MediaSampleWriter::addTrack(
+ const std::shared_ptr<AMediaFormat>& trackFormat) {
+ if (trackFormat == nullptr) {
+ LOG(ERROR) << "Track format must be non-null";
+ return nullptr;
+ }
+
+ std::scoped_lock lock(mMutex);
+ if (mState != INITIALIZED) {
+ LOG(ERROR) << "Muxer needs to be initialized when adding tracks.";
+ return nullptr;
+ }
+ ssize_t trackIndexOrError = mMuxer->addTrack(trackFormat.get());
+ if (trackIndexOrError < 0) {
+ LOG(ERROR) << "Failed to add media track to muxer: " << trackIndexOrError;
+ return nullptr;
+ }
+ const size_t trackIndex = static_cast<size_t>(trackIndexOrError);
+
+ int64_t durationUs;
+ if (!AMediaFormat_getInt64(trackFormat.get(), AMEDIAFORMAT_KEY_DURATION, &durationUs)) {
+ durationUs = 0;
+ }
+
+ mTracks.emplace(trackIndex, durationUs);
+ std::shared_ptr<MediaSampleWriter> thisWriter = shared_from_this();
+
+ return [self = shared_from_this(), trackIndex](const std::shared_ptr<MediaSample>& sample) {
+ self->addSampleToTrack(trackIndex, sample);
+ };
+}
+
+void MediaSampleWriter::addSampleToTrack(size_t trackIndex,
+ const std::shared_ptr<MediaSample>& sample) {
+ if (sample == nullptr) return;
+
+ bool wasEmpty;
+ {
+ std::scoped_lock lock(mMutex);
+ wasEmpty = mSampleQueue.empty();
+ mSampleQueue.push(std::make_pair(trackIndex, sample));
+ }
+
+ if (wasEmpty) {
+ mSampleSignal.notify_one();
+ }
+}
+
// Starts the writer thread. Returns false when no tracks have been added or the writer
// has not been initialized. The thread reports completion to the client callback when
// the write loop exits.
bool MediaSampleWriter::start() {
    std::scoped_lock lock(mMutex);

    if (mTracks.size() == 0) {
        LOG(ERROR) << "No tracks to write.";
        return false;
    } else if (mState != INITIALIZED) {
        LOG(ERROR) << "Sample writer is not initialized";
        return false;
    }

    mState = STARTED;
    mThread = std::thread([this] {
        media_status_t status = writeSamples();
        // The callback object may be gone; a dead weak_ptr means nobody to notify.
        if (auto callbacks = mCallbacks.lock()) {
            callbacks->onFinished(this, status);
        }
    });
    return true;
}
+
// Stops and joins the writer thread. Returns false if the writer was never started.
bool MediaSampleWriter::stop() {
    {
        std::scoped_lock lock(mMutex);
        if (mState != STARTED) {
            LOG(ERROR) << "Sample writer is not started.";
            return false;
        }
        mState = STOPPED;
    }

    // Wake the writer thread (it may be waiting for samples) so it observes STOPPED
    // and exits; then wait for it to finish.
    mSampleSignal.notify_all();
    mThread.join();
    return true;
}
+
+media_status_t MediaSampleWriter::writeSamples() {
+ media_status_t muxerStatus = mMuxer->start();
+ if (muxerStatus != AMEDIA_OK) {
+ LOG(ERROR) << "Error starting muxer: " << muxerStatus;
+ return muxerStatus;
+ }
+
+ media_status_t writeStatus = runWriterLoop();
+ if (writeStatus != AMEDIA_OK) {
+ LOG(ERROR) << "Error writing samples: " << writeStatus;
+ }
+
+ muxerStatus = mMuxer->stop();
+ if (muxerStatus != AMEDIA_OK) {
+ LOG(ERROR) << "Error stopping muxer: " << muxerStatus;
+ }
+
+ return writeStatus != AMEDIA_OK ? writeStatus : muxerStatus;
+}
+
// Core write loop: pops queued samples (ordered by the queue's comparator) and feeds
// them to the muxer until every track has reached end of stream or the writer is
// stopped. Progress is reported against the longest-duration ("primary") track.
// NOTE(review): NO_THREAD_SAFETY_ANALYSIS presumably because mMutex is taken in a
// scoped block inside the loop, which the static analysis cannot follow — confirm.
media_status_t MediaSampleWriter::runWriterLoop() NO_THREAD_SAFETY_ANALYSIS {
    AMediaCodecBufferInfo bufferInfo;
    int32_t lastProgressUpdate = 0;
    int trackEosCount = 0;

    // Set the "primary" track that will be used to determine progress to the track with longest
    // duration.
    int primaryTrackIndex = -1;
    int64_t longestDurationUs = 0;
    for (auto it = mTracks.begin(); it != mTracks.end(); ++it) {
        if (it->second.mDurationUs > longestDurationUs) {
            primaryTrackIndex = it->first;
            longestDurationUs = it->second.mDurationUs;
        }
    }

    while (true) {
        // All tracks done => normal exit.
        if (trackEosCount >= mTracks.size()) {
            break;
        }

        size_t trackIndex;
        std::shared_ptr<MediaSample> sample;
        {
            std::unique_lock lock(mMutex);
            while (mSampleQueue.empty() && mState == STARTED) {
                mSampleSignal.wait(lock);
            }

            if (mState != STARTED) {
                return AMEDIA_ERROR_UNKNOWN;  // TODO(lnilsson): Custom error code.
            }

            auto& topEntry = mSampleQueue.top();
            trackIndex = topEntry.first;
            sample = topEntry.second;
            mSampleQueue.pop();
        }

        TrackRecord& track = mTracks[trackIndex];

        if (sample->info.flags & SAMPLE_FLAG_END_OF_STREAM) {
            // Ignore duplicate EOS markers for the same track.
            if (track.mReachedEos) {
                continue;
            }

            // Track reached end of stream.
            track.mReachedEos = true;
            trackEosCount++;

            // Preserve source track duration by setting the appropriate timestamp on the
            // empty End-Of-Stream sample.
            if (track.mDurationUs > 0 && track.mFirstSampleTimeSet) {
                sample->info.presentationTimeUs = track.mDurationUs + track.mFirstSampleTimeUs;
            }
        }

        track.mPrevSampleTimeUs = sample->info.presentationTimeUs;
        if (!track.mFirstSampleTimeSet) {
            // Record the first sample's timestamp in order to translate duration to EOS
            // time for tracks that does not start at 0.
            track.mFirstSampleTimeUs = sample->info.presentationTimeUs;
            track.mFirstSampleTimeSet = true;
        }

        bufferInfo.offset = sample->dataOffset;
        bufferInfo.size = sample->info.size;
        bufferInfo.flags = sample->info.flags;
        bufferInfo.presentationTimeUs = sample->info.presentationTimeUs;

        media_status_t status = mMuxer->writeSampleData(trackIndex, sample->buffer, &bufferInfo);
        if (status != AMEDIA_OK) {
            LOG(ERROR) << "writeSampleData returned " << status;
            return status;
        }
        // Release the sample buffer as soon as it has been muxed.
        sample.reset();

        // TODO(lnilsson): Add option to toggle progress reporting on/off.
        // Note: if no track has a positive duration, primaryTrackIndex stays -1 and this
        // branch never runs (size_t trackIndex can never equal -1), so no division by zero.
        if (trackIndex == primaryTrackIndex) {
            const int64_t elapsed = track.mPrevSampleTimeUs - track.mFirstSampleTimeUs;
            int32_t progress = (elapsed * 100) / track.mDurationUs;
            progress = std::clamp(progress, 0, 100);

            // Only report forward progress to avoid spamming the client callback.
            if (progress > lastProgressUpdate) {
                if (auto callbacks = mCallbacks.lock()) {
                    callbacks->onProgressUpdate(this, progress);
                }
                lastProgressUpdate = progress;
            }
        }
    }

    return AMEDIA_OK;
}
+} // namespace android
diff --git a/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
new file mode 100644
index 0000000..698594f
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/MediaTrackTranscoder.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaTrackTranscoder"
+
+#include <android-base/logging.h>
+#include <media/MediaTrackTranscoder.h>
+#include <media/MediaTrackTranscoderCallback.h>
+
+namespace android {
+
+media_status_t MediaTrackTranscoder::configure(
+ const std::shared_ptr<MediaSampleReader>& mediaSampleReader, int trackIndex,
+ const std::shared_ptr<AMediaFormat>& destinationFormat) {
+ std::scoped_lock lock{mStateMutex};
+
+ if (mState != UNINITIALIZED) {
+ LOG(ERROR) << "Configure can only be called once";
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+
+ if (mediaSampleReader == nullptr) {
+ LOG(ERROR) << "MediaSampleReader is null";
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ if (trackIndex < 0 || trackIndex >= mediaSampleReader->getTrackCount()) {
+ LOG(ERROR) << "TrackIndex is invalid " << trackIndex;
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ mMediaSampleReader = mediaSampleReader;
+ mTrackIndex = trackIndex;
+
+ mSourceFormat = std::shared_ptr<AMediaFormat>(mMediaSampleReader->getTrackFormat(mTrackIndex),
+ &AMediaFormat_delete);
+ if (mSourceFormat == nullptr) {
+ LOG(ERROR) << "Unable to get format for track #" << mTrackIndex;
+ return AMEDIA_ERROR_MALFORMED;
+ }
+
+ media_status_t status = configureDestinationFormat(destinationFormat);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "configure failed with error " << status;
+ return status;
+ }
+
+ mState = CONFIGURED;
+ return AMEDIA_OK;
+}
+
// Launches the transcoding thread. Returns false unless configure() succeeded first.
bool MediaTrackTranscoder::start() {
    std::scoped_lock lock{mStateMutex};

    if (mState != CONFIGURED) {
        LOG(ERROR) << "TrackTranscoder must be configured before started";
        return false;
    }

    mTranscodingThread = std::thread([this] {
        media_status_t status = runTranscodeLoop();

        // Notify the client.
        if (auto callbacks = mTranscoderCallback.lock()) {
            if (status != AMEDIA_OK) {
                callbacks->onTrackError(this, status);
            } else {
                callbacks->onTrackFinished(this);
            }
        }
    });

    // NOTE(review): mState is set after the thread launches, but mStateMutex is still held
    // here, so any stop() racing with a fast-finishing loop serializes on the lock — confirm
    // runTranscodeLoop/callbacks do not read mState without it.
    mState = STARTED;
    return true;
}
+
// Stops a started transcoder: aborts the loop, releases the reader's sequential-access
// gate so the thread cannot block on other tracks, joins the thread, and drops any
// buffered output samples. Returns false when not started.
bool MediaTrackTranscoder::stop() {
    std::scoped_lock lock{mStateMutex};

    if (mState == STARTED) {
        abortTranscodeLoop();
        mMediaSampleReader->setEnforceSequentialAccess(false);
        // NOTE(review): join happens while mStateMutex is held; safe only if the worker
        // thread never takes mStateMutex — confirm against runTranscodeLoop.
        mTranscodingThread.join();
        {
            std::scoped_lock lock{mSampleMutex};
            mSampleQueue.abort();  // Release any buffered samples.
        }
        mState = STOPPED;
        return true;
    }

    LOG(ERROR) << "TrackTranscoder must be started before stopped";
    return false;
}
+
// Forwards the "output format ready" event to the client callback, if it still exists.
void MediaTrackTranscoder::notifyTrackFormatAvailable() {
    if (auto callbacks = mTranscoderCallback.lock()) {
        callbacks->onTrackFormatAvailable(this);
    }
}
+
+void MediaTrackTranscoder::onOutputSampleAvailable(const std::shared_ptr<MediaSample>& sample) {
+ std::scoped_lock lock{mSampleMutex};
+ if (mSampleConsumer == nullptr) {
+ mSampleQueue.enqueue(sample);
+ } else {
+ mSampleConsumer(sample);
+ }
+}
+
// Attaches the consumer that receives output samples, then drains any samples that were
// buffered before the consumer became available.
void MediaTrackTranscoder::setSampleConsumer(
        const MediaSampleWriter::MediaSampleConsumerFunction& sampleConsumer) {
    std::scoped_lock lock{mSampleMutex};
    mSampleConsumer = sampleConsumer;

    std::shared_ptr<MediaSample> sample;
    // NOTE(review): this loop relies on dequeue() returning false on success (non-false on
    // abort/empty) — confirm against MediaSampleQueue's contract.
    while (!mSampleQueue.isEmpty() && !mSampleQueue.dequeue(&sample)) {
        mSampleConsumer(sample);
    }
}
+
+} // namespace android
diff --git a/media/libmediatranscoding/transcoder/MediaTranscoder.cpp b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
new file mode 100644
index 0000000..cdb8368
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/MediaTranscoder.cpp
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaTranscoder"
+
+#include <android-base/logging.h>
+#include <binder/Parcel.h>
+#include <fcntl.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <media/MediaSampleWriter.h>
+#include <media/MediaTranscoder.h>
+#include <media/NdkCommon.h>
+#include <media/PassthroughTrackTranscoder.h>
+#include <media/VideoTrackTranscoder.h>
+#include <unistd.h>
+
+namespace android {
+
// Returns a new format containing all entries of |base| overridden by any of the known
// keys present in |overlay|, or nullptr on failure. Caller owns the returned format and
// must release it with AMediaFormat_delete.
static AMediaFormat* mergeMediaFormats(AMediaFormat* base, AMediaFormat* overlay) {
    if (base == nullptr || overlay == nullptr) {
        LOG(ERROR) << "Cannot merge null formats";
        return nullptr;
    }

    AMediaFormat* format = AMediaFormat_new();
    if (AMediaFormat_copy(format, base) != AMEDIA_OK) {
        AMediaFormat_delete(format);
        return nullptr;
    }

    // Note: AMediaFormat does not expose a function for appending values from another format or for
    // iterating over all values and keys in a format. Instead we define a static list of known keys
    // along with their value types and copy the ones that are present. A better solution would be
    // to either implement required functions in NDK or to parse the overlay format's string
    // representation and copy all existing keys.
    static const AMediaFormatUtils::EntryCopier kSupportedFormatEntries[] = {
            ENTRY_COPIER(AMEDIAFORMAT_KEY_MIME, String),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_DURATION, Int64),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_WIDTH, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_HEIGHT, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_BIT_RATE, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_PROFILE, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_LEVEL, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_FORMAT, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_RANGE, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_STANDARD, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_COLOR_TRANSFER, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_FRAME_RATE, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, Int32),
            ENTRY_COPIER(AMEDIAFORMAT_KEY_PRIORITY, Int32),
            ENTRY_COPIER2(AMEDIAFORMAT_KEY_OPERATING_RATE, Float, Int32),
    };
    const size_t entryCount = sizeof(kSupportedFormatEntries) / sizeof(kSupportedFormatEntries[0]);

    AMediaFormatUtils::CopyFormatEntries(overlay, format, kSupportedFormatEntries, entryCount);
    return format;
}
+
// Delivers the final (finished/error) callback to the client exactly once, then tears
// down the pipeline asynchronously. Errors raised after an explicit cancel are dropped.
void MediaTranscoder::sendCallback(media_status_t status) {
    // If the transcoder is already cancelled explicitly, don't send any error callbacks.
    // Tracks and sample writer will report errors for abort. However, currently we can't
    // tell it apart from real errors. Ideally we still want to report real errors back
    // to client, as there is a small chance that explicit abort and the real error come
    // at around the same time, we should report that if abort has a specific error code.
    // On the other hand, if the transcoder actually finished (status is AMEDIA_OK) at around
    // the same time of the abort, we should still report the finish back to the client.
    if (mCancelled && status != AMEDIA_OK) {
        return;
    }

    // compare_exchange guarantees at most one winner delivers the terminal callback.
    bool expected = false;
    if (mCallbackSent.compare_exchange_strong(expected, true)) {
        if (status == AMEDIA_OK) {
            mCallbacks->onFinished(this);
        } else {
            mCallbacks->onError(this, status);
        }

        // Transcoding is done and the callback to the client has been sent, so tear down the
        // pipeline but do it asynchronously to avoid deadlocks. If an error occurred, client
        // should clean up the file.
        std::thread asyncCancelThread{[self = shared_from_this()] { self->cancel(); }};
        asyncCancelThread.detach();
    }
}
+
// Called by a track transcoder once its output format is known: registers the track with
// the sample writer, wires the writer's consumer back to the track, and — once every
// track has reported — switches the reader to sequential access and starts the writer.
void MediaTranscoder::onTrackFormatAvailable(const MediaTrackTranscoder* transcoder) {
    LOG(INFO) << "TrackTranscoder " << transcoder << " format available.";

    std::scoped_lock lock{mTracksAddedMutex};

    // Ignore duplicate format change.
    if (mTracksAdded.count(transcoder) > 0) {
        return;
    }

    // Add track to the writer.
    auto consumer = mSampleWriter->addTrack(transcoder->getOutputFormat());
    if (consumer == nullptr) {
        LOG(ERROR) << "Unable to add track to sample writer.";
        sendCallback(AMEDIA_ERROR_UNKNOWN);
        return;
    }

    // The callback hands us a const pointer, but attaching the consumer mutates the track;
    // const_cast is confined to this spot.
    MediaTrackTranscoder* mutableTranscoder = const_cast<MediaTrackTranscoder*>(transcoder);
    mutableTranscoder->setSampleConsumer(consumer);

    mTracksAdded.insert(transcoder);
    if (mTracksAdded.size() == mTrackTranscoders.size()) {
        // Enable sequential access mode on the sample reader to achieve optimal read performance.
        // This has to wait until all tracks have delivered their output formats and the sample
        // writer is started. Otherwise the tracks will not get their output sample queues drained
        // and the transcoder could hang due to one track running out of buffers and blocking the
        // other tracks from reading source samples before they could output their formats.
        mSampleReader->setEnforceSequentialAccess(true);
        LOG(INFO) << "Starting sample writer.";
        bool started = mSampleWriter->start();
        if (!started) {
            LOG(ERROR) << "Unable to start sample writer.";
            sendCallback(AMEDIA_ERROR_UNKNOWN);
        }
    }
}
+
// Per-track completion is informational; the overall result is reported by the writer.
void MediaTranscoder::onTrackFinished(const MediaTrackTranscoder* transcoder) {
    LOG(DEBUG) << "TrackTranscoder " << transcoder << " finished";
}

// A failing track ends the whole session with that track's error.
void MediaTranscoder::onTrackError(const MediaTrackTranscoder* transcoder, media_status_t status) {
    LOG(ERROR) << "TrackTranscoder " << transcoder << " returned error " << status;
    sendCallback(status);
}

// The sample writer finishing (successfully or not) is the session's terminal event.
void MediaTranscoder::onFinished(const MediaSampleWriter* writer __unused, media_status_t status) {
    LOG((status != AMEDIA_OK) ? ERROR : DEBUG) << "Sample writer finished with status " << status;
    sendCallback(status);
}

void MediaTranscoder::onProgressUpdate(const MediaSampleWriter* writer __unused, int32_t progress) {
    // Dispatch progress updated to the client.
    mCallbacks->onProgressUpdate(this, progress);
}
+
MediaTranscoder::MediaTranscoder(const std::shared_ptr<CallbackInterface>& callbacks)
      : mCallbacks(callbacks) {}

// Factory returning a shared_ptr-managed instance (required for shared_from_this).
// Returns nullptr when no callback object is provided. |pausedState| resume support
// is not implemented yet; it is only logged.
std::shared_ptr<MediaTranscoder> MediaTranscoder::create(
        const std::shared_ptr<CallbackInterface>& callbacks,
        const std::shared_ptr<const Parcel>& pausedState) {
    if (pausedState != nullptr) {
        LOG(INFO) << "Initializing from paused state.";
    }
    if (callbacks == nullptr) {
        LOG(ERROR) << "Callbacks cannot be null";
        return nullptr;
    }

    return std::shared_ptr<MediaTranscoder>(new MediaTranscoder(callbacks));
}
+
+media_status_t MediaTranscoder::configureSource(int fd) {
+ if (fd < 0) {
+ LOG(ERROR) << "Invalid source fd: " << fd;
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ const size_t fileSize = lseek(fd, 0, SEEK_END);
+ lseek(fd, 0, SEEK_SET);
+
+ mSampleReader = MediaSampleReaderNDK::createFromFd(fd, 0 /* offset */, fileSize);
+
+ if (mSampleReader == nullptr) {
+ LOG(ERROR) << "Unable to parse source fd: " << fd;
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+
+ const size_t trackCount = mSampleReader->getTrackCount();
+ for (size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
+ AMediaFormat* trackFormat = mSampleReader->getTrackFormat(static_cast<int>(trackIndex));
+ if (trackFormat == nullptr) {
+ LOG(ERROR) << "Track #" << trackIndex << " has no format";
+ return AMEDIA_ERROR_MALFORMED;
+ }
+
+ mSourceTrackFormats.emplace_back(trackFormat, &AMediaFormat_delete);
+ }
+
+ return AMEDIA_OK;
+}
+
+std::vector<std::shared_ptr<AMediaFormat>> MediaTranscoder::getTrackFormats() const {
+ // Return a deep copy of the formats to avoid the caller modifying our internal formats.
+ std::vector<std::shared_ptr<AMediaFormat>> trackFormats;
+ for (const std::shared_ptr<AMediaFormat>& sourceFormat : mSourceTrackFormats) {
+ AMediaFormat* copy = AMediaFormat_new();
+ AMediaFormat_copy(copy, sourceFormat.get());
+ trackFormats.emplace_back(copy, &AMediaFormat_delete);
+ }
+ return trackFormats;
+}
+
// Configures one source track for the session. A null |trackFormat| selects passthrough
// (no re-encode); otherwise a video transcoder is created with the source format merged
// with the requested destination overrides. Only video tracks can be transcoded.
media_status_t MediaTranscoder::configureTrackFormat(size_t trackIndex, AMediaFormat* trackFormat) {
    if (mSampleReader == nullptr) {
        LOG(ERROR) << "Source must be configured before tracks";
        return AMEDIA_ERROR_INVALID_OPERATION;
    } else if (trackIndex >= mSourceTrackFormats.size()) {
        LOG(ERROR) << "Track index " << trackIndex
                   << " is out of bounds. Track count: " << mSourceTrackFormats.size();
        return AMEDIA_ERROR_INVALID_PARAMETER;
    }

    // NOTE(review): trackIndex (size_t) narrows to the reader's int parameter here;
    // bounds were checked above against the cached format list.
    media_status_t status = mSampleReader->selectTrack(trackIndex);
    if (status != AMEDIA_OK) {
        LOG(ERROR) << "Unable to select track " << trackIndex;
        return status;
    }

    std::shared_ptr<MediaTrackTranscoder> transcoder;
    std::shared_ptr<AMediaFormat> format;

    if (trackFormat == nullptr) {
        // No destination format requested: copy samples through unmodified.
        transcoder = std::make_shared<PassthroughTrackTranscoder>(shared_from_this());
    } else {
        const char* srcMime = nullptr;
        if (!AMediaFormat_getString(mSourceTrackFormats[trackIndex].get(), AMEDIAFORMAT_KEY_MIME,
                                    &srcMime)) {
            LOG(ERROR) << "Source track #" << trackIndex << " has no mime type";
            return AMEDIA_ERROR_MALFORMED;
        }

        if (strncmp(srcMime, "video/", 6) != 0) {
            LOG(ERROR) << "Only video tracks are supported for transcoding. Unable to configure "
                          "track #"
                       << trackIndex << " with mime " << srcMime;
            return AMEDIA_ERROR_UNSUPPORTED;
        }

        // Cross-media-type conversion (e.g. video -> audio) is rejected.
        const char* dstMime = nullptr;
        if (AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &dstMime)) {
            if (strncmp(dstMime, "video/", 6) != 0) {
                LOG(ERROR) << "Unable to convert media types for track #" << trackIndex << ", from "
                           << srcMime << " to " << dstMime;
                return AMEDIA_ERROR_UNSUPPORTED;
            }
        }

        transcoder = VideoTrackTranscoder::create(shared_from_this());

        // Start from the source format and apply the caller's overrides on top.
        AMediaFormat* mergedFormat =
                mergeMediaFormats(mSourceTrackFormats[trackIndex].get(), trackFormat);
        if (mergedFormat == nullptr) {
            LOG(ERROR) << "Unable to merge source and destination formats";
            return AMEDIA_ERROR_UNKNOWN;
        }

        format = std::shared_ptr<AMediaFormat>(mergedFormat, &AMediaFormat_delete);
    }

    status = transcoder->configure(mSampleReader, trackIndex, format);
    if (status != AMEDIA_OK) {
        LOG(ERROR) << "Configure track transcoder for track #" << trackIndex << " returned error "
                   << status;
        return status;
    }

    mTrackTranscoders.emplace_back(std::move(transcoder));
    return AMEDIA_OK;
}
+
+media_status_t MediaTranscoder::configureDestination(int fd) {
+ if (fd < 0) {
+ LOG(ERROR) << "Invalid destination fd: " << fd;
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (mSampleWriter != nullptr) {
+ LOG(ERROR) << "Destination is already configured.";
+ return AMEDIA_ERROR_INVALID_OPERATION;
+ }
+
+ mSampleWriter = MediaSampleWriter::Create();
+ const bool initOk = mSampleWriter->init(fd, shared_from_this());
+
+ if (!initOk) {
+ LOG(ERROR) << "Unable to initialize sample writer with destination fd: " << fd;
+ mSampleWriter.reset();
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ return AMEDIA_OK;
+}
+
+media_status_t MediaTranscoder::start() {
+ if (mTrackTranscoders.size() < 1) {
+ LOG(ERROR) << "Unable to start, no tracks are configured.";
+ return AMEDIA_ERROR_INVALID_OPERATION;
+ } else if (mSampleWriter == nullptr) {
+ LOG(ERROR) << "Unable to start, destination is not configured";
+ return AMEDIA_ERROR_INVALID_OPERATION;
+ }
+
+ // Start transcoders
+ for (auto& transcoder : mTrackTranscoders) {
+ bool started = transcoder->start();
+ if (!started) {
+ LOG(ERROR) << "Unable to start track transcoder.";
+ cancel();
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+ }
+ return AMEDIA_OK;
+}
+
// Pause is currently implemented as a cancel that hands back an (empty) state parcel.
media_status_t MediaTranscoder::pause(std::shared_ptr<const Parcel>* pausedState) {
    // TODO: write internal states to parcel.
    *pausedState = std::make_shared<Parcel>();
    return cancel();
}

// Resume is currently a plain restart; no state is restored yet.
media_status_t MediaTranscoder::resume() {
    // TODO: restore internal states from parcel.
    return start();
}
+
// Idempotent teardown: stops the writer first, releases the reader's sequential-access
// gate so no track thread stays blocked, then stops every track transcoder.
media_status_t MediaTranscoder::cancel() {
    // compare_exchange makes repeat calls (including the async cancel from sendCallback)
    // no-ops.
    bool expected = false;
    if (!mCancelled.compare_exchange_strong(expected, true)) {
        // Already cancelled.
        return AMEDIA_OK;
    }

    mSampleWriter->stop();
    mSampleReader->setEnforceSequentialAccess(false);
    for (auto& transcoder : mTrackTranscoders) {
        transcoder->stop();
    }

    return AMEDIA_OK;
}
+
+} // namespace android
diff --git a/media/libmediatranscoding/transcoder/NdkCommon.cpp b/media/libmediatranscoding/transcoder/NdkCommon.cpp
new file mode 100644
index 0000000..a7b79dc
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/NdkCommon.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "NdkCommon"
+
+#include <android-base/logging.h>
+#include <media/NdkCommon.h>
+
+#include <cstdio>
+#include <cstring>
+#include <utility>
+
+/* TODO(b/153592281)
+ * Note: constants used by the native media tests but not available in the Media NDK API
+ */
+const char* AMEDIA_MIMETYPE_VIDEO_VP8 = "video/x-vnd.on2.vp8";
+const char* AMEDIA_MIMETYPE_VIDEO_VP9 = "video/x-vnd.on2.vp9";
+const char* AMEDIA_MIMETYPE_VIDEO_AV1 = "video/av01";
+const char* AMEDIA_MIMETYPE_VIDEO_AVC = "video/avc";
+const char* AMEDIA_MIMETYPE_VIDEO_HEVC = "video/hevc";
+const char* AMEDIA_MIMETYPE_VIDEO_MPEG4 = "video/mp4v-es";
+const char* AMEDIA_MIMETYPE_VIDEO_H263 = "video/3gpp";
+
+/* TODO(b/153592281) */
+const char* TBD_AMEDIACODEC_PARAMETER_KEY_ALLOW_FRAME_DROP = "allow-frame-drop";
+const char* TBD_AMEDIACODEC_PARAMETER_KEY_REQUEST_SYNC_FRAME = "request-sync";
+const char* TBD_AMEDIACODEC_PARAMETER_KEY_VIDEO_BITRATE = "video-bitrate";
+const char* TBD_AMEDIACODEC_PARAMETER_KEY_MAX_B_FRAMES = "max-bframes";
+
+namespace AMediaFormatUtils {
+
+#define DEFINE_FORMAT_VALUE_COPY_FUNC(_type, _typeName) \
+ bool CopyFormatEntry##_typeName(const char* key, AMediaFormat* from, AMediaFormat* to) { \
+ _type value; \
+ if (AMediaFormat_get##_typeName(from, key, &value)) { \
+ AMediaFormat_set##_typeName(to, key, value); \
+ return true; \
+ } \
+ return false; \
+ }
+
+DEFINE_FORMAT_VALUE_COPY_FUNC(const char*, String);
+DEFINE_FORMAT_VALUE_COPY_FUNC(int64_t, Int64);
+DEFINE_FORMAT_VALUE_COPY_FUNC(int32_t, Int32);
+DEFINE_FORMAT_VALUE_COPY_FUNC(float, Float);
+
+void CopyFormatEntries(AMediaFormat* from, AMediaFormat* to, const EntryCopier* entries,
+ size_t entryCount) {
+ if (from == nullptr || to == nullptr) {
+ LOG(ERROR) << "Cannot copy null formats";
+ return;
+ } else if (entries == nullptr || entryCount < 1) {
+ LOG(WARNING) << "No entries to copy";
+ return;
+ }
+
+ for (size_t i = 0; i < entryCount; ++i) {
+ if (!entries[i].copy(entries[i].key, from, to) && entries[i].copy2 != nullptr) {
+ entries[i].copy2(entries[i].key, from, to);
+ }
+ }
+}
+
+#define DEFINE_SET_DEFAULT_FORMAT_VALUE_FUNC(_type, _typeName) \
+ bool SetDefaultFormatValue##_typeName(const char* key, AMediaFormat* format, _type value) { \
+ _type tmp; \
+ if (!AMediaFormat_get##_typeName(format, key, &tmp)) { \
+ AMediaFormat_set##_typeName(format, key, value); \
+ return true; \
+ } \
+ return false; \
+ }
+
+DEFINE_SET_DEFAULT_FORMAT_VALUE_FUNC(float, Float);
+DEFINE_SET_DEFAULT_FORMAT_VALUE_FUNC(int32_t, Int32);
+
+} // namespace AMediaFormatUtils
\ No newline at end of file
diff --git a/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
new file mode 100644
index 0000000..35b1d33
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/PassthroughTrackTranscoder.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "PassthroughTrackTranscoder"
+
+#include <android-base/logging.h>
+#include <media/PassthroughTrackTranscoder.h>
+
+namespace android {
+
+PassthroughTrackTranscoder::BufferPool::~BufferPool() {
+ for (auto it = mAddressSizeMap.begin(); it != mAddressSizeMap.end(); ++it) {
+ delete[] it->first;
+ }
+}
+
+uint8_t* PassthroughTrackTranscoder::BufferPool::getBufferWithSize(size_t minimumBufferSize)
+ NO_THREAD_SAFETY_ANALYSIS {
+ std::unique_lock lock(mMutex);
+
+ // Wait if maximum number of buffers are allocated but none are free.
+ while (mAddressSizeMap.size() >= mMaxBufferCount && mFreeBufferMap.empty() && !mAborted) {
+ mCondition.wait(lock);
+ }
+
+ if (mAborted) {
+ return nullptr;
+ }
+
+ // Check if the free list contains a large enough buffer.
+ auto it = mFreeBufferMap.lower_bound(minimumBufferSize);
+ if (it != mFreeBufferMap.end()) {
+ uint8_t* buffer = it->second;
+ mFreeBufferMap.erase(it);
+ return buffer;
+ }
+
+ // If the maximum buffer count is reached, remove an existing free buffer.
+ if (mAddressSizeMap.size() >= mMaxBufferCount) {
+ auto it = mFreeBufferMap.begin();
+ mAddressSizeMap.erase(it->second);
+ delete[] it->second;
+ mFreeBufferMap.erase(it);
+ }
+
+ // Allocate a new buffer.
+ uint8_t* buffer = new (std::nothrow) uint8_t[minimumBufferSize];
+ if (buffer == nullptr) {
+ LOG(ERROR) << "Unable to allocate new buffer of size: " << minimumBufferSize;
+ return nullptr;
+ }
+
+ // Add the buffer to the tracking set.
+ mAddressSizeMap.emplace(buffer, minimumBufferSize);
+ return buffer;
+}
+
+void PassthroughTrackTranscoder::BufferPool::returnBuffer(uint8_t* buffer) {
+ std::scoped_lock lock(mMutex);
+
+ if (buffer == nullptr || mAddressSizeMap.find(buffer) == mAddressSizeMap.end()) {
+ LOG(WARNING) << "Ignoring untracked buffer " << buffer;
+ return;
+ }
+
+ mFreeBufferMap.emplace(mAddressSizeMap[buffer], buffer);
+ mCondition.notify_one();
+}
+
+void PassthroughTrackTranscoder::BufferPool::abort() {
+ std::scoped_lock lock(mMutex);
+ mAborted = true;
+ mCondition.notify_all();
+}
+
+media_status_t PassthroughTrackTranscoder::configureDestinationFormat(
+ const std::shared_ptr<AMediaFormat>& destinationFormat __unused) {
+ // Called by MediaTrackTranscoder. Passthrough doesn't care about destination so just return ok.
+ return AMEDIA_OK;
+}
+
+media_status_t PassthroughTrackTranscoder::runTranscodeLoop() {
+ MediaSampleInfo info;
+ std::shared_ptr<MediaSample> sample;
+
+    // Notify the track format as soon as we start. It's the same as the source format.
+ notifyTrackFormatAvailable();
+
+ MediaSample::OnSampleReleasedCallback bufferReleaseCallback =
+ [bufferPool = mBufferPool](MediaSample* sample) {
+ bufferPool->returnBuffer(const_cast<uint8_t*>(sample->buffer));
+ };
+
+ // Move samples until EOS is reached or transcoding is stopped.
+ while (!mStopRequested && !mEosFromSource) {
+ media_status_t status = mMediaSampleReader->getSampleInfoForTrack(mTrackIndex, &info);
+
+ if (status == AMEDIA_OK) {
+ uint8_t* buffer = mBufferPool->getBufferWithSize(info.size);
+ if (buffer == nullptr) {
+ if (mStopRequested) {
+ break;
+ }
+
+ LOG(ERROR) << "Unable to get buffer from pool";
+ return AMEDIA_ERROR_IO; // TODO: Custom error codes?
+ }
+
+ sample = MediaSample::createWithReleaseCallback(
+ buffer, 0 /* offset */, 0 /* bufferId */, bufferReleaseCallback);
+
+ status = mMediaSampleReader->readSampleDataForTrack(mTrackIndex, buffer, info.size);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "Unable to read next sample data. Aborting transcode.";
+ return status;
+ }
+
+ } else if (status == AMEDIA_ERROR_END_OF_STREAM) {
+ sample = std::make_shared<MediaSample>();
+ mEosFromSource = true;
+ } else {
+ LOG(ERROR) << "Unable to get next sample info. Aborting transcode.";
+ return status;
+ }
+
+ sample->info = info;
+ onOutputSampleAvailable(sample);
+ }
+
+ if (mStopRequested && !mEosFromSource) {
+ return AMEDIA_ERROR_UNKNOWN; // TODO: Custom error codes?
+ }
+ return AMEDIA_OK;
+}
+
+void PassthroughTrackTranscoder::abortTranscodeLoop() {
+ mStopRequested = true;
+ mBufferPool->abort();
+}
+
+std::shared_ptr<AMediaFormat> PassthroughTrackTranscoder::getOutputFormat() const {
+ return mSourceFormat;
+}
+} // namespace android
diff --git a/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
new file mode 100644
index 0000000..4cf54f1
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/VideoTrackTranscoder.cpp
@@ -0,0 +1,535 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "VideoTrackTranscoder"
+
+#include <android-base/logging.h>
+#include <media/NdkCommon.h>
+#include <media/VideoTrackTranscoder.h>
+#include <utils/AndroidThreads.h>
+
+using namespace AMediaFormatUtils;
+
+namespace android {
+
+// Check that the codec sample flags have the expected NDK meaning.
+static_assert(SAMPLE_FLAG_CODEC_CONFIG == AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG,
+ "Sample flag mismatch: CODEC_CONFIG");
+static_assert(SAMPLE_FLAG_END_OF_STREAM == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM,
+ "Sample flag mismatch: END_OF_STREAM");
+static_assert(SAMPLE_FLAG_PARTIAL_FRAME == AMEDIACODEC_BUFFER_FLAG_PARTIAL_FRAME,
+ "Sample flag mismatch: PARTIAL_FRAME");
+
+// Color format defined by surface. (See MediaCodecInfo.CodecCapabilities#COLOR_FormatSurface.)
+static constexpr int32_t kColorFormatSurface = 0x7f000789;
+// Default key frame interval in seconds.
+static constexpr float kDefaultKeyFrameIntervalSeconds = 1.0f;
+// Default codec operating rate.
+static constexpr int32_t kDefaultCodecOperatingRate = 240;
+// Default codec priority.
+static constexpr int32_t kDefaultCodecPriority = 1;
+// Default bitrate, in case source estimation fails.
+static constexpr int32_t kDefaultBitrateMbps = 10 * 1000 * 1000;
+
+template <typename T>
+void VideoTrackTranscoder::BlockingQueue<T>::push(T const& value, bool front) {
+ {
+ std::scoped_lock lock(mMutex);
+ if (mAborted) {
+ return;
+ }
+
+ if (front) {
+ mQueue.push_front(value);
+ } else {
+ mQueue.push_back(value);
+ }
+ }
+ mCondition.notify_one();
+}
+
+template <typename T>
+T VideoTrackTranscoder::BlockingQueue<T>::pop() {
+ std::unique_lock lock(mMutex);
+ while (mQueue.empty()) {
+ mCondition.wait(lock);
+ }
+ T value = mQueue.front();
+ mQueue.pop_front();
+ return value;
+}
+
+// Note: Do not call if another thread might be waiting in pop.
+template <typename T>
+void VideoTrackTranscoder::BlockingQueue<T>::abort() {
+ std::scoped_lock lock(mMutex);
+ mAborted = true;
+ mQueue.clear();
+}
+
+// The CodecWrapper class is used to let AMediaCodec instances outlive the transcoder object itself
+// by giving the codec a weak pointer to the transcoder. Codecs wrapped in this object are kept
+// alive by the transcoder and the codec's outstanding buffers. Once the transcoder stops and all
+// output buffers have been released by downstream components the codec will also be released.
+class VideoTrackTranscoder::CodecWrapper {
+public:
+ CodecWrapper(AMediaCodec* codec, const std::weak_ptr<VideoTrackTranscoder>& transcoder)
+ : mCodec(codec), mTranscoder(transcoder), mCodecStarted(false) {}
+ ~CodecWrapper() {
+ if (mCodecStarted) {
+ AMediaCodec_stop(mCodec);
+ }
+ AMediaCodec_delete(mCodec);
+ }
+
+ AMediaCodec* getCodec() { return mCodec; }
+ std::shared_ptr<VideoTrackTranscoder> getTranscoder() const { return mTranscoder.lock(); };
+ void setStarted() { mCodecStarted = true; }
+
+private:
+ AMediaCodec* mCodec;
+ std::weak_ptr<VideoTrackTranscoder> mTranscoder;
+ bool mCodecStarted;
+};
+
+// Dispatch responses to codec callbacks onto the message queue.
+struct AsyncCodecCallbackDispatch {
+ static void onAsyncInputAvailable(AMediaCodec* codec, void* userdata, int32_t index) {
+ VideoTrackTranscoder::CodecWrapper* wrapper =
+ static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
+ if (auto transcoder = wrapper->getTranscoder()) {
+ if (codec == transcoder->mDecoder) {
+ transcoder->mCodecMessageQueue.push(
+ [transcoder, index] { transcoder->enqueueInputSample(index); });
+ }
+ }
+ }
+
+ static void onAsyncOutputAvailable(AMediaCodec* codec, void* userdata, int32_t index,
+ AMediaCodecBufferInfo* bufferInfoPtr) {
+ VideoTrackTranscoder::CodecWrapper* wrapper =
+ static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
+ AMediaCodecBufferInfo bufferInfo = *bufferInfoPtr;
+ if (auto transcoder = wrapper->getTranscoder()) {
+ transcoder->mCodecMessageQueue.push([transcoder, index, codec, bufferInfo] {
+ if (codec == transcoder->mDecoder) {
+ transcoder->transferBuffer(index, bufferInfo);
+ } else if (codec == transcoder->mEncoder->getCodec()) {
+ transcoder->dequeueOutputSample(index, bufferInfo);
+ }
+ });
+ }
+ }
+
+ static void onAsyncFormatChanged(AMediaCodec* codec, void* userdata, AMediaFormat* format) {
+ VideoTrackTranscoder::CodecWrapper* wrapper =
+ static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
+ if (auto transcoder = wrapper->getTranscoder()) {
+ const char* kCodecName = (codec == transcoder->mDecoder ? "Decoder" : "Encoder");
+ LOG(DEBUG) << kCodecName << " format changed: " << AMediaFormat_toString(format);
+ if (codec == transcoder->mEncoder->getCodec()) {
+ transcoder->mCodecMessageQueue.push(
+ [transcoder, format] { transcoder->updateTrackFormat(format); });
+ }
+ }
+ }
+
+ static void onAsyncError(AMediaCodec* codec, void* userdata, media_status_t error,
+ int32_t actionCode, const char* detail) {
+ LOG(ERROR) << "Error from codec " << codec << ", userdata " << userdata << ", error "
+ << error << ", action " << actionCode << ", detail " << detail;
+ VideoTrackTranscoder::CodecWrapper* wrapper =
+ static_cast<VideoTrackTranscoder::CodecWrapper*>(userdata);
+ if (auto transcoder = wrapper->getTranscoder()) {
+ transcoder->mCodecMessageQueue.push(
+ [transcoder, error] {
+ transcoder->mStatus = error;
+ transcoder->mStopRequested = true;
+ },
+ true);
+ }
+ }
+};
+
+// static
+std::shared_ptr<VideoTrackTranscoder> VideoTrackTranscoder::create(
+ const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback) {
+ return std::shared_ptr<VideoTrackTranscoder>(new VideoTrackTranscoder(transcoderCallback));
+}
+
+VideoTrackTranscoder::~VideoTrackTranscoder() {
+ if (mDecoder != nullptr) {
+ AMediaCodec_delete(mDecoder);
+ }
+
+ if (mSurface != nullptr) {
+ ANativeWindow_release(mSurface);
+ }
+}
+
+// Creates and configures the codecs.
+media_status_t VideoTrackTranscoder::configureDestinationFormat(
+        const std::shared_ptr<AMediaFormat>& destinationFormat) {
+    media_status_t status = AMEDIA_OK;
+
+    if (destinationFormat == nullptr) {
+        LOG(ERROR) << "Destination format is null, use passthrough transcoder";
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    AMediaFormat* encoderFormat = AMediaFormat_new();
+    if (!encoderFormat || AMediaFormat_copy(encoderFormat, destinationFormat.get()) != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to copy destination format";
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    int32_t bitrate;
+    if (!AMediaFormat_getInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, &bitrate)) {
+        status = mMediaSampleReader->getEstimatedBitrateForTrack(mTrackIndex, &bitrate);
+        if (status != AMEDIA_OK) {
+            LOG(ERROR) << "Unable to estimate bitrate. Using default " << kDefaultBitrateMbps;
+            bitrate = kDefaultBitrateMbps;
+        }
+
+        LOG(INFO) << "Configuring bitrate " << bitrate;
+        AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_BIT_RATE, bitrate);
+    }
+
+    SetDefaultFormatValueFloat(AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, encoderFormat,
+                               kDefaultKeyFrameIntervalSeconds);
+    SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_OPERATING_RATE, encoderFormat,
+                               kDefaultCodecOperatingRate);
+    SetDefaultFormatValueInt32(AMEDIAFORMAT_KEY_PRIORITY, encoderFormat, kDefaultCodecPriority);
+
+    AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_COLOR_FORMAT, kColorFormatSurface);
+
+    // Always encode without rotation. The rotation degree will be transferred directly to
+    // MediaSampleWriter track format, and MediaSampleWriter will call AMediaMuxer_setOrientationHint.
+    AMediaFormat_setInt32(encoderFormat, AMEDIAFORMAT_KEY_ROTATION, 0);
+
+    mDestinationFormat = std::shared_ptr<AMediaFormat>(encoderFormat, &AMediaFormat_delete);
+
+    // Create and configure the encoder.
+    const char* destinationMime = nullptr;
+    bool ok = AMediaFormat_getString(mDestinationFormat.get(), AMEDIAFORMAT_KEY_MIME,
+                                     &destinationMime);
+    if (!ok) {
+        LOG(ERROR) << "Destination MIME type is required for transcoding.";
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    AMediaCodec* encoder = AMediaCodec_createEncoderByType(destinationMime);
+    if (encoder == nullptr) {
+        LOG(ERROR) << "Unable to create encoder for type " << destinationMime;
+        return AMEDIA_ERROR_UNSUPPORTED;
+    }
+    mEncoder = std::make_shared<CodecWrapper>(encoder, shared_from_this());
+
+    status = AMediaCodec_configure(mEncoder->getCodec(), mDestinationFormat.get(),
+                                   NULL /* surface */, NULL /* crypto */,
+                                   AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to configure video encoder: " << status;
+        return status;
+    }
+
+    status = AMediaCodec_createInputSurface(mEncoder->getCodec(), &mSurface);
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to create an encoder input surface: " << status;
+        return status;
+    }
+
+    // Create and configure the decoder.
+    const char* sourceMime = nullptr;
+    ok = AMediaFormat_getString(mSourceFormat.get(), AMEDIAFORMAT_KEY_MIME, &sourceMime);
+    if (!ok) {
+        LOG(ERROR) << "Source MIME type is required for transcoding.";
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    mDecoder = AMediaCodec_createDecoderByType(sourceMime);
+    if (mDecoder == nullptr) {
+        LOG(ERROR) << "Unable to create decoder for type " << sourceMime;
+        return AMEDIA_ERROR_UNSUPPORTED;
+    }
+
+    auto decoderFormat = std::shared_ptr<AMediaFormat>(AMediaFormat_new(), &AMediaFormat_delete);
+    if (!decoderFormat ||
+        AMediaFormat_copy(decoderFormat.get(), mSourceFormat.get()) != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to copy source format";
+        return AMEDIA_ERROR_INVALID_PARAMETER;
+    }
+
+    // Prevent decoder from overwriting frames that the encoder has not yet consumed.
+    AMediaFormat_setInt32(decoderFormat.get(), TBD_AMEDIACODEC_PARAMETER_KEY_ALLOW_FRAME_DROP, 0);
+
+    // Copy over configurations that apply to both encoder and decoder.
+    static const EntryCopier kEncoderEntriesToCopy[] = {
+            ENTRY_COPIER2(AMEDIAFORMAT_KEY_OPERATING_RATE, Float, Int32),
+            ENTRY_COPIER(AMEDIAFORMAT_KEY_PRIORITY, Int32),
+    };
+    const size_t entryCount = sizeof(kEncoderEntriesToCopy) / sizeof(kEncoderEntriesToCopy[0]);
+    CopyFormatEntries(mDestinationFormat.get(), decoderFormat.get(), kEncoderEntriesToCopy,
+                      entryCount);
+
+    status = AMediaCodec_configure(mDecoder, decoderFormat.get(), mSurface, NULL /* crypto */,
+                                   0 /* flags */);
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to configure video decoder: " << status;
+        return status;
+    }
+
+    // Configure codecs to run in async mode.
+    AMediaCodecOnAsyncNotifyCallback asyncCodecCallbacks = {
+            .onAsyncInputAvailable = AsyncCodecCallbackDispatch::onAsyncInputAvailable,
+            .onAsyncOutputAvailable = AsyncCodecCallbackDispatch::onAsyncOutputAvailable,
+            .onAsyncFormatChanged = AsyncCodecCallbackDispatch::onAsyncFormatChanged,
+            .onAsyncError = AsyncCodecCallbackDispatch::onAsyncError};
+
+    // Note: The decoder does not need its own wrapper because its lifetime is tied to the
+    // transcoder. But the same callbacks are reused for decoder and encoder so we pass the encoder
+    // wrapper as userdata here but never read the codec from it in the callback.
+    status = AMediaCodec_setAsyncNotifyCallback(mDecoder, asyncCodecCallbacks, mEncoder.get());
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to set decoder to async mode: " << status;
+        return status;
+    }
+
+    status = AMediaCodec_setAsyncNotifyCallback(mEncoder->getCodec(), asyncCodecCallbacks,
+                                                mEncoder.get());
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to set encoder to async mode: " << status;
+        return status;
+    }
+
+    return AMEDIA_OK;
+}
+
+void VideoTrackTranscoder::enqueueInputSample(int32_t bufferIndex) {
+ media_status_t status = AMEDIA_OK;
+
+ if (mEosFromSource) {
+ return;
+ }
+
+ status = mMediaSampleReader->getSampleInfoForTrack(mTrackIndex, &mSampleInfo);
+ if (status != AMEDIA_OK && status != AMEDIA_ERROR_END_OF_STREAM) {
+ LOG(ERROR) << "Error getting next sample info: " << status;
+ mStatus = status;
+ return;
+ }
+ const bool endOfStream = (status == AMEDIA_ERROR_END_OF_STREAM);
+
+ if (!endOfStream) {
+ size_t bufferSize = 0;
+ uint8_t* sourceBuffer = AMediaCodec_getInputBuffer(mDecoder, bufferIndex, &bufferSize);
+ if (sourceBuffer == nullptr) {
+ LOG(ERROR) << "Decoder returned a NULL input buffer.";
+ mStatus = AMEDIA_ERROR_UNKNOWN;
+ return;
+ } else if (bufferSize < mSampleInfo.size) {
+ LOG(ERROR) << "Decoder returned an input buffer that is smaller than the sample.";
+ mStatus = AMEDIA_ERROR_UNKNOWN;
+ return;
+ }
+
+ status = mMediaSampleReader->readSampleDataForTrack(mTrackIndex, sourceBuffer,
+ mSampleInfo.size);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "Unable to read next sample data. Aborting transcode.";
+ mStatus = status;
+ return;
+ }
+ } else {
+ LOG(DEBUG) << "EOS from source.";
+ mEosFromSource = true;
+ }
+
+ status = AMediaCodec_queueInputBuffer(mDecoder, bufferIndex, 0, mSampleInfo.size,
+ mSampleInfo.presentationTimeUs, mSampleInfo.flags);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "Unable to queue input buffer for decode: " << status;
+ mStatus = status;
+ return;
+ }
+}
+
+void VideoTrackTranscoder::transferBuffer(int32_t bufferIndex, AMediaCodecBufferInfo bufferInfo) {
+ if (bufferIndex >= 0) {
+ bool needsRender = bufferInfo.size > 0;
+ AMediaCodec_releaseOutputBuffer(mDecoder, bufferIndex, needsRender);
+ }
+
+ if (bufferInfo.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
+ LOG(DEBUG) << "EOS from decoder.";
+ media_status_t status = AMediaCodec_signalEndOfInputStream(mEncoder->getCodec());
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "SignalEOS on encoder returned error: " << status;
+ mStatus = status;
+ }
+ }
+}
+
+void VideoTrackTranscoder::dequeueOutputSample(int32_t bufferIndex,
+ AMediaCodecBufferInfo bufferInfo) {
+ if (bufferIndex >= 0) {
+ size_t sampleSize = 0;
+ uint8_t* buffer =
+ AMediaCodec_getOutputBuffer(mEncoder->getCodec(), bufferIndex, &sampleSize);
+
+ MediaSample::OnSampleReleasedCallback bufferReleaseCallback =
+ [encoder = mEncoder](MediaSample* sample) {
+ AMediaCodec_releaseOutputBuffer(encoder->getCodec(), sample->bufferId,
+ false /* render */);
+ };
+
+ std::shared_ptr<MediaSample> sample = MediaSample::createWithReleaseCallback(
+ buffer, bufferInfo.offset, bufferIndex, bufferReleaseCallback);
+ sample->info.size = bufferInfo.size;
+ sample->info.flags = bufferInfo.flags;
+ sample->info.presentationTimeUs = bufferInfo.presentationTimeUs;
+
+ onOutputSampleAvailable(sample);
+ } else if (bufferIndex == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
+ AMediaFormat* newFormat = AMediaCodec_getOutputFormat(mEncoder->getCodec());
+ LOG(DEBUG) << "Encoder output format changed: " << AMediaFormat_toString(newFormat);
+ }
+
+ if (bufferInfo.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) {
+ LOG(DEBUG) << "EOS from encoder.";
+ mEosFromEncoder = true;
+ }
+}
+
+void VideoTrackTranscoder::updateTrackFormat(AMediaFormat* outputFormat) {
+ if (mActualOutputFormat != nullptr) {
+ LOG(WARNING) << "Ignoring duplicate format change.";
+ return;
+ }
+
+ AMediaFormat* formatCopy = AMediaFormat_new();
+ if (!formatCopy || AMediaFormat_copy(formatCopy, outputFormat) != AMEDIA_OK) {
+ LOG(ERROR) << "Unable to copy outputFormat";
+ AMediaFormat_delete(formatCopy);
+ mStatus = AMEDIA_ERROR_INVALID_PARAMETER;
+ return;
+ }
+
+ // Generate the actual track format for muxer based on the encoder output format,
+    // since much vital information comes in the encoder format (e.g. CSD).
+ // Transfer necessary fields from the user-configured track format (derived from
+ // source track format and user transcoding request) where needed.
+
+ // Transfer SAR settings:
+ // If mDestinationFormat has SAR set, it means the original source has SAR specified
+ // at container level. This is supposed to override any SAR settings in the bitstream,
+ // thus should always be transferred to the container of the transcoded file.
+ int32_t sarWidth, sarHeight;
+ if (AMediaFormat_getInt32(mSourceFormat.get(), AMEDIAFORMAT_KEY_SAR_WIDTH, &sarWidth) &&
+ (sarWidth > 0) &&
+ AMediaFormat_getInt32(mSourceFormat.get(), AMEDIAFORMAT_KEY_SAR_HEIGHT, &sarHeight) &&
+ (sarHeight > 0)) {
+ AMediaFormat_setInt32(formatCopy, AMEDIAFORMAT_KEY_SAR_WIDTH, sarWidth);
+ AMediaFormat_setInt32(formatCopy, AMEDIAFORMAT_KEY_SAR_HEIGHT, sarHeight);
+ }
+ // Transfer DAR settings.
+ int32_t displayWidth, displayHeight;
+ if (AMediaFormat_getInt32(mSourceFormat.get(), AMEDIAFORMAT_KEY_DISPLAY_WIDTH, &displayWidth) &&
+ (displayWidth > 0) &&
+ AMediaFormat_getInt32(mSourceFormat.get(), AMEDIAFORMAT_KEY_DISPLAY_HEIGHT,
+ &displayHeight) &&
+ (displayHeight > 0)) {
+ AMediaFormat_setInt32(formatCopy, AMEDIAFORMAT_KEY_DISPLAY_WIDTH, displayWidth);
+ AMediaFormat_setInt32(formatCopy, AMEDIAFORMAT_KEY_DISPLAY_HEIGHT, displayHeight);
+ }
+
+ // Transfer rotation settings.
+ // Note that muxer itself doesn't take rotation from the track format. It requires
+ // AMediaMuxer_setOrientationHint to set the rotation. Here we pass the rotation to
+ // MediaSampleWriter using the track format. MediaSampleWriter will then call
+ // AMediaMuxer_setOrientationHint as needed.
+ int32_t rotation;
+ if (AMediaFormat_getInt32(mSourceFormat.get(), AMEDIAFORMAT_KEY_ROTATION, &rotation) &&
+ (rotation != 0)) {
+ AMediaFormat_setInt32(formatCopy, AMEDIAFORMAT_KEY_ROTATION, rotation);
+ }
+
+ // Transfer track duration.
+ // Preserve the source track duration by sending it to MediaSampleWriter.
+ int64_t durationUs;
+ if (AMediaFormat_getInt64(mSourceFormat.get(), AMEDIAFORMAT_KEY_DURATION, &durationUs) &&
+ durationUs > 0) {
+ AMediaFormat_setInt64(formatCopy, AMEDIAFORMAT_KEY_DURATION, durationUs);
+ }
+
+ // TODO: transfer other fields as required.
+
+ mActualOutputFormat = std::shared_ptr<AMediaFormat>(formatCopy, &AMediaFormat_delete);
+
+ notifyTrackFormatAvailable();
+}
+
+media_status_t VideoTrackTranscoder::runTranscodeLoop() {
+ androidSetThreadPriority(0 /* tid (0 = current) */, ANDROID_PRIORITY_VIDEO);
+
+ // Push start decoder and encoder as two messages, so that these are subject to the
+ // stop request as well. If the session is cancelled (or paused) immediately after start,
+    // we don't need to waste time starting and then stopping the codecs.
+ mCodecMessageQueue.push([this] {
+ media_status_t status = AMediaCodec_start(mDecoder);
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "Unable to start video decoder: " << status;
+ mStatus = status;
+ }
+ });
+
+ mCodecMessageQueue.push([this] {
+ media_status_t status = AMediaCodec_start(mEncoder->getCodec());
+ if (status != AMEDIA_OK) {
+ LOG(ERROR) << "Unable to start video encoder: " << status;
+ mStatus = status;
+ }
+ mEncoder->setStarted();
+ });
+
+ // Process codec events until EOS is reached, transcoding is stopped or an error occurs.
+ while (!mStopRequested && !mEosFromEncoder && mStatus == AMEDIA_OK) {
+ std::function<void()> message = mCodecMessageQueue.pop();
+ message();
+ }
+
+ mCodecMessageQueue.abort();
+ AMediaCodec_stop(mDecoder);
+
+ // Return error if transcoding was stopped before it finished.
+ if (mStopRequested && !mEosFromEncoder && mStatus == AMEDIA_OK) {
+ mStatus = AMEDIA_ERROR_UNKNOWN; // TODO: Define custom error codes?
+ }
+
+ return mStatus;
+}
+
+void VideoTrackTranscoder::abortTranscodeLoop() {
+ // Push abort message to the front of the codec event queue.
+ mCodecMessageQueue.push([this] { mStopRequested = true; }, true /* front */);
+}
+
+std::shared_ptr<AMediaFormat> VideoTrackTranscoder::getOutputFormat() const {
+ return mActualOutputFormat;
+}
+
+} // namespace android
diff --git a/media/libmediatranscoding/transcoder/benchmark/Android.bp b/media/libmediatranscoding/transcoder/benchmark/Android.bp
new file mode 100644
index 0000000..ce34702
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/benchmark/Android.bp
@@ -0,0 +1,23 @@
+cc_defaults {
+ name: "benchmarkdefaults",
+ shared_libs: ["libmediatranscoder", "libmediandk", "libbase", "libbinder_ndk"],
+ static_libs: ["libgoogle-benchmark"],
+}
+
+cc_test {
+ name: "MediaTranscoderBenchmark",
+ srcs: ["MediaTranscoderBenchmark.cpp"],
+ defaults: ["benchmarkdefaults"],
+}
+
+cc_test {
+ name: "MediaSampleReaderBenchmark",
+ srcs: ["MediaSampleReaderBenchmark.cpp"],
+ defaults: ["benchmarkdefaults"],
+}
+
+cc_test {
+ name: "MediaTrackTranscoderBenchmark",
+ srcs: ["MediaTrackTranscoderBenchmark.cpp"],
+ defaults: ["benchmarkdefaults"],
+}
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaSampleReaderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaSampleReaderBenchmark.cpp
new file mode 100644
index 0000000..f0b9304
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaSampleReaderBenchmark.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * MediaSampleReader benchmark tests.
+ *
+ * How to run the benchmark:
+ *
+ * 1. Download the media assets from http://go/transcodingbenchmark and push the directory
+ * ("TranscodingBenchmark") to /data/local/tmp.
+ *
+ * 2. Compile the benchmark and sync to device:
+ * $ mm -j72 && adb sync
+ *
+ * 3. Run:
+ * $ adb shell /data/nativetest64/MediaSampleReaderBenchmark/MediaSampleReaderBenchmark
+ */
+
+#define LOG_TAG "MediaSampleReaderBenchmark"
+
+#include <android-base/logging.h>
+#include <benchmark/benchmark.h>
+#include <fcntl.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <unistd.h>
+
+#include <thread>
+
+using namespace android;
+
+// Reads every sample of the selected tracks of |srcFileName| once per
+// benchmark iteration, one thread per track. |readAudio| controls whether
+// audio tracks are selected; |sequentialAccess| toggles the reader's enforced
+// sequential access mode.
+static void ReadMediaSamples(benchmark::State& state, const std::string& srcFileName,
+                             bool readAudio, bool sequentialAccess = false) {
+    // Asset directory.
+    static const std::string kAssetDirectory = "/data/local/tmp/TranscodingBenchmark/";
+
+    int srcFd = 0;
+    std::string srcPath = kAssetDirectory + srcFileName;
+
+    if ((srcFd = open(srcPath.c_str(), O_RDONLY)) < 0) {
+        state.SkipWithError("Unable to open source file");
+        return;
+    }
+
+    // Determine file size for the reader, then rewind.
+    const size_t fileSize = lseek(srcFd, 0, SEEK_END);
+    lseek(srcFd, 0, SEEK_SET);
+
+    for (auto _ : state) {
+        auto sampleReader = MediaSampleReaderNDK::createFromFd(srcFd, 0, fileSize);
+        // NOTE(review): createFromFd may return null on failure and is
+        // dereferenced below without a check — confirm or add a guard.
+        if (sampleReader->setEnforceSequentialAccess(sequentialAccess) != AMEDIA_OK) {
+            state.SkipWithError("setEnforceSequentialAccess failed");
+            return;
+        }
+
+        // Select tracks.
+        std::vector<int> trackIndices;
+        for (int trackIndex = 0; trackIndex < sampleReader->getTrackCount(); ++trackIndex) {
+            const char* mime = nullptr;
+
+            // NOTE(review): trackFormat is never released with
+            // AMediaFormat_delete — leaks one format per track per iteration.
+            AMediaFormat* trackFormat = sampleReader->getTrackFormat(trackIndex);
+            AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+
+            if (strncmp(mime, "video/", 6) == 0) {
+                // Export the source frame count so the benchmark reports an
+                // effective video frame rate.
+                int32_t frameCount;
+                if (AMediaFormat_getInt32(trackFormat, AMEDIAFORMAT_KEY_FRAME_COUNT, &frameCount)) {
+                    state.counters["VideoFrameRate"] =
+                            benchmark::Counter(frameCount, benchmark::Counter::kIsRate);
+                }
+            } else if (!readAudio && strncmp(mime, "audio/", 6) == 0) {
+                // Skip audio tracks when not requested. Tracks that are
+                // neither video nor audio are still selected.
+                continue;
+            }
+
+            trackIndices.push_back(trackIndex);
+            sampleReader->selectTrack(trackIndex);
+        }
+
+        // Start threads. Each thread drains its own track; the shared_ptr is
+        // captured by value so the reader outlives all threads.
+        std::vector<std::thread> trackThreads;
+        for (auto trackIndex : trackIndices) {
+            trackThreads.emplace_back([trackIndex, sampleReader, &state] {
+                LOG(INFO) << "Track " << trackIndex << " started";
+                MediaSampleInfo info;
+
+                // Grow-only scratch buffer reused across samples.
+                size_t bufferSize = 0;
+                std::unique_ptr<uint8_t[]> buffer;
+
+                while (true) {
+                    media_status_t status = sampleReader->getSampleInfoForTrack(trackIndex, &info);
+                    if (status == AMEDIA_ERROR_END_OF_STREAM) {
+                        break;
+                    }
+
+                    if (info.size > bufferSize) {
+                        bufferSize = info.size;
+                        buffer.reset(new uint8_t[bufferSize]);
+                    }
+
+                    status = sampleReader->readSampleDataForTrack(trackIndex, buffer.get(),
+                                                                  bufferSize);
+                    if (status != AMEDIA_OK) {
+                        state.SkipWithError("Error reading sample data");
+                        break;
+                    }
+                }
+
+                LOG(INFO) << "Track " << trackIndex << " finished";
+            });
+        }
+
+        // Join threads.
+        for (auto& thread : trackThreads) {
+            thread.join();
+        }
+    }
+
+    close(srcFd);
+}
+
+// Benchmark registration wrapper for transcoding.
+#define TRANSCODER_BENCHMARK(func) \
+    BENCHMARK(func)->UseRealTime()->MeasureProcessCPUTime()->Unit(benchmark::kMillisecond)
+
+// Reads audio and video tracks in parallel (one thread per track).
+static void BM_MediaSampleReader_AudioVideo_Parallel(benchmark::State& state) {
+    ReadMediaSamples(state, "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4",
+                     true /* readAudio */);
+}
+
+// Same source, but with the reader's sequential access mode enforced.
+static void BM_MediaSampleReader_AudioVideo_Sequential(benchmark::State& state) {
+    ReadMediaSamples(state, "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4",
+                     true /* readAudio */, true /* sequentialAccess */);
+}
+
+// Reads only the video track.
+static void BM_MediaSampleReader_Video(benchmark::State& state) {
+    ReadMediaSamples(state, "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4",
+                     false /* readAudio */);
+}
+
+TRANSCODER_BENCHMARK(BM_MediaSampleReader_AudioVideo_Parallel);
+TRANSCODER_BENCHMARK(BM_MediaSampleReader_AudioVideo_Sequential);
+TRANSCODER_BENCHMARK(BM_MediaSampleReader_Video);
+
+BENCHMARK_MAIN();
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
new file mode 100644
index 0000000..aee0ed6
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaTrackTranscoderBenchmark.cpp
@@ -0,0 +1,446 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Native media track transcoder benchmark tests.
+ *
+ * How to run the benchmark:
+ *
+ * 1. Download the media assets from http://go/transcodingbenchmark and push the directory
+ * ("TranscodingBenchmark") to /data/local/tmp.
+ *
+ * 2. Compile the benchmark and sync to device:
+ * $ mm -j72 && adb sync
+ *
+ * 3. Run:
+ * $ adb shell /data/nativetest64/MediaTrackTranscoderBenchmark/MediaTrackTranscoderBenchmark
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaTrackTranscoderBenchmark"
+
+#include <android-base/logging.h>
+#include <android/binder_process.h>
+#include <benchmark/benchmark.h>
+#include <fcntl.h>
+#include <media/MediaSampleReader.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <media/MediaTrackTranscoder.h>
+#include <media/MediaTrackTranscoderCallback.h>
+#include <media/NdkCommon.h>
+#include <media/PassthroughTrackTranscoder.h>
+#include <media/VideoTrackTranscoder.h>
+
+using namespace android;
+
+// Track media type: selects the video transcoder (kVideo) or the passthrough
+// transcoder (kAudio) in the benchmarks below.
+typedef enum {
+    kVideo,
+    kAudio,
+} MediaType;
+
+// Receives MediaTrackTranscoder finish/error callbacks and lets the benchmark
+// thread block until transcoding has completed.
+class TrackTranscoderCallbacks : public MediaTrackTranscoderCallback {
+public:
+    virtual void onTrackFormatAvailable(const MediaTrackTranscoder* transcoder __unused) override {}
+
+    virtual void onTrackFinished(const MediaTrackTranscoder* transcoder __unused) override {
+        std::unique_lock lock(mMutex);
+        mFinished = true;
+        mCondition.notify_all();
+    }
+
+    virtual void onTrackError(const MediaTrackTranscoder* transcoder __unused,
+                              media_status_t status) override {
+        std::unique_lock lock(mMutex);
+        mFinished = true;
+        mStatus = status;
+        mCondition.notify_all();
+    }
+
+    // Blocks until onTrackFinished or onTrackError fires.
+    // NOTE(review): no timeout — a hung transcoder hangs the benchmark run.
+    void waitForTranscodingFinished() {
+        std::unique_lock lock(mMutex);
+        while (!mFinished) {
+            mCondition.wait(lock);
+        }
+    }
+
+    // Terminal status; stays AMEDIA_OK unless onTrackError was called.
+    media_status_t mStatus = AMEDIA_OK;
+
+private:
+    std::mutex mMutex;
+    std::condition_variable mCondition;
+    bool mFinished = false;
+};
+
+/**
+ * MockSampleReader holds a ringbuffer of the first samples in the provided source track. Samples
+ * are returned to the caller from the ringbuffer in a round-robin fashion with increasing
+ * timestamps. The number of samples returned before EOS matches the number of frames in the source
+ * track.
+ */
+class MockSampleReader : public MediaSampleReader {
+public:
+ static std::shared_ptr<MediaSampleReader> createFromFd(int fd, size_t offset, size_t size) {
+ AMediaExtractor* extractor = AMediaExtractor_new();
+ media_status_t status = AMediaExtractor_setDataSourceFd(extractor, fd, offset, size);
+ if (status != AMEDIA_OK) return nullptr;
+
+ auto sampleReader = std::shared_ptr<MockSampleReader>(new MockSampleReader(extractor));
+ return sampleReader;
+ }
+
+ AMediaFormat* getFileFormat() override { return AMediaExtractor_getFileFormat(mExtractor); }
+
+ size_t getTrackCount() const override { return AMediaExtractor_getTrackCount(mExtractor); }
+
+ AMediaFormat* getTrackFormat(int trackIndex) override {
+ return AMediaExtractor_getTrackFormat(mExtractor, trackIndex);
+ }
+
+ media_status_t selectTrack(int trackIndex) override {
+ if (mSelectedTrack >= 0) return AMEDIA_ERROR_UNSUPPORTED;
+ mSelectedTrack = trackIndex;
+
+ media_status_t status = AMediaExtractor_selectTrack(mExtractor, trackIndex);
+ if (status != AMEDIA_OK) return status;
+
+ // Get the sample count.
+ AMediaFormat* format = getTrackFormat(trackIndex);
+ const bool haveSampleCount =
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_FRAME_COUNT, &mSampleCount);
+ AMediaFormat_delete(format);
+
+ if (!haveSampleCount) {
+ LOG(ERROR) << "No sample count in track format.";
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+
+ // Buffer samples.
+ const int32_t targetBufferCount = 60;
+ std::unique_ptr<uint8_t[]> buffer;
+ MediaSampleInfo info;
+ while (true) {
+ info.presentationTimeUs = AMediaExtractor_getSampleTime(mExtractor);
+ info.flags = AMediaExtractor_getSampleFlags(mExtractor);
+ info.size = AMediaExtractor_getSampleSize(mExtractor);
+
+ // Finish buffering after either reading all the samples in the track or after
+ // completing the GOP satisfying the target count.
+ if (mSamples.size() == mSampleCount ||
+ (mSamples.size() >= targetBufferCount && info.flags & SAMPLE_FLAG_SYNC_SAMPLE)) {
+ break;
+ }
+
+ buffer.reset(new uint8_t[info.size]);
+
+ ssize_t bytesRead = AMediaExtractor_readSampleData(mExtractor, buffer.get(), info.size);
+ if (bytesRead != info.size) {
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ mSamples.emplace_back(std::move(buffer), info);
+
+ AMediaExtractor_advance(mExtractor);
+ }
+
+ mFirstPtsUs = mSamples[0].second.presentationTimeUs;
+ mPtsDiff = mSamples[1].second.presentationTimeUs - mSamples[0].second.presentationTimeUs;
+
+ return AMEDIA_OK;
+ }
+
+ media_status_t setEnforceSequentialAccess(bool enforce __unused) override { return AMEDIA_OK; }
+
+ media_status_t getEstimatedBitrateForTrack(int trackIndex __unused,
+ int32_t* bitrate __unused) override {
+ return AMEDIA_ERROR_UNSUPPORTED;
+ }
+
+ media_status_t getSampleInfoForTrack(int trackIndex, MediaSampleInfo* info) override {
+ if (trackIndex != mSelectedTrack) return AMEDIA_ERROR_INVALID_PARAMETER;
+
+ if (mCurrentSampleIndex >= mSampleCount) {
+ info->presentationTimeUs = 0;
+ info->size = 0;
+ info->flags = SAMPLE_FLAG_END_OF_STREAM;
+ return AMEDIA_ERROR_END_OF_STREAM;
+ }
+
+ *info = mSamples[mCurrentSampleIndex % mSamples.size()].second;
+ info->presentationTimeUs = mFirstPtsUs + mCurrentSampleIndex * mPtsDiff;
+ return AMEDIA_OK;
+ }
+
+ media_status_t readSampleDataForTrack(int trackIndex, uint8_t* buffer,
+ size_t bufferSize) override {
+ if (trackIndex != mSelectedTrack) return AMEDIA_ERROR_INVALID_PARAMETER;
+
+ if (mCurrentSampleIndex >= mSampleCount) return AMEDIA_ERROR_END_OF_STREAM;
+
+ auto& p = mSamples[mCurrentSampleIndex % mSamples.size()];
+
+ if (bufferSize < p.second.size) return AMEDIA_ERROR_INVALID_PARAMETER;
+ memcpy(buffer, p.first.get(), p.second.size);
+
+ advanceTrack(trackIndex);
+ return AMEDIA_OK;
+ }
+
+ void advanceTrack(int trackIndex) {
+ if (trackIndex != mSelectedTrack) return;
+ ++mCurrentSampleIndex;
+ }
+
+ virtual ~MockSampleReader() override { AMediaExtractor_delete(mExtractor); }
+
+private:
+ MockSampleReader(AMediaExtractor* extractor) : mExtractor(extractor) {}
+ AMediaExtractor* mExtractor = nullptr;
+ int32_t mSampleCount = 0;
+ std::vector<std::pair<std::unique_ptr<uint8_t[]>, MediaSampleInfo>> mSamples;
+ int mSelectedTrack = -1;
+ int32_t mCurrentSampleIndex = 0;
+ int64_t mFirstPtsUs = 0;
+ int64_t mPtsDiff = 0;
+};
+
+// Builds the destination track format for |mediaType| from |sourceFormat|:
+// video tracks get AVC at a fixed bitrate/frame rate; audio (and any other
+// type) yields a null format, which callers treat as passthrough.
+static std::shared_ptr<AMediaFormat> GetDefaultTrackFormat(MediaType mediaType,
+                                                           AMediaFormat* sourceFormat) {
+    // Default video config.
+    static constexpr int32_t kVideoBitRate = 20 * 1000 * 1000;  // 20 mbps
+    static constexpr float kVideoFrameRate = 30.0f;             // 30 fps
+
+    AMediaFormat* format = nullptr;
+
+    if (mediaType == kVideo) {
+        format = AMediaFormat_new();
+        AMediaFormat_copy(format, sourceFormat);
+        AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, AMEDIA_MIMETYPE_VIDEO_AVC);
+        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, kVideoBitRate);
+        AMediaFormat_setFloat(format, AMEDIAFORMAT_KEY_FRAME_RATE, kVideoFrameRate);
+    }
+    // nothing for audio.
+
+    // A null |format| produces a shared_ptr holding nullptr.
+    return std::shared_ptr<AMediaFormat>(format, &AMediaFormat_delete);
+}
+
+/** Gets a MediaSampleReader (real or mock) for the source file, or null on failure. */
+static std::shared_ptr<MediaSampleReader> GetSampleReader(const std::string& srcFileName,
+                                                          bool mock) {
+    // Asset directory
+    static const std::string kAssetDirectory = "/data/local/tmp/TranscodingBenchmark/";
+
+    int srcFd = 0;
+    std::string srcPath = kAssetDirectory + srcFileName;
+
+    if ((srcFd = open(srcPath.c_str(), O_RDONLY)) < 0) {
+        return nullptr;
+    }
+
+    const size_t fileSize = lseek(srcFd, 0, SEEK_END);
+    lseek(srcFd, 0, SEEK_SET);
+
+    std::shared_ptr<MediaSampleReader> sampleReader;
+
+    if (mock) {
+        sampleReader = MockSampleReader::createFromFd(srcFd, 0 /* offset */, fileSize);
+    } else {
+        sampleReader = MediaSampleReaderNDK::createFromFd(srcFd, 0 /* offset */, fileSize);
+    }
+
+    // The fd is closed immediately — assumes both reader implementations dup
+    // the fd internally; TODO(review): confirm.
+    if (srcFd > 0) close(srcFd);
+    return sampleReader;
+}
+
+/**
+ * Configures a MediaTrackTranscoder with an empty sample consumer so that the samples are returned
+ * to the transcoder immediately. |sampleCount| is captured by reference and
+ * incremented for every non-empty, non-codec-config sample — it must outlive
+ * the transcoder.
+ */
+static void ConfigureEmptySampleConsumer(const std::shared_ptr<MediaTrackTranscoder>& transcoder,
+                                         uint32_t& sampleCount) {
+    transcoder->setSampleConsumer([&sampleCount](const std::shared_ptr<MediaSample>& sample) {
+        if (!(sample->info.flags & SAMPLE_FLAG_CODEC_CONFIG) && sample->info.size > 0) {
+            ++sampleCount;
+        }
+    });
+}
+
+/**
+ * Callback to edit track format for transcoding.
+ * @param dstFormat The default track format for the track type.
+ */
+using TrackFormatEditCallback = std::function<void(AMediaFormat* dstFormat)>;
+
+/**
+ * Configures a MediaTrackTranscoder with the provided MediaSampleReader, reading from the first
+ * track that matches the specified media type.
+ * @return false if no matching track exists or any configuration step fails.
+ */
+static bool ConfigureSampleReader(const std::shared_ptr<MediaTrackTranscoder>& transcoder,
+                                  const std::shared_ptr<MediaSampleReader>& sampleReader,
+                                  MediaType mediaType,
+                                  const TrackFormatEditCallback& formatEditor) {
+    int srcTrackIndex = -1;
+    std::shared_ptr<AMediaFormat> srcTrackFormat = nullptr;
+
+    for (int trackIndex = 0; trackIndex < sampleReader->getTrackCount(); ++trackIndex) {
+        AMediaFormat* trackFormat = sampleReader->getTrackFormat(trackIndex);
+
+        const char* mime = nullptr;
+        AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+
+        if ((mediaType == kVideo && strncmp(mime, "video/", 6) == 0) ||
+            (mediaType == kAudio && strncmp(mime, "audio/", 6) == 0)) {
+            srcTrackIndex = trackIndex;
+            // Ownership of the matching format transfers to the shared_ptr.
+            srcTrackFormat = std::shared_ptr<AMediaFormat>(trackFormat, &AMediaFormat_delete);
+            break;
+        }
+        // Non-matching formats are released immediately.
+        AMediaFormat_delete(trackFormat);
+    }
+
+    if (srcTrackIndex == -1) {
+        LOG(ERROR) << "No matching source track found";
+        return false;
+    }
+
+    media_status_t status = sampleReader->selectTrack(srcTrackIndex);
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "Unable to select track";
+        return false;
+    }
+
+    auto destinationFormat = GetDefaultTrackFormat(mediaType, srcTrackFormat.get());
+    if (formatEditor != nullptr) {
+        formatEditor(destinationFormat.get());
+    }
+    status = transcoder->configure(sampleReader, srcTrackIndex, destinationFormat);
+    if (status != AMEDIA_OK) {
+        LOG(ERROR) << "transcoder configure returned " << status;
+        return false;
+    }
+
+    return true;
+}
+
+// Runs one MediaTrackTranscoder end-to-end per benchmark iteration on the
+// first track of |srcFileName| matching |mediaType|. |mockReader| swaps the
+// NDK extractor for the in-memory MockSampleReader; |formatEditor| may tweak
+// the destination format (e.g. operating rate).
+static void BenchmarkTranscoder(benchmark::State& state, const std::string& srcFileName,
+                                bool mockReader, MediaType mediaType,
+                                const TrackFormatEditCallback& formatEditor = nullptr) {
+    // Start the binder thread pool exactly once per process; presumably
+    // required for codec service callbacks — TODO(review): confirm.
+    static pthread_once_t once = PTHREAD_ONCE_INIT;
+    pthread_once(&once, ABinderProcess_startThreadPool);
+
+    for (auto _ : state) {
+        std::shared_ptr<TrackTranscoderCallbacks> callbacks =
+                std::make_shared<TrackTranscoderCallbacks>();
+        std::shared_ptr<MediaTrackTranscoder> transcoder;
+
+        if (mediaType == kVideo) {
+            transcoder = VideoTrackTranscoder::create(callbacks);
+        } else {
+            transcoder = std::make_shared<PassthroughTrackTranscoder>(callbacks);
+        }
+
+        std::shared_ptr<MediaSampleReader> sampleReader = GetSampleReader(srcFileName, mockReader);
+        if (sampleReader == nullptr) {
+            state.SkipWithError("Unable to create sample reader");
+            return;
+        }
+
+        if (!ConfigureSampleReader(transcoder, sampleReader, mediaType, formatEditor)) {
+            state.SkipWithError("Unable to configure the transcoder");
+            return;
+        }
+
+        uint32_t sampleCount = 0;
+        ConfigureEmptySampleConsumer(transcoder, sampleCount);
+
+        if (!transcoder->start()) {
+            state.SkipWithError("Unable to start the transcoder");
+            return;
+        }
+
+        callbacks->waitForTranscodingFinished();
+        transcoder->stop();
+
+        if (callbacks->mStatus != AMEDIA_OK) {
+            state.SkipWithError("Transcoder failed with error");
+            return;
+        }
+
+        LOG(DEBUG) << "Number of samples received: " << sampleCount;
+        // Report transcoded samples per second.
+        state.counters["FrameRate"] = benchmark::Counter(sampleCount, benchmark::Counter::kIsRate);
+    }
+}
+
+// Variant of BenchmarkTranscoder that reads operating rate and priority from
+// the benchmark arguments (state.range(0) / state.range(1)); a negative value
+// for either means "use codec defaults" (no format edit).
+static void BenchmarkTranscoderWithOperatingRate(benchmark::State& state,
+                                                 const std::string& srcFile, bool mockReader,
+                                                 MediaType mediaType) {
+    TrackFormatEditCallback editor;
+    const int32_t operatingRate = state.range(0);
+    const int32_t priority = state.range(1);
+
+    if (operatingRate >= 0 && priority >= 0) {
+        editor = [operatingRate, priority](AMediaFormat* format) {
+            AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_OPERATING_RATE, operatingRate);
+            AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_PRIORITY, priority);
+        };
+    }
+    BenchmarkTranscoder(state, srcFile, mockReader, mediaType, editor);
+}
+
+//-------------------------------- AVC to AVC Benchmarks -------------------------------------------
+
+// Full pipeline: NDK extractor + AVC decode/encode.
+static void BM_VideoTranscode_AVC2AVC(benchmark::State& state) {
+    const char* srcFile = "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4";
+    BenchmarkTranscoderWithOperatingRate(state, srcFile, false /* mockReader */, kVideo);
+}
+
+// Codec-only: samples served from the in-memory MockSampleReader.
+static void BM_VideoTranscode_AVC2AVC_NoExtractor(benchmark::State& state) {
+    const char* srcFile = "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4";
+    BenchmarkTranscoderWithOperatingRate(state, srcFile, true /* mockReader */, kVideo);
+}
+
+//-------------------------------- HEVC to AVC Benchmarks ------------------------------------------
+
+// Full pipeline: NDK extractor + HEVC decode, AVC encode.
+static void BM_VideoTranscode_HEVC2AVC(benchmark::State& state) {
+    const char* srcFile = "video_1920x1080_3863frame_hevc_4Mbps_30fps_aac.mp4";
+    BenchmarkTranscoderWithOperatingRate(state, srcFile, false /* mockReader */, kVideo);
+}
+
+// Codec-only variant of the HEVC → AVC benchmark.
+static void BM_VideoTranscode_HEVC2AVC_NoExtractor(benchmark::State& state) {
+    const char* srcFile = "video_1920x1080_3863frame_hevc_4Mbps_30fps_aac.mp4";
+    BenchmarkTranscoderWithOperatingRate(state, srcFile, true /* mockReader */, kVideo);
+}
+
+//-------------------------------- Benchmark Registration ------------------------------------------
+
+// Benchmark registration wrapper for transcoding.
+#define TRANSCODER_BENCHMARK(func) \
+    BENCHMARK(func)->UseRealTime()->MeasureProcessCPUTime()->Unit(benchmark::kMillisecond)
+
+// Benchmark registration for testing different operating rate and priority combinations.
+#define TRANSCODER_OPERATING_RATE_BENCHMARK(func)  \
+    TRANSCODER_BENCHMARK(func)                     \
+            ->Args({-1, -1}) /* <-- Use default */ \
+            ->Args({240, 0})                       \
+            ->Args({INT32_MAX, 0})                 \
+            ->Args({240, 1})                       \
+            ->Args({INT32_MAX, 1})
+
+TRANSCODER_OPERATING_RATE_BENCHMARK(BM_VideoTranscode_AVC2AVC);
+TRANSCODER_OPERATING_RATE_BENCHMARK(BM_VideoTranscode_AVC2AVC_NoExtractor);
+
+TRANSCODER_OPERATING_RATE_BENCHMARK(BM_VideoTranscode_HEVC2AVC);
+TRANSCODER_OPERATING_RATE_BENCHMARK(BM_VideoTranscode_HEVC2AVC_NoExtractor);
+
+BENCHMARK_MAIN();
diff --git a/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
new file mode 100644
index 0000000..f985a28
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/benchmark/MediaTranscoderBenchmark.cpp
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Native media transcoder library benchmark tests.
+ *
+ * How to run the benchmark:
+ *
+ * 1. Download the media assets from http://go/transcodingbenchmark and push the directory
+ * ("TranscodingBenchmark") to /data/local/tmp.
+ *
+ * 2. Compile the benchmark and sync to device:
+ * $ mm -j72 && adb sync
+ *
+ * 3. Run:
+ * $ adb shell /data/nativetest64/MediaTranscoderBenchmark/MediaTranscoderBenchmark
+ */
+
+#include <benchmark/benchmark.h>
+#include <fcntl.h>
+#include <media/MediaTranscoder.h>
+
+using namespace android;
+
+// Bridges MediaTranscoder callbacks to a blocking wait with a 5-minute
+// timeout.
+class TranscoderCallbacks : public MediaTranscoder::CallbackInterface {
+public:
+    virtual void onFinished(const MediaTranscoder* transcoder __unused) override {
+        std::unique_lock<std::mutex> lock(mMutex);
+        mFinished = true;
+        mCondition.notify_all();
+    }
+
+    virtual void onError(const MediaTranscoder* transcoder __unused,
+                         media_status_t error) override {
+        std::unique_lock<std::mutex> lock(mMutex);
+        mFinished = true;
+        mStatus = error;
+        mCondition.notify_all();
+    }
+
+    virtual void onProgressUpdate(const MediaTranscoder* transcoder __unused,
+                                  int32_t progress __unused) override {}
+
+    virtual void onCodecResourceLost(const MediaTranscoder* transcoder __unused,
+                                     const std::shared_ptr<const Parcel>& pausedState
+                                             __unused) override {}
+
+    // Returns false if transcoding does not finish within 5 minutes.
+    bool waitForTranscodingFinished() {
+        std::unique_lock<std::mutex> lock(mMutex);
+        while (!mFinished) {
+            if (mCondition.wait_for(lock, std::chrono::minutes(5)) == std::cv_status::timeout) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // Terminal status; stays AMEDIA_OK unless onError was called.
+    media_status_t mStatus = AMEDIA_OK;
+
+private:
+    std::mutex mMutex;
+    std::condition_variable mCondition;
+    bool mFinished = false;
+};
+
+// Creates the default destination video format (bitrate only; other keys are
+// inherited from the source by the transcoder). Caller owns the result.
+static AMediaFormat* CreateDefaultVideoFormat() {
+    // Default bitrate
+    static constexpr int32_t kVideoBitRate = 20 * 1000 * 1000;  // 20 Mbps
+
+    AMediaFormat* videoFormat = AMediaFormat_new();
+    AMediaFormat_setInt32(videoFormat, AMEDIAFORMAT_KEY_BIT_RATE, kVideoBitRate);
+    return videoFormat;
+}
+
+/**
+ * Callback to configure tracks for transcoding.
+ * @param mime The source track mime type.
+ * @param dstFormat The destination format if the track should be transcoded or nullptr if the track
+ * should be passed through.
+ * @return True if the track should be included in the output file.
+ */
+using TrackSelectionCallback = std::function<bool(const char* mime, AMediaFormat** dstFormat)>;
+
+// Transcodes |srcFileName| into |dstFileName| once per benchmark iteration,
+// delegating per-track configuration to |trackSelectionCallback|.
+static void TranscodeMediaFile(benchmark::State& state, const std::string& srcFileName,
+                               const std::string& dstFileName,
+                               TrackSelectionCallback trackSelectionCallback) {
+    // Write-only, create file if non-existent.
+    // NOTE(review): no O_TRUNC — if a previous, larger output exists, its
+    // trailing bytes survive past the new file's end; confirm the muxer
+    // rewrites the whole file or add O_TRUNC.
+    static constexpr int kDstOpenFlags = O_WRONLY | O_CREAT;
+    // User R+W permission.
+    static constexpr int kDstFileMode = S_IRUSR | S_IWUSR;
+    // Asset directory
+    static const std::string kAssetDirectory = "/data/local/tmp/TranscodingBenchmark/";
+
+    int srcFd = 0;
+    int dstFd = 0;
+
+    std::string srcPath = kAssetDirectory + srcFileName;
+    std::string dstPath = kAssetDirectory + dstFileName;
+
+    auto callbacks = std::make_shared<TranscoderCallbacks>();
+    media_status_t status = AMEDIA_OK;
+
+    if ((srcFd = open(srcPath.c_str(), O_RDONLY)) < 0) {
+        state.SkipWithError("Unable to open source file");
+        goto exit;
+    }
+    if ((dstFd = open(dstPath.c_str(), kDstOpenFlags, kDstFileMode)) < 0) {
+        state.SkipWithError("Unable to open destination file");
+        goto exit;
+    }
+
+    for (auto _ : state) {
+        auto transcoder = MediaTranscoder::create(callbacks, nullptr);
+
+        status = transcoder->configureSource(srcFd);
+        if (status != AMEDIA_OK) {
+            state.SkipWithError("Unable to configure transcoder source");
+            goto exit;
+        }
+
+        status = transcoder->configureDestination(dstFd);
+        if (status != AMEDIA_OK) {
+            state.SkipWithError("Unable to configure transcoder destination");
+            goto exit;
+        }
+
+        std::vector<std::shared_ptr<AMediaFormat>> trackFormats = transcoder->getTrackFormats();
+        // NOTE(review): signed/unsigned comparison below (int vs size()).
+        for (int i = 0; i < trackFormats.size(); ++i) {
+            AMediaFormat* srcFormat = trackFormats[i].get();
+            AMediaFormat* dstFormat = nullptr;
+
+            const char* mime = nullptr;
+            if (!AMediaFormat_getString(srcFormat, AMEDIAFORMAT_KEY_MIME, &mime)) {
+                state.SkipWithError("Source track format does not have MIME type");
+                goto exit;
+            }
+
+            if (strncmp(mime, "video/", 6) == 0) {
+                // Export source frame count as an effective frame rate counter.
+                int32_t frameCount;
+                if (AMediaFormat_getInt32(srcFormat, AMEDIAFORMAT_KEY_FRAME_COUNT, &frameCount)) {
+                    state.counters["VideoFrameRate"] =
+                            benchmark::Counter(frameCount, benchmark::Counter::kIsRate);
+                }
+            }
+
+            // A false return excludes the track — presumably an unconfigured
+            // track is dropped by MediaTranscoder; TODO(review): confirm.
+            if (trackSelectionCallback(mime, &dstFormat)) {
+                status = transcoder->configureTrackFormat(i, dstFormat);
+            }
+
+            if (dstFormat != nullptr) {
+                AMediaFormat_delete(dstFormat);
+            }
+            if (status != AMEDIA_OK) {
+                state.SkipWithError("Unable to configure track");
+                goto exit;
+            }
+        }
+
+        status = transcoder->start();
+        if (status != AMEDIA_OK) {
+            state.SkipWithError("Unable to start transcoder");
+            goto exit;
+        }
+
+        if (!callbacks->waitForTranscodingFinished()) {
+            transcoder->cancel();
+            state.SkipWithError("Transcoder timed out");
+            goto exit;
+        }
+        if (callbacks->mStatus != AMEDIA_OK) {
+            state.SkipWithError("Transcoder error when running");
+            goto exit;
+        }
+    }
+
+exit:
+    // fd 0 is treated as "not opened", consistent with the initializers above.
+    if (srcFd > 0) close(srcFd);
+    if (dstFd > 0) close(dstFd);
+}
+
+/**
+ * Callback to edit track format for transcoding.
+ * @param dstFormat The default track format for the track type.
+ */
+using TrackFormatEditCallback = std::function<void(AMediaFormat* dstFormat)>;
+
+// Convenience overload: selects tracks with simple include/transcode booleans
+// and applies an optional editor to the default video destination format.
+static void TranscodeMediaFile(benchmark::State& state, const std::string& srcFileName,
+                               const std::string& dstFileName, bool includeAudio,
+                               bool transcodeVideo,
+                               const TrackFormatEditCallback& videoFormatEditor = nullptr) {
+    TranscodeMediaFile(state, srcFileName, dstFileName,
+                       [=](const char* mime, AMediaFormat** dstFormatOut) -> bool {
+                           *dstFormatOut = nullptr;
+                           if (strncmp(mime, "video/", 6) == 0 && transcodeVideo) {
+                               *dstFormatOut = CreateDefaultVideoFormat();
+                               if (videoFormatEditor != nullptr) {
+                                   videoFormatEditor(*dstFormatOut);
+                               }
+                           } else if (strncmp(mime, "audio/", 6) == 0 && !includeAudio) {
+                               return false;
+                           }
+                           return true;
+                       });
+}
+
+// Requests maximum codec speed on the destination format.
+// NOTE(review): OPERATING_RATE is written with setFloat here but with setInt32
+// in MediaTrackTranscoderBenchmark; INT32_MAX is also not exactly
+// representable as float (rounds to 2^31) — confirm both forms are accepted.
+static void SetMaxOperatingRate(AMediaFormat* format) {
+    AMediaFormat_setFloat(format, AMEDIAFORMAT_KEY_OPERATING_RATE, INT32_MAX);
+    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_PRIORITY, 1);
+}
+
+//-------------------------------- AVC to AVC Benchmarks -------------------------------------------
+
+// AVC → AVC, audio kept.
+static void BM_TranscodeAvc2AvcAudioVideo2AudioVideo(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4",
+                       "video_1920x1080_3648frame_h264_22Mbps_30fps_aac_transcoded_AV.mp4",
+                       true /* includeAudio */, true /* transcodeVideo */);
+}
+
+// AVC → AVC, video only.
+static void BM_TranscodeAvc2AvcVideo2Video(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3648frame_h264_22Mbps_30fps.mp4",
+                       "video_1920x1080_3648frame_h264_22Mbps_30fps_transcoded_V.mp4",
+                       false /* includeAudio */, true /* transcodeVideo */);
+}
+
+// As above with maximum codec operating rate requested.
+static void BM_TranscodeAvc2AvcAV2AVMaxOperatingRate(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4",
+                       "video_1920x1080_3648frame_h264_22Mbps_30fps_aac_transcoded_AV.mp4",
+                       true /* includeAudio */, true /* transcodeVideo */, SetMaxOperatingRate);
+}
+
+static void BM_TranscodeAvc2AvcV2VMaxOperatingRate(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3648frame_h264_22Mbps_30fps.mp4",
+                       "video_1920x1080_3648frame_h264_22Mbps_30fps_transcoded_V.mp4",
+                       false /* includeAudio */, true /* transcodeVideo */, SetMaxOperatingRate);
+}
+
+//-------------------------------- HEVC to AVC Benchmarks ------------------------------------------
+
+// HEVC → AVC, audio kept.
+static void BM_TranscodeHevc2AvcAudioVideo2AudioVideo(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3863frame_hevc_4Mbps_30fps_aac.mp4",
+                       "video_1920x1080_3863frame_hevc_4Mbps_30fps_aac_transcoded_AV.mp4",
+                       true /* includeAudio */, true /* transcodeVideo */);
+}
+
+// HEVC → AVC, video only.
+static void BM_TranscodeHevc2AvcVideo2Video(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3863frame_hevc_4Mbps_30fps.mp4",
+                       "video_1920x1080_3863frame_hevc_4Mbps_30fps_transcoded_V.mp4",
+                       false /* includeAudio */, true /* transcodeVideo */);
+}
+
+static void BM_TranscodeHevc2AvcAV2AVMaxOperatingRate(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3863frame_hevc_4Mbps_30fps_aac.mp4",
+                       "video_1920x1080_3863frame_hevc_4Mbps_30fps_aac_transcoded_AV.mp4",
+                       true /* includeAudio */, true /* transcodeVideo */, SetMaxOperatingRate);
+}
+
+static void BM_TranscodeHevc2AvcV2VMaxOperatingRate(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3863frame_hevc_4Mbps_30fps.mp4",
+                       "video_1920x1080_3863frame_hevc_4Mbps_30fps_transcoded_V.mp4",
+                       false /* includeAudio */, true /* transcodeVideo */, SetMaxOperatingRate);
+}
+
+//-------------------------------- Passthrough Benchmarks ------------------------------------------
+
+// Remux without transcoding (audio + video).
+static void BM_TranscodeAudioVideoPassthrough(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3648frame_h264_22Mbps_30fps_aac.mp4",
+                       "video_1920x1080_3648frame_h264_22Mbps_30fps_aac_passthrough_AV.mp4",
+                       true /* includeAudio */, false /* transcodeVideo */);
+}
+// Remux without transcoding, video only.
+// NOTE(review): output name ends in "_AV" although this run is video-only.
+static void BM_TranscodeVideoPassthrough(benchmark::State& state) {
+    TranscodeMediaFile(state, "video_1920x1080_3648frame_h264_22Mbps_30fps.mp4",
+                       "video_1920x1080_3648frame_h264_22Mbps_30fps_passthrough_AV.mp4",
+                       false /* includeAudio */, false /* transcodeVideo */);
+}
+
+//-------------------------------- Benchmark Registration ------------------------------------------
+
+// Benchmark registration wrapper for transcoding.
+#define TRANSCODER_BENCHMARK(func) \
+    BENCHMARK(func)->UseRealTime()->MeasureProcessCPUTime()->Unit(benchmark::kMillisecond)
+
+TRANSCODER_BENCHMARK(BM_TranscodeAvc2AvcAudioVideo2AudioVideo);
+TRANSCODER_BENCHMARK(BM_TranscodeAvc2AvcVideo2Video);
+TRANSCODER_BENCHMARK(BM_TranscodeAvc2AvcAV2AVMaxOperatingRate);
+TRANSCODER_BENCHMARK(BM_TranscodeAvc2AvcV2VMaxOperatingRate);
+
+TRANSCODER_BENCHMARK(BM_TranscodeHevc2AvcAudioVideo2AudioVideo);
+TRANSCODER_BENCHMARK(BM_TranscodeHevc2AvcVideo2Video);
+TRANSCODER_BENCHMARK(BM_TranscodeHevc2AvcAV2AVMaxOperatingRate);
+TRANSCODER_BENCHMARK(BM_TranscodeHevc2AvcV2VMaxOperatingRate);
+
+TRANSCODER_BENCHMARK(BM_TranscodeAudioVideoPassthrough);
+TRANSCODER_BENCHMARK(BM_TranscodeVideoPassthrough);
+
+BENCHMARK_MAIN();
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSample.h b/media/libmediatranscoding/transcoder/include/media/MediaSample.h
new file mode 100644
index 0000000..8a239a6
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSample.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SAMPLE_H
+#define ANDROID_MEDIA_SAMPLE_H
+
+#include <cstdint>
+#include <functional>
+#include <memory>
+
+namespace android {
+
+/**
+ * Media sample flags.
+ * These flags purposely match the media NDK's buffer and extractor flags with one exception. The
+ * NDK extractor's flag for encrypted samples (AMEDIAEXTRACTOR_SAMPLE_FLAG_ENCRYPTED) is equal to 2,
+ * i.e. the same as SAMPLE_FLAG_CODEC_CONFIG below and NDK's AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG.
+ * Sample producers based on the NDK's extractor are responsible for catching those values.
+ * Note that currently the media transcoder does not support encrypted samples.
+ */
+enum : uint32_t {
+ SAMPLE_FLAG_SYNC_SAMPLE = 1,
+ SAMPLE_FLAG_CODEC_CONFIG = 2,
+ SAMPLE_FLAG_END_OF_STREAM = 4,
+ SAMPLE_FLAG_PARTIAL_FRAME = 8,
+};
+
+/**
+ * MediaSampleInfo is an object that carries information about a compressed media sample without
+ * holding any sample data.
+ */
+struct MediaSampleInfo {
+ /** The sample's presentation timestamp in microseconds. */
+ int64_t presentationTimeUs = 0;
+
+ /** The size of the compressed sample data in bytes. */
+ size_t size = 0;
+
+ /** Sample flags. */
+ uint32_t flags = 0;
+};
+
+/**
+ * MediaSample holds a compressed media sample in memory.
+ */
+struct MediaSample {
+ /**
+ * Callback to notify that a media sample is about to be released, giving the creator a chance
+ * to reclaim the data buffer backing the sample. Once this callback returns, the media sample
+ * instance *will* be released so it cannot be used outside of the callback. To enable the
+ * callback, create the media sample with {@link #createWithReleaseCallback}.
+ * @param sample The sample to be released.
+ */
+ using OnSampleReleasedCallback = std::function<void(MediaSample* sample)>;
+
+ /**
+ * Creates a new media sample instance with a registered release callback. The release callback
+ * will get called right before the media sample is released giving the creator a chance to
+ * reclaim the buffer.
+ * @param buffer Byte buffer containing the sample's compressed data.
+ * @param dataOffset Offset, in bytes, to the sample's compressed data inside the buffer.
+ * @param bufferId Buffer identifier that can be used to identify the buffer on release.
+ * @param releaseCallback The sample release callback.
+ * @return A new media sample instance.
+ */
+ static std::shared_ptr<MediaSample> createWithReleaseCallback(
+ uint8_t* buffer, size_t dataOffset, uint32_t bufferId,
+ OnSampleReleasedCallback releaseCallback) {
+ MediaSample* sample = new MediaSample(buffer, dataOffset, bufferId, releaseCallback);
+ return std::shared_ptr<MediaSample>(
+ sample, std::bind(&MediaSample::releaseSample, std::placeholders::_1));
+ }
+
+ /**
+ * Byte buffer containing the sample's compressed data. The media sample instance does not take
+ * ownership of the buffer and will not automatically release the memory, but the caller can
+ * register a release callback by creating the media sample with
+ * {@link #createWithReleaseCallback}.
+ */
+ const uint8_t* buffer = nullptr;
+
+ /** Offset, in bytes, to the sample's compressed data inside the buffer. */
+ size_t dataOffset = 0;
+
+ /**
+ * Buffer identifier. This identifier is likely only meaningful to the sample data producer and
+ * can be used for reclaiming the buffer once a consumer is done processing it.
+ */
+ uint32_t bufferId = 0xBAADF00D;
+
+ /** Media sample information. */
+ MediaSampleInfo info;
+
+ MediaSample() = default;
+
+private:
+ MediaSample(uint8_t* buffer, size_t dataOffset, uint32_t bufferId,
+ OnSampleReleasedCallback releaseCallback)
+ : buffer(buffer),
+ dataOffset(dataOffset),
+ bufferId(bufferId),
+ mReleaseCallback(releaseCallback){};
+
+ static void releaseSample(MediaSample* sample) {
+ if (sample->mReleaseCallback != nullptr) {
+ sample->mReleaseCallback(sample);
+ }
+ delete sample;
+ }
+
+ // Do not allow copying to prevent dangling pointers in the copied object after the original is
+ // released.
+ MediaSample(const MediaSample&) = delete;
+ MediaSample& operator=(const MediaSample&) = delete;
+
+ const OnSampleReleasedCallback mReleaseCallback = nullptr;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_SAMPLE_H
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSampleQueue.h b/media/libmediatranscoding/transcoder/include/media/MediaSampleQueue.h
new file mode 100644
index 0000000..c6cf1a4
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSampleQueue.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SAMPLE_QUEUE_H
+#define ANDROID_MEDIA_SAMPLE_QUEUE_H
+
+#include <media/MediaSample.h>
+#include <utils/Mutex.h>
+
+#include <memory>
+#include <mutex>
+#include <queue>
+
+namespace android {
+
+/**
+ * MediaSampleQueue asynchronously connects a producer and a consumer of media samples.
+ * Media samples flow through the queue in FIFO order. If the queue is empty the consumer will be
+ * blocked until a new media sample is added or until the producer aborts the queue operation.
+ */
+class MediaSampleQueue {
+public:
+ /**
+ * Enqueues a media sample at the end of the queue and notifies potentially waiting consumers.
+ * If the queue has previously been aborted this method does nothing.
+ * @param sample The media sample to enqueue.
+ * @return True if the queue has been aborted.
+ */
+ bool enqueue(const std::shared_ptr<MediaSample>& sample);
+
+ /**
+ * Removes the next media sample from the queue and returns it. If the queue has previously been
+ * aborted this method returns null. Note that this method will block while the queue is empty.
+ * @param[out] sample The next media sample in the queue.
+ * @return True if the queue has been aborted.
+ */
+ bool dequeue(std::shared_ptr<MediaSample>* sample /* nonnull */);
+
+ /**
+ * Checks if the queue currently holds any media samples.
+ * @return True if the queue is empty or has been aborted. False otherwise.
+ */
+ bool isEmpty();
+
+ /**
+ * Aborts the queue operation. This clears the queue and notifies waiting consumers. After the
+ * queue has been aborted it is not possible to enqueue more samples, and dequeue will return null.
+ */
+ void abort();
+
+private:
+ std::queue<std::shared_ptr<MediaSample>> mSampleQueue GUARDED_BY(mMutex);
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ bool mAborted GUARDED_BY(mMutex) = false;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_SAMPLE_QUEUE_H
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSampleReader.h b/media/libmediatranscoding/transcoder/include/media/MediaSampleReader.h
new file mode 100644
index 0000000..7b6fbef
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSampleReader.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SAMPLE_READER_H
+#define ANDROID_MEDIA_SAMPLE_READER_H
+
+#include <media/MediaSample.h>
+#include <media/NdkMediaError.h>
+#include <media/NdkMediaFormat.h>
+
+namespace android {
+
+/**
+ * MediaSampleReader is an interface for reading media samples from a container. MediaSampleReader
+ * allows for reading samples from multiple tracks on individual threads independently of each other
+ * while preserving the order of samples. Due to poor non-sequential access performance of the
+ * underlying extractor, MediaSampleReader can optionally enforce sequential sample access by
+ * blocking requests for tracks that the underlying extractor does not currently point to. Waiting
+ * threads are serviced once the reader advances to a sample from the specified track. Due to this
+ * it is important to read samples and advance the reader from all selected tracks to avoid hanging
+ * other tracks. MediaSampleReader implementations are thread safe and sample access should be done
+ * on one thread per selected track.
+ */
+class MediaSampleReader {
+public:
+ /**
+ * Returns the file format of the media container as a AMediaFormat.
+ * The caller is responsible for releasing the format when finished with it using
+ * AMediaFormat_delete().
+ * @return The file media format.
+ */
+ virtual AMediaFormat* getFileFormat() = 0;
+
+ /**
+ * Returns the number of tracks in the media container.
+ * @return The number of tracks.
+ */
+ virtual size_t getTrackCount() const = 0;
+
+ /**
+ * Returns the media format of a specific track as a AMediaFormat.
+ * The caller is responsible for releasing the format when finished with it using
+ * AMediaFormat_delete().
+ * @param trackIndex The track index (zero-based).
+ * @return The track media format.
+ */
+ virtual AMediaFormat* getTrackFormat(int trackIndex) = 0;
+
+ /**
+ * Select a track for sample access. Tracks must be selected in order for sample information and
+ * sample data to be available for that track. Samples for selected tracks must be accessed on
+ * its own thread to avoid blocking other tracks.
+ * @param trackIndex The track to select.
+ * @return AMEDIA_OK on success.
+ */
+ virtual media_status_t selectTrack(int trackIndex) = 0;
+
+ /**
+ * Toggles sequential access enforcement on or off. When the reader enforces sequential access
+ * calls to read sample information will block unless the underlying extractor points to the
+ * specified track.
+ * @param enforce True to enforce sequential access.
+ * @return AMEDIA_OK on success.
+ */
+ virtual media_status_t setEnforceSequentialAccess(bool enforce) = 0;
+
+ /**
+ * Estimates the bitrate of a source track by sampling sample sizes. The bitrate is returned in
+ * megabits per second (Mbps). This method will fail if the track only contains a single sample
+ * and does not have an associated duration.
+ * @param trackIndex The source track index.
+ * @param bitrate Output param for the bitrate.
+ * @return AMEDIA_OK on success.
+ */
+ virtual media_status_t getEstimatedBitrateForTrack(int trackIndex, int32_t* bitrate);
+
+ /**
+ * Returns the sample information for the current sample in the specified track. Note that this
+ * method will block until the reader advances to a sample belonging to the requested track if
+ * the reader is in sequential access mode.
+ * @param trackIndex The track index (zero-based).
+ * @param info Pointer to a MediaSampleInfo object where the sample information is written.
+ * @return AMEDIA_OK on success, AMEDIA_ERROR_END_OF_STREAM if there are no more samples to read
+ * from the track and AMEDIA_ERROR_INVALID_PARAMETER if trackIndex is out of bounds or the
+ * info pointer is NULL. Other AMEDIA_ERROR_* return values may not be recoverable.
+ */
+ virtual media_status_t getSampleInfoForTrack(int trackIndex, MediaSampleInfo* info) = 0;
+
+ /**
+ * Returns the sample data for the current sample in the specified track into the supplied
+ * buffer. Note that this method will block until the reader advances to a sample belonging to
+ * the requested track if the reader is in sequential access mode. Upon successful return this
+ * method will also advance the specified track to the next sample.
+ * @param trackIndex The track index (zero-based).
+ * @param buffer The buffer to write the sample's data to.
+ * @param bufferSize The size of the supplied buffer.
+ * @return AMEDIA_OK on success, AMEDIA_ERROR_END_OF_STREAM if there are no more samples to read
+ * from the track and AMEDIA_ERROR_INVALID_PARAMETER if trackIndex is out of bounds, if the
+ * buffer pointer is NULL or if bufferSize is too small for the sample. Other AMEDIA_ERROR_*
+ * return values may not be recoverable.
+ */
+ virtual media_status_t readSampleDataForTrack(int trackIndex, uint8_t* buffer,
+ size_t bufferSize) = 0;
+
+ /**
+ * Advance the specified track to the next sample. If the reader is in sequential access mode
+ * and the current sample belongs to the specified track, the reader will also advance to the
+ * next sample and wake up any threads waiting on the new track.
+ * @param trackIndex The track index (zero-based).
+ */
+ virtual void advanceTrack(int trackIndex) = 0;
+
+ /** Destructor. */
+ virtual ~MediaSampleReader() = default;
+
+ /** Constructor. */
+ MediaSampleReader() = default;
+
+private:
+ MediaSampleReader(const MediaSampleReader&) = delete;
+ MediaSampleReader& operator=(const MediaSampleReader&) = delete;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_SAMPLE_READER_H
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSampleReaderNDK.h b/media/libmediatranscoding/transcoder/include/media/MediaSampleReaderNDK.h
new file mode 100644
index 0000000..2032def
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSampleReaderNDK.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SAMPLE_READER_NDK_H
+#define ANDROID_MEDIA_SAMPLE_READER_NDK_H
+
+#include <media/MediaSampleReader.h>
+#include <media/NdkMediaExtractor.h>
+
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+
+namespace android {
+
+/**
+ * MediaSampleReaderNDK is a concrete implementation of the MediaSampleReader interface based on the
+ * media NDK extractor.
+ */
+class MediaSampleReaderNDK : public MediaSampleReader {
+public:
+ /**
+ * Creates a new MediaSampleReaderNDK instance wrapped in a shared pointer.
+ * @param fd Source file descriptor. The caller is responsible for closing the fd and it is safe
+ * to do so when this method returns.
+ * @param offset Source data offset.
+ * @param size Source data size.
+ * @return A shared pointer referencing the new MediaSampleReaderNDK instance on success, or an
+ * empty shared pointer if an error occurred.
+ */
+ static std::shared_ptr<MediaSampleReader> createFromFd(int fd, size_t offset, size_t size);
+
+ AMediaFormat* getFileFormat() override;
+ size_t getTrackCount() const override;
+ AMediaFormat* getTrackFormat(int trackIndex) override;
+ media_status_t selectTrack(int trackIndex) override;
+ media_status_t setEnforceSequentialAccess(bool enforce) override;
+ media_status_t getEstimatedBitrateForTrack(int trackIndex, int32_t* bitrate) override;
+ media_status_t getSampleInfoForTrack(int trackIndex, MediaSampleInfo* info) override;
+ media_status_t readSampleDataForTrack(int trackIndex, uint8_t* buffer,
+ size_t bufferSize) override;
+ void advanceTrack(int trackIndex) override;
+
+ virtual ~MediaSampleReaderNDK() override;
+
+private:
+ /**
+ * SamplePosition describes the position of a single sample in the media file using its
+ * timestamp and index in the file.
+ */
+ struct SamplePosition {
+ uint64_t index = 0;
+ int64_t timeStampUs = 0;
+ bool isSet = false;
+
+ void set(uint64_t sampleIndex, int64_t sampleTimeUs) {
+ index = sampleIndex;
+ timeStampUs = sampleTimeUs;
+ isSet = true;
+ }
+
+ void reset() { isSet = false; }
+ };
+
+ /**
+ * SampleCursor keeps track of the sample position for a specific track. When the track is
+ * advanced, previous is set to current, current to next and next is reset. As the extractor
+ * advances over the combined timeline of tracks, it updates current and next for the track it
+ * points to if they are not already set.
+ */
+ struct SampleCursor {
+ SamplePosition previous;
+ SamplePosition current;
+ SamplePosition next;
+ };
+
+ /**
+ * Creates a new MediaSampleReaderNDK object from an AMediaExtractor. The extractor needs to be
+ * initialized with a valid data source before attempting to create a MediaSampleReaderNDK.
+ * @param extractor The initialized media extractor.
+ */
+ MediaSampleReaderNDK(AMediaExtractor* extractor);
+
+ /** Advances the track to next sample. */
+ void advanceTrack_l(int trackIndex);
+
+ /** Advances the extractor to next sample. */
+ bool advanceExtractor_l();
+
+ /** Moves the extractor backwards to the specified sample. */
+ media_status_t seekExtractorBackwards_l(int64_t targetTimeUs, int targetTrackIndex,
+ uint64_t targetSampleIndex);
+
+ /** Moves the extractor to the specified sample. */
+ media_status_t moveToSample_l(SamplePosition& pos, int trackIndex);
+
+ /** Moves the extractor to the next sample of the specified track. */
+ media_status_t moveToTrack_l(int trackIndex);
+
+ /** In sequential mode, waits for the extractor to reach the next sample for the track. */
+ media_status_t waitForTrack_l(int trackIndex, std::unique_lock<std::mutex>& lockHeld);
+
+ /**
+ * Ensures the extractor is ready for the next sample of the track regardless of access mode.
+ */
+ media_status_t primeExtractorForTrack_l(int trackIndex, std::unique_lock<std::mutex>& lockHeld);
+
+ AMediaExtractor* mExtractor = nullptr;
+ std::mutex mExtractorMutex;
+ const size_t mTrackCount;
+
+ int mExtractorTrackIndex = -1;
+ uint64_t mExtractorSampleIndex = 0;
+
+ bool mEosReached = false;
+ bool mEnforceSequentialAccess = false;
+
+ // Maps selected track indices to condition variables for sequential sample access control.
+ std::map<int, std::condition_variable> mTrackSignals;
+
+ // Samples cursor for each track in the file.
+ std::vector<SampleCursor> mTrackCursors;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_SAMPLE_READER_NDK_H
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaSampleWriter.h b/media/libmediatranscoding/transcoder/include/media/MediaSampleWriter.h
new file mode 100644
index 0000000..f762556
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaSampleWriter.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SAMPLE_WRITER_H
+#define ANDROID_MEDIA_SAMPLE_WRITER_H
+
+#include <media/MediaSample.h>
+#include <media/NdkMediaCodec.h>
+#include <media/NdkMediaError.h>
+#include <media/NdkMediaFormat.h>
+#include <utils/Mutex.h>
+
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <unordered_map>
+
+namespace android {
+
+/**
+ * Muxer interface used by MediaSampleWriter.
+ * Methods in this interface are guaranteed to be called sequentially by MediaSampleWriter.
+ */
+class MediaSampleWriterMuxerInterface {
+public:
+ /**
+ * Adds a new track to the muxer.
+ * @param trackFormat Format of the new track.
+ * @return A non-negative track index on success, or a negative number on failure.
+ */
+ virtual ssize_t addTrack(AMediaFormat* trackFormat) = 0;
+
+ /** Starts the muxer. */
+ virtual media_status_t start() = 0;
+ /**
+ * Writes sample data to a previously added track.
+ * @param trackIndex Index of the track the sample data belongs to.
+ * @param data The sample data.
+ * @param info The sample information.
+ * @return AMEDIA_OK on success.
+ */
+ virtual media_status_t writeSampleData(size_t trackIndex, const uint8_t* data,
+ const AMediaCodecBufferInfo* info) = 0;
+
+ /** Stops the muxer. */
+ virtual media_status_t stop() = 0;
+ virtual ~MediaSampleWriterMuxerInterface() = default;
+};
+
+/**
+ * MediaSampleWriter is a wrapper around a muxer. The sample writer puts samples on a queue that
+ * is serviced by an internal thread to minimize blocking time for clients. MediaSampleWriter also
+ * provides progress reporting. The default muxer interface implementation is based
+ * directly on AMediaMuxer.
+ */
+class MediaSampleWriter : public std::enable_shared_from_this<MediaSampleWriter> {
+public:
+ /** Function prototype for delivering media samples to the writer. */
+ using MediaSampleConsumerFunction =
+ std::function<void(const std::shared_ptr<MediaSample>& sample)>;
+
+ /** Callback interface. */
+ class CallbackInterface {
+ public:
+ /**
+ * Sample writer finished. The finished callback is only called after the sample writer has
+ * been successfully started.
+ */
+ virtual void onFinished(const MediaSampleWriter* writer, media_status_t status) = 0;
+
+ /** Sample writer progress update in percent. */
+ virtual void onProgressUpdate(const MediaSampleWriter* writer, int32_t progress) = 0;
+
+ virtual ~CallbackInterface() = default;
+ };
+
+ static std::shared_ptr<MediaSampleWriter> Create();
+
+ /**
+ * Initializes the sample writer with its default muxer implementation. MediaSampleWriter needs
+ * to be initialized before tracks are added and can only be initialized once.
+ * @param fd An open file descriptor to write to. The caller is responsible for closing this
+ * file descriptor and it is safe to do so once this method returns.
+ * @param callbacks Client callback object that gets called by the sample writer.
+ * @return True if the writer was successfully initialized.
+ */
+ bool init(int fd, const std::weak_ptr<CallbackInterface>& callbacks /* nonnull */);
+
+ /**
+ * Initializes the sample writer with a custom muxer interface implementation.
+ * @param muxer The custom muxer interface implementation.
+ * @param callbacks Client callback object that gets called by the sample writer.
+ * @return True if the writer was successfully initialized.
+ */
+ bool init(const std::shared_ptr<MediaSampleWriterMuxerInterface>& muxer /* nonnull */,
+ const std::weak_ptr<CallbackInterface>& callbacks /* nonnull */);
+
+ /**
+ * Adds a new track to the sample writer. Tracks must be added after the sample writer has been
+ * initialized and before it is started.
+ * @param trackFormat The format of the track to add.
+ * @return A sample consumer to add samples to if the track was successfully added, or nullptr
+ * if the track could not be added.
+ */
+ MediaSampleConsumerFunction addTrack(
+ const std::shared_ptr<AMediaFormat>& trackFormat /* nonnull */);
+
+ /**
+ * Starts the sample writer. The sample writer will start processing samples and writing them to
+ * its muxer on an internal thread. MediaSampleWriter can only be started once.
+ * @return True if the sample writer was successfully started.
+ */
+ bool start();
+
+ /**
+ * Stops the sample writer. If the sample writer is not yet finished its operation will be
+ * aborted and an error value will be returned to the client in the callback supplied to
+ * {@link #start}. If the sample writer has already finished and the client callback has fired
+ * the writer has already automatically stopped and there is no need to call stop manually. Once
+ * the sample writer has been stopped it cannot be restarted.
+ * @return True if the sample writer was successfully stopped on this call. False if the sample
+ * writer was already stopped or was never started.
+ */
+ bool stop();
+
+ /** Destructor. */
+ ~MediaSampleWriter();
+
+private:
+ struct TrackRecord {
+ TrackRecord(int64_t durationUs)
+ : mDurationUs(durationUs),
+ mFirstSampleTimeUs(0),
+ mPrevSampleTimeUs(INT64_MIN),
+ mFirstSampleTimeSet(false),
+ mReachedEos(false){};
+
+ TrackRecord() : TrackRecord(0){};
+
+ int64_t mDurationUs;
+ int64_t mFirstSampleTimeUs;
+ int64_t mPrevSampleTimeUs;
+ bool mFirstSampleTimeSet;
+ bool mReachedEos;
+ };
+
+ // Track index and sample.
+ using SampleEntry = std::pair<size_t, std::shared_ptr<MediaSample>>;
+
+ struct SampleComparator {
+ // Return true if lhs should come after rhs in the sample queue.
+ bool operator()(const SampleEntry& lhs, const SampleEntry& rhs) {
+ const bool lhsEos = lhs.second->info.flags & SAMPLE_FLAG_END_OF_STREAM;
+ const bool rhsEos = rhs.second->info.flags & SAMPLE_FLAG_END_OF_STREAM;
+
+ if (lhsEos && !rhsEos) {
+ return true;
+ } else if (!lhsEos && rhsEos) {
+ return false;
+ } else if (lhsEos && rhsEos) {
+ return lhs.first > rhs.first;
+ }
+
+ return lhs.second->info.presentationTimeUs > rhs.second->info.presentationTimeUs;
+ }
+ };
+
+ std::weak_ptr<CallbackInterface> mCallbacks;
+ std::shared_ptr<MediaSampleWriterMuxerInterface> mMuxer;
+
+ std::mutex mMutex; // Protects sample queue and state.
+ std::condition_variable mSampleSignal;
+ std::thread mThread;
+ std::unordered_map<size_t, TrackRecord> mTracks;
+ std::priority_queue<SampleEntry, std::vector<SampleEntry>, SampleComparator> mSampleQueue
+ GUARDED_BY(mMutex);
+
+ enum : int {
+ UNINITIALIZED,
+ INITIALIZED,
+ STARTED,
+ STOPPED,
+ } mState GUARDED_BY(mMutex);
+
+ MediaSampleWriter() : mState(UNINITIALIZED){};
+ void addSampleToTrack(size_t trackIndex, const std::shared_ptr<MediaSample>& sample);
+ media_status_t writeSamples();
+ media_status_t runWriterLoop();
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_SAMPLE_WRITER_H
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoder.h
new file mode 100644
index 0000000..c5e161c
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoder.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRACK_TRANSCODER_H
+#define ANDROID_MEDIA_TRACK_TRANSCODER_H
+
+#include <media/MediaSampleQueue.h>
+#include <media/MediaSampleReader.h>
+#include <media/MediaSampleWriter.h>
+#include <media/NdkMediaError.h>
+#include <media/NdkMediaFormat.h>
+#include <utils/Mutex.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+namespace android {
+
+class MediaTrackTranscoderCallback;
+
+/**
+ * Base class for all track transcoders. MediaTrackTranscoder operates asynchronously on an internal
+ * thread and communicates through a MediaTrackTranscoderCallback instance. Transcoded samples are
+ * enqueued on the MediaTrackTranscoder's output queue. Samples need to be dequeued from the output
+ * queue or the transcoder will run out of buffers and stall. Once the consumer is done with a
+ * transcoded sample it is the consumer's responsibility to as soon as possible release all
+ * references to that sample in order to return the buffer to the transcoder. MediaTrackTranscoder
+ * is an abstract class and instances are created through one of the concrete subclasses.
+ *
+ * The base class MediaTrackTranscoder is responsible for thread and state management and guarantees
+ * that operations {configure, start, stop} are sent to the derived class in correct order.
+ * MediaTrackTranscoder is also responsible for delivering callback notifications once the
+ * transcoder has been successfully started.
+ */
+class MediaTrackTranscoder {
+public:
+ /**
+ * Configures the track transcoder with an input MediaSampleReader and a destination format.
+ * A track transcoder has to be configured before it is started.
+ * @param mediaSampleReader The MediaSampleReader to read input samples from.
+ * @param trackIndex The index of the track to transcode in mediaSampleReader.
+ * @param destinationFormat The destination format.
+ * @return AMEDIA_OK if the track transcoder was successfully configured.
+ */
+ media_status_t configure(const std::shared_ptr<MediaSampleReader>& mediaSampleReader,
+ int trackIndex,
+ const std::shared_ptr<AMediaFormat>& destinationFormat);
+
+ /**
+ * Starts the track transcoder. Once started the track transcoder has to be stopped by calling
+ * {@link #stop}, even after completing successfully. Start should only be called once.
+ * @return True if the track transcoder started, or false if it had already been started.
+ */
+ bool start();
+
+ /**
+ * Stops the track transcoder. Once the transcoding has been stopped it cannot be restarted
+ * again. It is safe to call stop multiple times.
+ * @return True if the track transcoder stopped, or false if it was already stopped.
+ */
+ bool stop();
+
+ /**
+ * Set the sample consumer function. The MediaTrackTranscoder will deliver transcoded samples to
+ * this function. If the MediaTrackTranscoder is started before a consumer is set the transcoder
+ * will buffer a limited number of samples internally before stalling. Once a consumer has been
+ * set the internally buffered samples will be delivered to the consumer.
+ * @param sampleConsumer The sample consumer function.
+ */
+ void setSampleConsumer(const MediaSampleWriter::MediaSampleConsumerFunction& sampleConsumer);
+
+ /**
+ * Retrieves the track transcoder's final output format. The output is available after the
+ * track transcoder has been successfully configured.
+ * @return The track output format.
+ */
+ virtual std::shared_ptr<AMediaFormat> getOutputFormat() const = 0;
+
+ virtual ~MediaTrackTranscoder() = default;
+
+protected:
+ MediaTrackTranscoder(const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback)
+ : mTranscoderCallback(transcoderCallback){};
+
+ // Called by subclasses when the actual track format becomes available.
+ void notifyTrackFormatAvailable();
+
+ // Called by subclasses when a transcoded sample is available.
+ void onOutputSampleAvailable(const std::shared_ptr<MediaSample>& sample);
+
+ // configureDestinationFormat needs to be implemented by subclasses, and gets called on an
+ // external thread before start.
+ virtual media_status_t configureDestinationFormat(
+ const std::shared_ptr<AMediaFormat>& destinationFormat) = 0;
+
+ // runTranscodeLoop needs to be implemented by subclasses, and gets called on
+ // MediaTrackTranscoder's internal thread when the track transcoder is started.
+ virtual media_status_t runTranscodeLoop() = 0;
+
+ // abortTranscodeLoop needs to be implemented by subclasses, and should request transcoding to
+ // be aborted as soon as possible. It should be safe to call abortTranscodeLoop multiple times.
+ virtual void abortTranscodeLoop() = 0;
+
+ std::shared_ptr<MediaSampleReader> mMediaSampleReader;
+ int mTrackIndex;
+ std::shared_ptr<AMediaFormat> mSourceFormat;
+
+private:
+ std::mutex mSampleMutex;
+ MediaSampleQueue mSampleQueue GUARDED_BY(mSampleMutex);
+ MediaSampleWriter::MediaSampleConsumerFunction mSampleConsumer GUARDED_BY(mSampleMutex);
+ const std::weak_ptr<MediaTrackTranscoderCallback> mTranscoderCallback;
+ std::mutex mStateMutex;
+ std::thread mTranscodingThread GUARDED_BY(mStateMutex);
+ enum {
+ UNINITIALIZED,
+ CONFIGURED,
+ STARTED,
+ STOPPED,
+ } mState GUARDED_BY(mStateMutex) = UNINITIALIZED;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRACK_TRANSCODER_H
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoderCallback.h b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoderCallback.h
new file mode 100644
index 0000000..654171e
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaTrackTranscoderCallback.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRACK_TRANSCODER_CALLBACK_H
+#define ANDROID_MEDIA_TRACK_TRANSCODER_CALLBACK_H
+
+#include <media/NdkMediaError.h>
+
+namespace android {
+
+class MediaTrackTranscoder;
+
+/** Callback interface for MediaTrackTranscoder. */
+class MediaTrackTranscoderCallback {
+public:
+ /**
+ * Called when the MediaTrackTranscoder's actual track format becomes available.
+ * @param transcoder The MediaTrackTranscoder whose track format becomes available.
+ */
+ virtual void onTrackFormatAvailable(const MediaTrackTranscoder* transcoder);
+ /**
+     * Called when the MediaTrackTranscoder instance has finished transcoding all media samples
+ * successfully.
+ * @param transcoder The MediaTrackTranscoder that finished the transcoding.
+ */
+ virtual void onTrackFinished(const MediaTrackTranscoder* transcoder);
+
+ /**
+ * Called when the MediaTrackTranscoder instance encountered an error it could not recover from.
+ * @param transcoder The MediaTrackTranscoder that encountered the error.
+ * @param status The non-zero error code describing the encountered error.
+ */
+ virtual void onTrackError(const MediaTrackTranscoder* transcoder, media_status_t status);
+
+protected:
+ virtual ~MediaTrackTranscoderCallback() = default;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRACK_TRANSCODER_CALLBACK_H
diff --git a/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h b/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
new file mode 100644
index 0000000..9a367ca
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/MediaTranscoder.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODER_H
+#define ANDROID_MEDIA_TRANSCODER_H
+
+#include <media/MediaSampleWriter.h>
+#include <media/MediaTrackTranscoderCallback.h>
+#include <media/NdkMediaError.h>
+#include <media/NdkMediaFormat.h>
+#include <utils/Mutex.h>
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+#include <unordered_set>
+
+namespace android {
+
+class MediaSampleReader;
+class Parcel;
+
+class MediaTranscoder : public std::enable_shared_from_this<MediaTranscoder>,
+ public MediaTrackTranscoderCallback,
+ public MediaSampleWriter::CallbackInterface {
+public:
+ /** Callbacks from transcoder to client. */
+ class CallbackInterface {
+ public:
+ /** Transcoder finished successfully. */
+ virtual void onFinished(const MediaTranscoder* transcoder) = 0;
+
+ /** Transcoder encountered an unrecoverable error. */
+ virtual void onError(const MediaTranscoder* transcoder, media_status_t error) = 0;
+
+ /** Transcoder progress update reported in percent from 0 to 100. */
+ virtual void onProgressUpdate(const MediaTranscoder* transcoder, int32_t progress) = 0;
+
+ /**
+ * Transcoder lost codec resources and paused operations. The client can resume transcoding
+ * again when resources are available by either:
+ * 1) Calling resume on the same MediaTranscoder instance.
+         *   2) Creating a new MediaTranscoder instance with the paused state and then calling
+ * resume.
+ */
+ virtual void onCodecResourceLost(const MediaTranscoder* transcoder,
+ const std::shared_ptr<const Parcel>& pausedState) = 0;
+
+ virtual ~CallbackInterface() = default;
+ };
+
+ /**
+ * Creates a new MediaTranscoder instance. If the supplied paused state is valid, the transcoder
+ * will be initialized with the paused state and be ready to be resumed right away. It is not
+ * possible to change any configurations on a paused transcoder.
+ */
+ static std::shared_ptr<MediaTranscoder> create(
+ const std::shared_ptr<CallbackInterface>& callbacks,
+ const std::shared_ptr<const Parcel>& pausedState = nullptr);
+
+ /** Configures source from path fd. */
+ media_status_t configureSource(int fd);
+
+ /** Gets the media formats of all tracks in the file. */
+ std::vector<std::shared_ptr<AMediaFormat>> getTrackFormats() const;
+
+ /**
+     * Configures transcoding of a track. Tracks that are not configured will not be present in
+ * final transcoded file, i.e. tracks will be dropped by default. Passing nullptr for
+ * trackFormat means the track will be copied unchanged ("passthrough") to the destination.
+ * Track configurations must be done after the source has been configured.
+ * Note: trackFormat is not modified but cannot be const.
+ */
+ media_status_t configureTrackFormat(size_t trackIndex, AMediaFormat* trackFormat);
+
+ /** Configures destination from fd. */
+ media_status_t configureDestination(int fd);
+
+ /** Starts transcoding. No configurations can be made once the transcoder has started. */
+ media_status_t start();
+
+ /**
+ * Pauses transcoding. The transcoder's paused state is returned through pausedState. The
+ * paused state is only needed for resuming transcoding with a new MediaTranscoder instance. The
+ * caller can resume transcoding with the current MediaTranscoder instance at any time by
+ * calling resume(). It is not required to cancel a paused transcoder. The paused state is
+ * independent and the caller can always initialize a new transcoder instance with the same
+ * paused state. If the caller wishes to abandon a paused transcoder's operation they can
+ * release the transcoder instance, clear the paused state and delete the partial destination
+ * file. The caller can optionally call cancel to let the transcoder clean up the partial
+ * destination file.
+ *
+ * TODO: use NDK AParcel instead
+ * libbinder shouldn't be used by mainline modules. When transcoding goes mainline
+ * it needs to be replaced by stable AParcel.
+ */
+ media_status_t pause(std::shared_ptr<const Parcel>* pausedState);
+
+ /** Resumes a paused transcoding. */
+ media_status_t resume();
+
+ /** Cancels the transcoding. Once canceled the transcoding can not be restarted. Client
+ * will be responsible for cleaning up the abandoned file. */
+ media_status_t cancel();
+
+ virtual ~MediaTranscoder() = default;
+
+private:
+ MediaTranscoder(const std::shared_ptr<CallbackInterface>& callbacks);
+
+ // MediaTrackTranscoderCallback
+ virtual void onTrackFormatAvailable(const MediaTrackTranscoder* transcoder) override;
+ virtual void onTrackFinished(const MediaTrackTranscoder* transcoder) override;
+ virtual void onTrackError(const MediaTrackTranscoder* transcoder,
+ media_status_t status) override;
+ // ~MediaTrackTranscoderCallback
+
+ // MediaSampleWriter::CallbackInterface
+ virtual void onFinished(const MediaSampleWriter* writer, media_status_t status) override;
+ virtual void onProgressUpdate(const MediaSampleWriter* writer, int32_t progress) override;
+ // ~MediaSampleWriter::CallbackInterface
+
+ void onSampleWriterFinished(media_status_t status);
+ void sendCallback(media_status_t status);
+
+ std::shared_ptr<CallbackInterface> mCallbacks;
+ std::shared_ptr<MediaSampleReader> mSampleReader;
+ std::shared_ptr<MediaSampleWriter> mSampleWriter;
+ std::vector<std::shared_ptr<AMediaFormat>> mSourceTrackFormats;
+ std::vector<std::shared_ptr<MediaTrackTranscoder>> mTrackTranscoders;
+ std::mutex mTracksAddedMutex;
+ std::unordered_set<const MediaTrackTranscoder*> mTracksAdded GUARDED_BY(mTracksAddedMutex);
+
+ std::atomic_bool mCallbackSent = false;
+ std::atomic_bool mCancelled = false;
+};
+
+} // namespace android
+#endif // ANDROID_MEDIA_TRANSCODER_H
diff --git a/media/libmediatranscoding/transcoder/include/media/NdkCommon.h b/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
new file mode 100644
index 0000000..1a72be3
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/NdkCommon.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_TRANSCODING_NDK_COMMON_H
+#define ANDROID_MEDIA_TRANSCODING_NDK_COMMON_H
+
+#include <media/NdkMediaFormat.h>
+
+extern const char* AMEDIA_MIMETYPE_VIDEO_VP8;
+extern const char* AMEDIA_MIMETYPE_VIDEO_VP9;
+extern const char* AMEDIA_MIMETYPE_VIDEO_AV1;
+extern const char* AMEDIA_MIMETYPE_VIDEO_AVC;
+extern const char* AMEDIA_MIMETYPE_VIDEO_HEVC;
+extern const char* AMEDIA_MIMETYPE_VIDEO_MPEG4;
+extern const char* AMEDIA_MIMETYPE_VIDEO_H263;
+
+// TODO(b/146420990)
+// TODO: make MediaTranscoder use the consts from this header.
+typedef enum {
+ OUTPUT_FORMAT_START = 0,
+ OUTPUT_FORMAT_MPEG_4 = OUTPUT_FORMAT_START,
+ OUTPUT_FORMAT_WEBM = OUTPUT_FORMAT_START + 1,
+ OUTPUT_FORMAT_THREE_GPP = OUTPUT_FORMAT_START + 2,
+ OUTPUT_FORMAT_HEIF = OUTPUT_FORMAT_START + 3,
+ OUTPUT_FORMAT_OGG = OUTPUT_FORMAT_START + 4,
+ OUTPUT_FORMAT_LIST_END = OUTPUT_FORMAT_START + 4,
+} MuxerFormat;
+
+// Color formats supported by encoder - should mirror supportedColorList
+// from MediaCodecConstants.h (are these going to be deprecated)
+static constexpr int COLOR_FormatYUV420SemiPlanar = 21;
+static constexpr int COLOR_FormatYUV420Flexible = 0x7F420888;
+static constexpr int COLOR_FormatSurface = 0x7f000789;
+
+// constants not defined in NDK
+extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_ALLOW_FRAME_DROP;
+extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_REQUEST_SYNC_FRAME;
+extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_VIDEO_BITRATE;
+extern const char* TBD_AMEDIACODEC_PARAMETER_KEY_MAX_B_FRAMES;
+static constexpr int TBD_AMEDIACODEC_BUFFER_FLAG_KEY_FRAME = 0x1;
+
+static constexpr int kBitrateModeConstant = 2;
+
+namespace AMediaFormatUtils {
+
+typedef struct {
+ const char* key;
+ bool (*copy)(const char* key, AMediaFormat* from, AMediaFormat* to);
+ bool (*copy2)(const char* key, AMediaFormat* from, AMediaFormat* to);
+} EntryCopier;
+
+#define ENTRY_COPIER(keyName, typeName) \
+ { keyName, AMediaFormatUtils::CopyFormatEntry##typeName, nullptr }
+#define ENTRY_COPIER2(keyName, typeName, typeName2) \
+ { \
+ keyName, AMediaFormatUtils::CopyFormatEntry##typeName, \
+ AMediaFormatUtils::CopyFormatEntry##typeName2 \
+ }
+
+bool CopyFormatEntryString(const char* key, AMediaFormat* from, AMediaFormat* to);
+bool CopyFormatEntryInt64(const char* key, AMediaFormat* from, AMediaFormat* to);
+bool CopyFormatEntryInt32(const char* key, AMediaFormat* from, AMediaFormat* to);
+bool CopyFormatEntryFloat(const char* key, AMediaFormat* from, AMediaFormat* to);
+
+void CopyFormatEntries(AMediaFormat* from, AMediaFormat* to, const EntryCopier* entries,
+ size_t entryCount);
+
+bool SetDefaultFormatValueFloat(const char* key, AMediaFormat* format, float value);
+bool SetDefaultFormatValueInt32(const char* key, AMediaFormat* format, int32_t value);
+
+} // namespace AMediaFormatUtils
+#endif // ANDROID_MEDIA_TRANSCODING_NDK_COMMON_H
diff --git a/media/libmediatranscoding/transcoder/include/media/PassthroughTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/PassthroughTrackTranscoder.h
new file mode 100644
index 0000000..b9491ed
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/PassthroughTrackTranscoder.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_PASSTHROUGH_TRACK_TRANSCODER_H
+#define ANDROID_PASSTHROUGH_TRACK_TRANSCODER_H
+
+#include <media/MediaTrackTranscoder.h>
+#include <media/NdkMediaFormat.h>
+
+#include <condition_variable>
+#include <map>
+#include <mutex>
+#include <unordered_map>
+
+namespace android {
+
+/**
+ * Track transcoder for passthrough mode. Passthrough mode copies sample data from a track unchanged
+ * from source file to destination file. This track transcoder uses an internal pool of buffers.
+ * When the maximum number of buffers is allocated and all of them are waiting on the output queue
+ * the transcoder will stall until samples are dequeued from the output queue and released.
+ */
+class PassthroughTrackTranscoder : public MediaTrackTranscoder {
+public:
+ /** Maximum number of buffers to be allocated at a given time. */
+ static constexpr int kMaxBufferCountDefault = 16;
+
+ PassthroughTrackTranscoder(
+ const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback)
+ : MediaTrackTranscoder(transcoderCallback),
+ mBufferPool(std::make_shared<BufferPool>(kMaxBufferCountDefault)){};
+ virtual ~PassthroughTrackTranscoder() override = default;
+
+private:
+ friend class BufferPoolTests;
+
+ /** Class to pool and reuse buffers. */
+ class BufferPool {
+ public:
+ explicit BufferPool(int maxBufferCount) : mMaxBufferCount(maxBufferCount){};
+ ~BufferPool();
+
+ /**
+ * Retrieve a buffer from the pool. Buffers are allocated on demand. This method will block
+ * if the maximum number of buffers is reached and there are no free buffers available.
+ * @param minimumBufferSize The minimum size of the buffer.
+ * @return The buffer or nullptr if allocation failed or the pool was aborted.
+ */
+ uint8_t* getBufferWithSize(size_t minimumBufferSize);
+
+ /**
+ * Return a buffer to the pool.
+ * @param buffer The buffer to return.
+ */
+ void returnBuffer(uint8_t* buffer);
+
+ /** Wakes up threads waiting on buffers and prevents new buffers from being returned. */
+ void abort();
+
+ private:
+ // Maximum number of active buffers at a time.
+ const int mMaxBufferCount;
+
+ // Map containing all tracked buffers.
+ std::unordered_map<uint8_t*, size_t> mAddressSizeMap GUARDED_BY(mMutex);
+
+ // Map containing the currently free buffers.
+ std::multimap<size_t, uint8_t*> mFreeBufferMap GUARDED_BY(mMutex);
+
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ bool mAborted GUARDED_BY(mMutex) = false;
+ };
+
+ // MediaTrackTranscoder
+ media_status_t runTranscodeLoop() override;
+ void abortTranscodeLoop() override;
+ media_status_t configureDestinationFormat(
+ const std::shared_ptr<AMediaFormat>& destinationFormat) override;
+ std::shared_ptr<AMediaFormat> getOutputFormat() const override;
+ // ~MediaTrackTranscoder
+
+ std::shared_ptr<BufferPool> mBufferPool;
+ bool mEosFromSource = false;
+ std::atomic_bool mStopRequested = false;
+};
+
+} // namespace android
+#endif // ANDROID_PASSTHROUGH_TRACK_TRANSCODER_H
diff --git a/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
new file mode 100644
index 0000000..d000d7f
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/include/media/VideoTrackTranscoder.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_VIDEO_TRACK_TRANSCODER_H
+#define ANDROID_VIDEO_TRACK_TRANSCODER_H
+
+#include <android/native_window.h>
+#include <media/MediaTrackTranscoder.h>
+#include <media/NdkMediaCodec.h>
+#include <media/NdkMediaFormat.h>
+
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+
+namespace android {
+
+/**
+ * Track transcoder for video tracks. VideoTrackTranscoder uses AMediaCodec from the Media NDK
+ * internally. The two media codecs are run in asynchronous mode and share uncompressed buffers
+ * using a native surface (ANativeWindow). Codec callback events are placed on a message queue and
+ * serviced in order on the transcoding thread managed by MediaTrackTranscoder.
+ */
+class VideoTrackTranscoder : public std::enable_shared_from_this<VideoTrackTranscoder>,
+ public MediaTrackTranscoder {
+public:
+ static std::shared_ptr<VideoTrackTranscoder> create(
+ const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback);
+
+ virtual ~VideoTrackTranscoder() override;
+
+private:
+ friend struct AsyncCodecCallbackDispatch;
+
+ // Minimal blocking queue used as a message queue by VideoTrackTranscoder.
+ template <typename T>
+ class BlockingQueue {
+ public:
+ void push(T const& value, bool front = false);
+ T pop();
+ void abort();
+
+ private:
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ std::deque<T> mQueue;
+ bool mAborted = false;
+ };
+ class CodecWrapper;
+
+ VideoTrackTranscoder(const std::weak_ptr<MediaTrackTranscoderCallback>& transcoderCallback)
+ : MediaTrackTranscoder(transcoderCallback){};
+
+ // MediaTrackTranscoder
+ media_status_t runTranscodeLoop() override;
+ void abortTranscodeLoop() override;
+ media_status_t configureDestinationFormat(
+ const std::shared_ptr<AMediaFormat>& destinationFormat) override;
+ std::shared_ptr<AMediaFormat> getOutputFormat() const override;
+ // ~MediaTrackTranscoder
+
+ // Enqueues an input sample with the decoder.
+ void enqueueInputSample(int32_t bufferIndex);
+
+ // Moves a decoded buffer from the decoder's output to the encoder's input.
+ void transferBuffer(int32_t bufferIndex, AMediaCodecBufferInfo bufferInfo);
+
+ // Dequeues an encoded buffer from the encoder and adds it to the output queue.
+ void dequeueOutputSample(int32_t bufferIndex, AMediaCodecBufferInfo bufferInfo);
+
+ // Updates the video track's actual format based on encoder output format.
+ void updateTrackFormat(AMediaFormat* outputFormat);
+
+ AMediaCodec* mDecoder = nullptr;
+ std::shared_ptr<CodecWrapper> mEncoder;
+ ANativeWindow* mSurface = nullptr;
+ bool mEosFromSource = false;
+ bool mEosFromEncoder = false;
+ bool mStopRequested = false;
+ media_status_t mStatus = AMEDIA_OK;
+ MediaSampleInfo mSampleInfo;
+ BlockingQueue<std::function<void()>> mCodecMessageQueue;
+ std::shared_ptr<AMediaFormat> mDestinationFormat;
+ std::shared_ptr<AMediaFormat> mActualOutputFormat;
+};
+
+} // namespace android
+#endif // ANDROID_VIDEO_TRACK_TRANSCODER_H
diff --git a/media/libmediatranscoding/transcoder/tests/Android.bp b/media/libmediatranscoding/transcoder/tests/Android.bp
new file mode 100644
index 0000000..7ae6261
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/Android.bp
@@ -0,0 +1,93 @@
+// Unit tests for libmediatranscoder.
+
+filegroup {
+ name: "test_assets",
+ srcs: ["assets/*"],
+}
+
+cc_defaults {
+ name: "testdefaults",
+
+ header_libs: [
+ "libbase_headers",
+ "libmedia_headers",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libmediandk",
+ "libmediatranscoder_asan",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ address: true,
+ },
+
+ data: [":test_assets"],
+ test_config_template: "AndroidTestTemplate.xml",
+ test_suites: ["device-tests", "TranscoderTests"],
+}
+
+// MediaSampleReaderNDK unit test
+cc_test {
+ name: "MediaSampleReaderNDKTests",
+ defaults: ["testdefaults"],
+ srcs: ["MediaSampleReaderNDKTests.cpp"],
+}
+
+// MediaSampleQueue unit test
+cc_test {
+ name: "MediaSampleQueueTests",
+ defaults: ["testdefaults"],
+ srcs: ["MediaSampleQueueTests.cpp"],
+}
+
+// MediaTrackTranscoder unit test
+cc_test {
+ name: "MediaTrackTranscoderTests",
+ defaults: ["testdefaults"],
+ srcs: ["MediaTrackTranscoderTests.cpp"],
+ shared_libs: ["libbinder_ndk"],
+}
+
+// VideoTrackTranscoder unit test
+cc_test {
+ name: "VideoTrackTranscoderTests",
+ defaults: ["testdefaults"],
+ srcs: ["VideoTrackTranscoderTests.cpp"],
+}
+
+// PassthroughTrackTranscoder unit test
+cc_test {
+ name: "PassthroughTrackTranscoderTests",
+ defaults: ["testdefaults"],
+ srcs: ["PassthroughTrackTranscoderTests.cpp"],
+ shared_libs: ["libcrypto"],
+}
+
+// MediaSampleWriter unit test
+cc_test {
+ name: "MediaSampleWriterTests",
+ defaults: ["testdefaults"],
+ srcs: ["MediaSampleWriterTests.cpp"],
+}
+
+// MediaTranscoder unit test
+cc_test {
+ name: "MediaTranscoderTests",
+ defaults: ["testdefaults"],
+ srcs: ["MediaTranscoderTests.cpp"],
+ shared_libs: ["libbinder_ndk"],
+}
diff --git a/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
new file mode 100644
index 0000000..a9a7e2e
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/AndroidTestTemplate.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Unit test configuration for {MODULE}">
+ <option name="test-suite-tag" value="TranscoderTests" />
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="false" />
+ <option name="push-file"
+ key="assets"
+ value="/data/local/tmp/TranscodingTestAssets" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="module-name" value="{MODULE}" />
+ </test>
+</configuration>
+
diff --git a/media/libmediatranscoding/transcoder/tests/MediaSampleQueueTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaSampleQueueTests.cpp
new file mode 100644
index 0000000..6357e4d
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/MediaSampleQueueTests.cpp
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaSampleQueue
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaSampleQueueTests"
+
+#include <android-base/logging.h>
+#include <gtest/gtest.h>
+#include <media/MediaSampleQueue.h>
+
+#include <thread>
+
+namespace android {
+
+/** Duration to use when delaying threads to order operations. */
+static constexpr int64_t kThreadDelayDurationMs = 100;
+
+class MediaSampleQueueTests : public ::testing::Test {
+public:
+ MediaSampleQueueTests() { LOG(DEBUG) << "MediaSampleQueueTests created"; }
+ ~MediaSampleQueueTests() { LOG(DEBUG) << "MediaSampleQueueTests destroyed"; }
+};
+
+static std::shared_ptr<MediaSample> newSample(uint32_t id) {
+ return MediaSample::createWithReleaseCallback(nullptr /* buffer */, 0 /* offset */, id,
+ nullptr /* callback */);
+}
+
+TEST_F(MediaSampleQueueTests, TestSequentialDequeueOrder) {
+ LOG(DEBUG) << "TestSequentialDequeueOrder Starts";
+
+ static constexpr int kNumSamples = 4;
+ MediaSampleQueue sampleQueue;
+ EXPECT_TRUE(sampleQueue.isEmpty());
+
+ // Enqueue loop.
+ for (int i = 0; i < kNumSamples; ++i) {
+ sampleQueue.enqueue(newSample(i));
+ EXPECT_FALSE(sampleQueue.isEmpty());
+ }
+
+ // Dequeue loop.
+ for (int i = 0; i < kNumSamples; ++i) {
+ std::shared_ptr<MediaSample> sample;
+ bool aborted = sampleQueue.dequeue(&sample);
+ EXPECT_NE(sample, nullptr);
+ EXPECT_EQ(sample->bufferId, i);
+ EXPECT_FALSE(aborted);
+ }
+ EXPECT_TRUE(sampleQueue.isEmpty());
+}
+
+TEST_F(MediaSampleQueueTests, TestInterleavedDequeueOrder) {
+ LOG(DEBUG) << "TestInterleavedDequeueOrder Starts";
+
+ static constexpr int kNumSamples = 4;
+ MediaSampleQueue sampleQueue;
+
+ // Enqueue and dequeue.
+ for (int i = 0; i < kNumSamples; ++i) {
+ sampleQueue.enqueue(newSample(i));
+ EXPECT_FALSE(sampleQueue.isEmpty());
+
+ std::shared_ptr<MediaSample> sample;
+ bool aborted = sampleQueue.dequeue(&sample);
+ EXPECT_NE(sample, nullptr);
+ EXPECT_EQ(sample->bufferId, i);
+ EXPECT_FALSE(aborted);
+ EXPECT_TRUE(sampleQueue.isEmpty());
+ }
+}
+
+TEST_F(MediaSampleQueueTests, TestBlockingDequeue) {
+ LOG(DEBUG) << "TestBlockingDequeue Starts";
+
+ MediaSampleQueue sampleQueue;
+
+ std::thread enqueueThread([&sampleQueue] {
+ // Note: This implementation is a bit racy. Any amount of sleep will not guarantee that the
+ // main thread will be blocked on the sample queue by the time this thread calls enqueue.
+ // But we can say with high confidence that it will and the test will not fail regardless.
+ std::this_thread::sleep_for(std::chrono::milliseconds(kThreadDelayDurationMs));
+ sampleQueue.enqueue(newSample(1));
+ });
+
+ std::shared_ptr<MediaSample> sample;
+ bool aborted = sampleQueue.dequeue(&sample);
+ EXPECT_NE(sample, nullptr);
+ EXPECT_EQ(sample->bufferId, 1);
+ EXPECT_FALSE(aborted);
+ EXPECT_TRUE(sampleQueue.isEmpty());
+
+ enqueueThread.join();
+}
+
+TEST_F(MediaSampleQueueTests, TestDequeueBufferRelease) {
+ LOG(DEBUG) << "TestDequeueBufferRelease Starts";
+
+ static constexpr int kNumSamples = 4;
+ std::vector<bool> bufferReleased(kNumSamples, false);
+
+ MediaSample::OnSampleReleasedCallback callback = [&bufferReleased](MediaSample* sample) {
+ bufferReleased[sample->bufferId] = true;
+ };
+
+ MediaSampleQueue sampleQueue;
+ for (int i = 0; i < kNumSamples; ++i) {
+ bool aborted = sampleQueue.enqueue(
+ MediaSample::createWithReleaseCallback(nullptr, 0, i, callback));
+ EXPECT_FALSE(aborted);
+ }
+
+ for (int i = 0; i < kNumSamples; ++i) {
+ EXPECT_FALSE(bufferReleased[i]);
+ }
+
+ for (int i = 0; i < kNumSamples; ++i) {
+ {
+ std::shared_ptr<MediaSample> sample;
+ bool aborted = sampleQueue.dequeue(&sample);
+ EXPECT_NE(sample, nullptr);
+ EXPECT_EQ(sample->bufferId, i);
+ EXPECT_FALSE(bufferReleased[i]);
+ EXPECT_FALSE(aborted);
+ }
+
+ for (int j = 0; j < kNumSamples; ++j) {
+ EXPECT_EQ(bufferReleased[j], j <= i);
+ }
+ }
+}
+
+TEST_F(MediaSampleQueueTests, TestAbortBufferRelease) {
+ LOG(DEBUG) << "TestAbortBufferRelease Starts";
+
+ static constexpr int kNumSamples = 4;
+ std::vector<bool> bufferReleased(kNumSamples, false);
+
+ MediaSample::OnSampleReleasedCallback callback = [&bufferReleased](MediaSample* sample) {
+ bufferReleased[sample->bufferId] = true;
+ };
+
+ MediaSampleQueue sampleQueue;
+ for (int i = 0; i < kNumSamples; ++i) {
+ bool aborted = sampleQueue.enqueue(
+ MediaSample::createWithReleaseCallback(nullptr, 0, i, callback));
+ EXPECT_FALSE(aborted);
+ }
+
+ for (int i = 0; i < kNumSamples; ++i) {
+ EXPECT_FALSE(bufferReleased[i]);
+ }
+
+ EXPECT_FALSE(sampleQueue.isEmpty());
+ sampleQueue.abort();
+ EXPECT_TRUE(sampleQueue.isEmpty());
+
+ for (int i = 0; i < kNumSamples; ++i) {
+ EXPECT_TRUE(bufferReleased[i]);
+ }
+}
+
+TEST_F(MediaSampleQueueTests, TestNonEmptyAbort) {
+ LOG(DEBUG) << "TestNonEmptyAbort Starts";
+
+ MediaSampleQueue sampleQueue;
+ bool aborted = sampleQueue.enqueue(newSample(1));
+ EXPECT_FALSE(aborted);
+
+ sampleQueue.abort();
+
+ std::shared_ptr<MediaSample> sample;
+ aborted = sampleQueue.dequeue(&sample);
+ EXPECT_TRUE(aborted);
+ EXPECT_EQ(sample, nullptr);
+
+ aborted = sampleQueue.enqueue(sample);
+ EXPECT_TRUE(aborted);
+}
+
+TEST_F(MediaSampleQueueTests, TestEmptyAbort) {
+ LOG(DEBUG) << "TestEmptyAbort Starts";
+
+ MediaSampleQueue sampleQueue;
+ sampleQueue.abort();
+
+ std::shared_ptr<MediaSample> sample;
+ bool aborted = sampleQueue.dequeue(&sample);
+ EXPECT_TRUE(aborted);
+ EXPECT_EQ(sample, nullptr);
+
+ aborted = sampleQueue.enqueue(sample);
+ EXPECT_TRUE(aborted);
+}
+
+TEST_F(MediaSampleQueueTests, TestBlockingAbort) {
+ LOG(DEBUG) << "TestBlockingAbort Starts";
+
+ MediaSampleQueue sampleQueue;
+
+ std::thread abortingThread([&sampleQueue] {
+ // Note: This implementation is a bit racy. Any amount of sleep will not guarantee that the
+ // main thread will be blocked on the sample queue by the time this thread calls abort.
+ // But we can say with high confidence that it will and the test will not fail regardless.
+ std::this_thread::sleep_for(std::chrono::milliseconds(kThreadDelayDurationMs));
+ sampleQueue.abort();
+ });
+
+ std::shared_ptr<MediaSample> sample;
+ bool aborted = sampleQueue.dequeue(&sample);
+ EXPECT_TRUE(aborted);
+ EXPECT_EQ(sample, nullptr);
+
+ abortingThread.join();
+}
+
+} // namespace android
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libmediatranscoding/transcoder/tests/MediaSampleReaderNDKTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaSampleReaderNDKTests.cpp
new file mode 100644
index 0000000..9c9c8b5
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/MediaSampleReaderNDKTests.cpp
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaSampleReaderNDK
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaSampleReaderNDKTests"
+
+#include <android-base/logging.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <utils/Timers.h>
+
+#include <cmath>
+#include <mutex>
+#include <thread>
+
+// TODO(b/153453392): Test more asset types and validate sample data from readSampleDataForTrack.
+// TODO(b/153453392): Test for sequential and parallel (single thread and multi thread) access.
+// TODO(b/153453392): Test for switching between sequential and parallel access in different points
+// of time.
+
+namespace android {
+
+#define SEC_TO_USEC(s) ((s)*1000 * 1000)
+
+class MediaSampleReaderNDKTests : public ::testing::Test {
+public:
+ MediaSampleReaderNDKTests() { LOG(DEBUG) << "MediaSampleReaderNDKTests created"; }
+
+ void SetUp() override {
+ LOG(DEBUG) << "MediaSampleReaderNDKTests set up";
+ const char* sourcePath =
+ "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+
+ mExtractor = AMediaExtractor_new();
+ ASSERT_NE(mExtractor, nullptr);
+
+ mSourceFd = open(sourcePath, O_RDONLY);
+ ASSERT_GT(mSourceFd, 0);
+
+ mFileSize = lseek(mSourceFd, 0, SEEK_END);
+ lseek(mSourceFd, 0, SEEK_SET);
+
+ media_status_t status =
+ AMediaExtractor_setDataSourceFd(mExtractor, mSourceFd, 0, mFileSize);
+ ASSERT_EQ(status, AMEDIA_OK);
+
+ mTrackCount = AMediaExtractor_getTrackCount(mExtractor);
+ for (size_t trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ AMediaExtractor_selectTrack(mExtractor, trackIndex);
+ }
+ }
+
+ void initExtractorTimestamps() {
+ // Save all sample timestamps, per track, as reported by the extractor.
+ mExtractorTimestamps.resize(mTrackCount);
+ do {
+ const int trackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
+ const int64_t sampleTime = AMediaExtractor_getSampleTime(mExtractor);
+
+ mExtractorTimestamps[trackIndex].push_back(sampleTime);
+ } while (AMediaExtractor_advance(mExtractor));
+
+ AMediaExtractor_seekTo(mExtractor, 0, AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC);
+ }
+
+ std::vector<int32_t> getTrackBitrates() {
+ size_t totalSize[mTrackCount];
+ memset(totalSize, 0, sizeof(totalSize));
+
+ do {
+ const int trackIndex = AMediaExtractor_getSampleTrackIndex(mExtractor);
+ totalSize[trackIndex] += AMediaExtractor_getSampleSize(mExtractor);
+ } while (AMediaExtractor_advance(mExtractor));
+
+ AMediaExtractor_seekTo(mExtractor, 0, AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC);
+
+ std::vector<int32_t> bitrates;
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ int64_t durationUs;
+ AMediaFormat* trackFormat = AMediaExtractor_getTrackFormat(mExtractor, trackIndex);
+ EXPECT_NE(trackFormat, nullptr);
+ EXPECT_TRUE(AMediaFormat_getInt64(trackFormat, AMEDIAFORMAT_KEY_DURATION, &durationUs));
+ bitrates.push_back(roundf((float)totalSize[trackIndex] * 8 * 1000000 / durationUs));
+ }
+
+ return bitrates;
+ }
+
+ void TearDown() override {
+ LOG(DEBUG) << "MediaSampleReaderNDKTests tear down";
+ AMediaExtractor_delete(mExtractor);
+ close(mSourceFd);
+ }
+
+ ~MediaSampleReaderNDKTests() { LOG(DEBUG) << "MediaSampleReaderNDKTests destroyed"; }
+
+ AMediaExtractor* mExtractor = nullptr;
+ size_t mTrackCount;
+ int mSourceFd;
+ size_t mFileSize;
+ std::vector<std::vector<int64_t>> mExtractorTimestamps;
+};
+
+TEST_F(MediaSampleReaderNDKTests, TestSampleTimes) {
+ LOG(DEBUG) << "TestSampleTimes Starts";
+
+ std::shared_ptr<MediaSampleReader> sampleReader =
+ MediaSampleReaderNDK::createFromFd(mSourceFd, 0, mFileSize);
+ ASSERT_TRUE(sampleReader);
+
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ EXPECT_EQ(sampleReader->selectTrack(trackIndex), AMEDIA_OK);
+ }
+
+ // Initialize the extractor timestamps.
+ initExtractorTimestamps();
+
+ std::mutex timestampMutex;
+ std::vector<std::thread> trackThreads;
+ std::vector<std::vector<int64_t>> readerTimestamps(mTrackCount);
+
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ trackThreads.emplace_back([sampleReader, trackIndex, &timestampMutex, &readerTimestamps] {
+ MediaSampleInfo info;
+ while (true) {
+ media_status_t status = sampleReader->getSampleInfoForTrack(trackIndex, &info);
+ if (status != AMEDIA_OK) {
+ EXPECT_EQ(status, AMEDIA_ERROR_END_OF_STREAM);
+ EXPECT_TRUE((info.flags & SAMPLE_FLAG_END_OF_STREAM) != 0);
+ break;
+ }
+ ASSERT_TRUE((info.flags & SAMPLE_FLAG_END_OF_STREAM) == 0);
+ timestampMutex.lock();
+ readerTimestamps[trackIndex].push_back(info.presentationTimeUs);
+ timestampMutex.unlock();
+ sampleReader->advanceTrack(trackIndex);
+ }
+ });
+ }
+
+ for (auto& thread : trackThreads) {
+ thread.join();
+ }
+
+ for (int trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ LOG(DEBUG) << "Track " << trackIndex << ", comparing "
+ << readerTimestamps[trackIndex].size() << " samples.";
+ EXPECT_EQ(readerTimestamps[trackIndex].size(), mExtractorTimestamps[trackIndex].size());
+ for (size_t sampleIndex = 0; sampleIndex < readerTimestamps[trackIndex].size();
+ sampleIndex++) {
+ EXPECT_EQ(readerTimestamps[trackIndex][sampleIndex],
+ mExtractorTimestamps[trackIndex][sampleIndex]);
+ }
+ }
+}
+
+TEST_F(MediaSampleReaderNDKTests, TestEstimatedBitrateAccuracy) {
+ // Just put a somewhat reasonable upper bound on the estimated bitrate expected in our test
+ // assets. This is mostly to make sure the estimation is not way off.
+ static constexpr int32_t kMaxEstimatedBitrate = 100 * 1000 * 1000; // 100 Mbps
+
+ auto sampleReader = MediaSampleReaderNDK::createFromFd(mSourceFd, 0, mFileSize);
+ ASSERT_TRUE(sampleReader);
+
+ std::vector<int32_t> actualTrackBitrates = getTrackBitrates();
+ for (int trackIndex = 0; trackIndex < mTrackCount; ++trackIndex) {
+ EXPECT_EQ(sampleReader->selectTrack(trackIndex), AMEDIA_OK);
+
+ int32_t bitrate;
+ EXPECT_EQ(sampleReader->getEstimatedBitrateForTrack(trackIndex, &bitrate), AMEDIA_OK);
+ EXPECT_GT(bitrate, 0);
+ EXPECT_LT(bitrate, kMaxEstimatedBitrate);
+
+ // Note: The test asset currently used in this test is shorter than the sampling duration
+ // used to estimate the bitrate in the sample reader. So for now the estimation should be
+ // exact but if/when a longer asset is used a reasonable delta needs to be defined.
+ EXPECT_EQ(bitrate, actualTrackBitrates[trackIndex]);
+ }
+}
+
+TEST_F(MediaSampleReaderNDKTests, TestInvalidFd) {
+ std::shared_ptr<MediaSampleReader> sampleReader =
+ MediaSampleReaderNDK::createFromFd(0, 0, mFileSize);
+ ASSERT_TRUE(sampleReader == nullptr);
+
+ sampleReader = MediaSampleReaderNDK::createFromFd(-1, 0, mFileSize);
+ ASSERT_TRUE(sampleReader == nullptr);
+}
+
+TEST_F(MediaSampleReaderNDKTests, TestZeroSize) {
+ std::shared_ptr<MediaSampleReader> sampleReader =
+ MediaSampleReaderNDK::createFromFd(mSourceFd, 0, 0);
+ ASSERT_TRUE(sampleReader == nullptr);
+}
+
+TEST_F(MediaSampleReaderNDKTests, TestInvalidOffset) {
+ std::shared_ptr<MediaSampleReader> sampleReader =
+ MediaSampleReaderNDK::createFromFd(mSourceFd, mFileSize, mFileSize);
+ ASSERT_TRUE(sampleReader == nullptr);
+}
+
+} // namespace android
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
new file mode 100644
index 0000000..46f3e9b
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/MediaSampleWriterTests.cpp
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaSampleWriter
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaSampleWriterTests"
+
+#include <android-base/logging.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <media/MediaSampleQueue.h>
+#include <media/MediaSampleWriter.h>
+#include <media/NdkMediaExtractor.h>
+
+#include <condition_variable>
+#include <list>
+#include <mutex>
+
+namespace android {
+
+/** Muxer interface to enable MediaSampleWriter testing. */
+class TestMuxer : public MediaSampleWriterMuxerInterface {
+public:
+ // MuxerInterface
+ ssize_t addTrack(AMediaFormat* trackFormat) override {
+ mEventQueue.push_back(AddTrack(trackFormat));
+ return mTrackCount++;
+ }
+ media_status_t start() override {
+ mEventQueue.push_back(Start());
+ return AMEDIA_OK;
+ }
+
+ media_status_t writeSampleData(size_t trackIndex, const uint8_t* data,
+ const AMediaCodecBufferInfo* info) override {
+ mEventQueue.push_back(WriteSample(trackIndex, data, info));
+ return AMEDIA_OK;
+ }
+ media_status_t stop() override {
+ mEventQueue.push_back(Stop());
+ return AMEDIA_OK;
+ }
+ // ~MuxerInterface
+
+ struct Event {
+ enum { NoEvent, AddTrack, Start, WriteSample, Stop } type = NoEvent;
+ const AMediaFormat* format = nullptr;
+ size_t trackIndex = 0;
+ const uint8_t* data = nullptr;
+ AMediaCodecBufferInfo info{};
+ };
+
+ static constexpr Event NoEvent = {Event::NoEvent, nullptr, 0, nullptr, {}};
+
+ static Event AddTrack(const AMediaFormat* format) {
+ return {.type = Event::AddTrack, .format = format};
+ }
+
+ static Event Start() { return {.type = Event::Start}; }
+ static Event Stop() { return {.type = Event::Stop}; }
+
+ static Event WriteSample(size_t trackIndex, const uint8_t* data,
+ const AMediaCodecBufferInfo* info) {
+ return {.type = Event::WriteSample, .trackIndex = trackIndex, .data = data, .info = *info};
+ }
+
+ static Event WriteSampleWithPts(size_t trackIndex, int64_t pts) {
+ return {.type = Event::WriteSample, .trackIndex = trackIndex, .info = {0, 0, pts, 0}};
+ }
+
+ void pushEvent(const Event& e) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mEventQueue.push_back(e);
+ mCondition.notify_one();
+ }
+
+ const Event& popEvent(bool wait = false) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (wait && mEventQueue.empty()) {
+ mCondition.wait_for(lock, std::chrono::milliseconds(200));
+ }
+
+ if (mEventQueue.empty()) {
+ mPoppedEvent = NoEvent;
+ } else {
+ mPoppedEvent = *mEventQueue.begin();
+ mEventQueue.pop_front();
+ }
+ return mPoppedEvent;
+ }
+
+private:
+ Event mPoppedEvent;
+ std::list<Event> mEventQueue;
+ ssize_t mTrackCount = 0;
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+};
+
+bool operator==(const AMediaCodecBufferInfo& lhs, const AMediaCodecBufferInfo& rhs) {
+ return lhs.offset == rhs.offset && lhs.size == rhs.size &&
+ lhs.presentationTimeUs == rhs.presentationTimeUs && lhs.flags == rhs.flags;
+}
+
+bool operator==(const TestMuxer::Event& lhs, const TestMuxer::Event& rhs) {
+ return lhs.type == rhs.type && lhs.format == rhs.format && lhs.trackIndex == rhs.trackIndex &&
+ lhs.data == rhs.data && lhs.info == rhs.info;
+}
+
+/** Represents a media source file. */
+class TestMediaSource {
+public:
+ void init() {
+ static const char* sourcePath =
+ "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+
+ mExtractor = AMediaExtractor_new();
+ ASSERT_NE(mExtractor, nullptr);
+
+ int sourceFd = open(sourcePath, O_RDONLY);
+ ASSERT_GT(sourceFd, 0);
+
+ off_t fileSize = lseek(sourceFd, 0, SEEK_END);
+ lseek(sourceFd, 0, SEEK_SET);
+
+ media_status_t status = AMediaExtractor_setDataSourceFd(mExtractor, sourceFd, 0, fileSize);
+ ASSERT_EQ(status, AMEDIA_OK);
+ close(sourceFd);
+
+ mTrackCount = AMediaExtractor_getTrackCount(mExtractor);
+ ASSERT_GT(mTrackCount, 1);
+ for (size_t trackIndex = 0; trackIndex < mTrackCount; trackIndex++) {
+ AMediaFormat* trackFormat = AMediaExtractor_getTrackFormat(mExtractor, trackIndex);
+ ASSERT_NE(trackFormat, nullptr);
+
+ const char* mime = nullptr;
+ AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+ if (strncmp(mime, "video/", 6) == 0) {
+ mVideoTrackIndex = trackIndex;
+ } else if (strncmp(mime, "audio/", 6) == 0) {
+ mAudioTrackIndex = trackIndex;
+ }
+
+ mTrackFormats.push_back(
+ std::shared_ptr<AMediaFormat>(trackFormat, &AMediaFormat_delete));
+
+ AMediaExtractor_selectTrack(mExtractor, trackIndex);
+ }
+ EXPECT_GE(mVideoTrackIndex, 0);
+ EXPECT_GE(mAudioTrackIndex, 0);
+ }
+
+ void reset() const {
+ media_status_t status = AMediaExtractor_seekTo(mExtractor, 0 /* seekPosUs */,
+ AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC);
+ ASSERT_EQ(status, AMEDIA_OK);
+ }
+
+ AMediaExtractor* mExtractor = nullptr;
+ size_t mTrackCount = 0;
+ std::vector<std::shared_ptr<AMediaFormat>> mTrackFormats;
+ int mVideoTrackIndex = -1;
+ int mAudioTrackIndex = -1;
+};
+
+class TestCallbacks : public MediaSampleWriter::CallbackInterface {
+public:
+ TestCallbacks(bool expectSuccess = true) : mExpectSuccess(expectSuccess) {}
+
+ bool hasFinished() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ return mFinished;
+ }
+
+ // MediaSampleWriter::CallbackInterface
+ virtual void onFinished(const MediaSampleWriter* writer __unused,
+ media_status_t status) override {
+ std::unique_lock<std::mutex> lock(mMutex);
+ EXPECT_FALSE(mFinished);
+ if (mExpectSuccess) {
+ EXPECT_EQ(status, AMEDIA_OK);
+ } else {
+ EXPECT_NE(status, AMEDIA_OK);
+ }
+ mFinished = true;
+ mCondition.notify_all();
+ }
+
+ virtual void onProgressUpdate(const MediaSampleWriter* writer __unused,
+ int32_t progress) override {
+ EXPECT_GT(progress, mLastProgress);
+ EXPECT_GE(progress, 0);
+ EXPECT_LE(progress, 100);
+
+ mLastProgress = progress;
+ mProgressUpdateCount++;
+ }
+ // ~MediaSampleWriter::CallbackInterface
+
+ void waitForWritingFinished() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (!mFinished) {
+ mCondition.wait(lock);
+ }
+ }
+
+ uint32_t getProgressUpdateCount() const { return mProgressUpdateCount; }
+
+private:
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ bool mFinished = false;
+ bool mExpectSuccess;
+ int32_t mLastProgress = -1;
+ uint32_t mProgressUpdateCount = 0;
+};
+
+class MediaSampleWriterTests : public ::testing::Test {
+public:
+ MediaSampleWriterTests() { LOG(DEBUG) << "MediaSampleWriterTests created"; }
+ ~MediaSampleWriterTests() { LOG(DEBUG) << "MediaSampleWriterTests destroyed"; }
+
+ static const TestMediaSource& getMediaSource() {
+ static TestMediaSource sMediaSource;
+ static std::once_flag sOnceToken;
+
+ std::call_once(sOnceToken, [] { sMediaSource.init(); });
+
+ sMediaSource.reset();
+ return sMediaSource;
+ }
+
+ static std::shared_ptr<MediaSample> newSample(int64_t ptsUs, uint32_t flags, size_t size,
+ size_t offset, const uint8_t* buffer) {
+ auto sample = std::make_shared<MediaSample>();
+ sample->info.presentationTimeUs = ptsUs;
+ sample->info.flags = flags;
+ sample->info.size = size;
+ sample->dataOffset = offset;
+ sample->buffer = buffer;
+ return sample;
+ }
+
+ static std::shared_ptr<MediaSample> newSampleEos() {
+ return newSample(0, SAMPLE_FLAG_END_OF_STREAM, 0, 0, nullptr);
+ }
+
+ static std::shared_ptr<MediaSample> newSampleWithPts(int64_t ptsUs) {
+ static uint32_t sampleCount = 0;
+
+ // Use sampleCount to get a unique mock sample.
+ uint32_t sampleId = ++sampleCount;
+ return newSample(ptsUs, 0, sampleId, sampleId, reinterpret_cast<const uint8_t*>(sampleId));
+ }
+
+ static std::shared_ptr<MediaSample> newSampleWithPtsOnly(int64_t ptsUs) {
+ return newSample(ptsUs, 0, 0, 0, nullptr);
+ }
+
+ void SetUp() override {
+ LOG(DEBUG) << "MediaSampleWriterTests set up";
+ mTestMuxer = std::make_shared<TestMuxer>();
+ }
+
+ void TearDown() override {
+ LOG(DEBUG) << "MediaSampleWriterTests tear down";
+ mTestMuxer.reset();
+ }
+
+protected:
+ std::shared_ptr<TestMuxer> mTestMuxer;
+ std::shared_ptr<TestCallbacks> mTestCallbacks = std::make_shared<TestCallbacks>();
+};
+
+TEST_F(MediaSampleWriterTests, TestAddTrackWithoutInit) {
+ const TestMediaSource& mediaSource = getMediaSource();
+
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_EQ(writer->addTrack(mediaSource.mTrackFormats[0]), nullptr);
+}
+
+TEST_F(MediaSampleWriterTests, TestStartWithoutInit) {
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_FALSE(writer->start());
+}
+
+TEST_F(MediaSampleWriterTests, TestStartWithoutTracks) {
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_TRUE(writer->init(mTestMuxer, mTestCallbacks));
+ EXPECT_FALSE(writer->start());
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::NoEvent);
+}
+
+TEST_F(MediaSampleWriterTests, TestAddInvalidTrack) {
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_TRUE(writer->init(mTestMuxer, mTestCallbacks));
+
+ EXPECT_EQ(writer->addTrack(nullptr), nullptr);
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::NoEvent);
+}
+
+TEST_F(MediaSampleWriterTests, TestDoubleStartStop) {
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+
+ std::shared_ptr<TestCallbacks> callbacks =
+ std::make_shared<TestCallbacks>(false /* expectSuccess */);
+ EXPECT_TRUE(writer->init(mTestMuxer, callbacks));
+
+ const TestMediaSource& mediaSource = getMediaSource();
+ EXPECT_NE(writer->addTrack(mediaSource.mTrackFormats[0]), nullptr);
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::AddTrack(mediaSource.mTrackFormats[0].get()));
+
+ ASSERT_TRUE(writer->start());
+ EXPECT_FALSE(writer->start());
+
+ EXPECT_TRUE(writer->stop());
+ EXPECT_TRUE(callbacks->hasFinished());
+ EXPECT_FALSE(writer->stop());
+}
+
+TEST_F(MediaSampleWriterTests, TestStopWithoutStart) {
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_TRUE(writer->init(mTestMuxer, mTestCallbacks));
+
+ const TestMediaSource& mediaSource = getMediaSource();
+ EXPECT_NE(writer->addTrack(mediaSource.mTrackFormats[0]), nullptr);
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::AddTrack(mediaSource.mTrackFormats[0].get()));
+
+ EXPECT_FALSE(writer->stop());
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::NoEvent);
+}
+
+TEST_F(MediaSampleWriterTests, TestStartWithoutCallback) {
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+
+ std::weak_ptr<MediaSampleWriter::CallbackInterface> unassignedWp;
+ EXPECT_FALSE(writer->init(mTestMuxer, unassignedWp));
+
+ std::shared_ptr<MediaSampleWriter::CallbackInterface> unassignedSp;
+ EXPECT_FALSE(writer->init(mTestMuxer, unassignedSp));
+
+ const TestMediaSource& mediaSource = getMediaSource();
+ EXPECT_EQ(writer->addTrack(mediaSource.mTrackFormats[0]), nullptr);
+ ASSERT_FALSE(writer->start());
+}
+
+TEST_F(MediaSampleWriterTests, TestProgressUpdate) {
+ const TestMediaSource& mediaSource = getMediaSource();
+
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_TRUE(writer->init(mTestMuxer, mTestCallbacks));
+
+ std::shared_ptr<AMediaFormat> videoFormat =
+ std::shared_ptr<AMediaFormat>(AMediaFormat_new(), &AMediaFormat_delete);
+ AMediaFormat_copy(videoFormat.get(),
+ mediaSource.mTrackFormats[mediaSource.mVideoTrackIndex].get());
+
+ AMediaFormat_setInt64(videoFormat.get(), AMEDIAFORMAT_KEY_DURATION, 100);
+ auto sampleConsumer = writer->addTrack(videoFormat);
+ EXPECT_NE(sampleConsumer, nullptr);
+ ASSERT_TRUE(writer->start());
+
+ for (int64_t pts = 0; pts < 100; ++pts) {
+ sampleConsumer(newSampleWithPts(pts));
+ }
+ sampleConsumer(newSampleEos());
+ mTestCallbacks->waitForWritingFinished();
+
+ EXPECT_EQ(mTestCallbacks->getProgressUpdateCount(), 100);
+}
+
+TEST_F(MediaSampleWriterTests, TestInterleaving) {
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_TRUE(writer->init(mTestMuxer, mTestCallbacks));
+
+ // Use two tracks for this test.
+ static constexpr int kNumTracks = 2;
+ MediaSampleWriter::MediaSampleConsumerFunction sampleConsumers[kNumTracks];
+ std::vector<std::pair<std::shared_ptr<MediaSample>, size_t>> addedSamples;
+ const TestMediaSource& mediaSource = getMediaSource();
+
+ for (int trackIdx = 0; trackIdx < kNumTracks; ++trackIdx) {
+ auto trackFormat = mediaSource.mTrackFormats[trackIdx % mediaSource.mTrackCount];
+ sampleConsumers[trackIdx] = writer->addTrack(trackFormat);
+ EXPECT_NE(sampleConsumers[trackIdx], nullptr);
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::AddTrack(trackFormat.get()));
+ }
+
+ // Create samples in the expected interleaved order for easy verification.
+ auto addSampleToTrackWithPts = [&addedSamples, &sampleConsumers](int trackIndex, int64_t pts) {
+ auto sample = newSampleWithPts(pts);
+ sampleConsumers[trackIndex](sample);
+ addedSamples.emplace_back(sample, trackIndex);
+ };
+
+ addSampleToTrackWithPts(0, 0);
+ addSampleToTrackWithPts(1, 4);
+
+ addSampleToTrackWithPts(0, 1);
+ addSampleToTrackWithPts(0, 2);
+ addSampleToTrackWithPts(0, 3);
+ addSampleToTrackWithPts(0, 10);
+
+ addSampleToTrackWithPts(1, 5);
+ addSampleToTrackWithPts(1, 6);
+ addSampleToTrackWithPts(1, 11);
+
+ addSampleToTrackWithPts(0, 12);
+ addSampleToTrackWithPts(1, 13);
+
+ for (int trackIndex = 0; trackIndex < kNumTracks; ++trackIndex) {
+ sampleConsumers[trackIndex](newSampleEos());
+ }
+
+ // Start the writer.
+ ASSERT_TRUE(writer->start());
+
+ // Wait for writer to complete.
+ mTestCallbacks->waitForWritingFinished();
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::Start());
+
+ std::sort(addedSamples.begin(), addedSamples.end(),
+ [](const std::pair<std::shared_ptr<MediaSample>, size_t>& left,
+ const std::pair<std::shared_ptr<MediaSample>, size_t>& right) {
+ return left.first->info.presentationTimeUs < right.first->info.presentationTimeUs;
+ });
+
+ // Verify sample order.
+ for (auto entry : addedSamples) {
+ auto sample = entry.first;
+ auto trackIndex = entry.second;
+
+ const TestMuxer::Event& event = mTestMuxer->popEvent();
+ EXPECT_EQ(event.type, TestMuxer::Event::WriteSample);
+ EXPECT_EQ(event.trackIndex, trackIndex);
+ EXPECT_EQ(event.data, sample->buffer);
+ EXPECT_EQ(event.info.offset, sample->dataOffset);
+ EXPECT_EQ(event.info.size, sample->info.size);
+ EXPECT_EQ(event.info.presentationTimeUs, sample->info.presentationTimeUs);
+ EXPECT_EQ(event.info.flags, sample->info.flags);
+ }
+
+ // Verify EOS samples.
+ for (int trackIndex = 0; trackIndex < kNumTracks; ++trackIndex) {
+ auto trackFormat = mediaSource.mTrackFormats[trackIndex % mediaSource.mTrackCount];
+ int64_t duration = 0;
+ AMediaFormat_getInt64(trackFormat.get(), AMEDIAFORMAT_KEY_DURATION, &duration);
+
+ // EOS timestamp = first sample timestamp + duration.
+ const int64_t endTime = duration + (trackIndex == 1 ? 4 : 0);
+ const AMediaCodecBufferInfo info = {0, 0, endTime, AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM};
+
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::WriteSample(trackIndex, nullptr, &info));
+ }
+
+ EXPECT_EQ(mTestMuxer->popEvent(), TestMuxer::Stop());
+ EXPECT_TRUE(writer->stop());
+ EXPECT_TRUE(mTestCallbacks->hasFinished());
+}
+
+// Convenience function for reading a sample from an AMediaExtractor represented as a MediaSample.
+static std::shared_ptr<MediaSample> readSampleAndAdvance(AMediaExtractor* extractor,
+ size_t* trackIndexOut) {
+ int trackIndex = AMediaExtractor_getSampleTrackIndex(extractor);
+ if (trackIndex < 0) {
+ return nullptr;
+ }
+
+ if (trackIndexOut != nullptr) {
+ *trackIndexOut = trackIndex;
+ }
+
+ ssize_t sampleSize = AMediaExtractor_getSampleSize(extractor);
+ int64_t sampleTimeUs = AMediaExtractor_getSampleTime(extractor);
+ uint32_t flags = AMediaExtractor_getSampleFlags(extractor);
+
+ size_t bufferSize = static_cast<size_t>(sampleSize);
+ uint8_t* buffer = new uint8_t[bufferSize];
+
+ ssize_t dataRead = AMediaExtractor_readSampleData(extractor, buffer, bufferSize);
+ EXPECT_EQ(dataRead, sampleSize);
+
+ auto sample = MediaSample::createWithReleaseCallback(
+ buffer, 0 /* offset */, 0 /* id */, [buffer](MediaSample*) { delete[] buffer; });
+ sample->info.size = bufferSize;
+ sample->info.presentationTimeUs = sampleTimeUs;
+ sample->info.flags = flags;
+
+ (void)AMediaExtractor_advance(extractor);
+ return sample;
+}
+
+TEST_F(MediaSampleWriterTests, TestDefaultMuxer) {
+ // Write samples straight from an extractor and validate output file.
+ static const char* destinationPath =
+ "/data/local/tmp/MediaSampleWriterTests_TestDefaultMuxer_output.MP4";
+ const int destinationFd =
+ open(destinationPath, O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR | S_IROTH);
+ ASSERT_GT(destinationFd, 0);
+
+ // Initialize writer.
+ std::shared_ptr<MediaSampleWriter> writer = MediaSampleWriter::Create();
+ EXPECT_TRUE(writer->init(destinationFd, mTestCallbacks));
+ close(destinationFd);
+
+ // Add tracks.
+ const TestMediaSource& mediaSource = getMediaSource();
+ std::vector<MediaSampleWriter::MediaSampleConsumerFunction> sampleConsumers;
+
+ for (size_t trackIndex = 0; trackIndex < mediaSource.mTrackCount; trackIndex++) {
+ auto consumer = writer->addTrack(mediaSource.mTrackFormats[trackIndex]);
+ sampleConsumers.push_back(consumer);
+ }
+
+ // Start the writer.
+ ASSERT_TRUE(writer->start());
+
+ // Enqueue samples and finally End Of Stream.
+ std::shared_ptr<MediaSample> sample;
+ size_t trackIndex;
+ while ((sample = readSampleAndAdvance(mediaSource.mExtractor, &trackIndex)) != nullptr) {
+ sampleConsumers[trackIndex](sample);
+ }
+ for (trackIndex = 0; trackIndex < mediaSource.mTrackCount; trackIndex++) {
+ sampleConsumers[trackIndex](newSampleEos());
+ }
+
+ // Wait for writer.
+ mTestCallbacks->waitForWritingFinished();
+ EXPECT_TRUE(writer->stop());
+
+ // Compare output file with source.
+ mediaSource.reset();
+
+ AMediaExtractor* extractor = AMediaExtractor_new();
+ ASSERT_NE(extractor, nullptr);
+
+ int sourceFd = open(destinationPath, O_RDONLY);
+ ASSERT_GT(sourceFd, 0);
+
+ off_t fileSize = lseek(sourceFd, 0, SEEK_END);
+ lseek(sourceFd, 0, SEEK_SET);
+
+ media_status_t status = AMediaExtractor_setDataSourceFd(extractor, sourceFd, 0, fileSize);
+ ASSERT_EQ(status, AMEDIA_OK);
+ close(sourceFd);
+
+ size_t trackCount = AMediaExtractor_getTrackCount(extractor);
+ EXPECT_EQ(trackCount, mediaSource.mTrackCount);
+
+ for (size_t trackIndex = 0; trackIndex < trackCount; trackIndex++) {
+ AMediaFormat* trackFormat = AMediaExtractor_getTrackFormat(extractor, trackIndex);
+ ASSERT_NE(trackFormat, nullptr);
+
+ AMediaExtractor_selectTrack(extractor, trackIndex);
+ }
+
+ // Compare samples.
+ std::shared_ptr<MediaSample> sample1 = readSampleAndAdvance(mediaSource.mExtractor, nullptr);
+ std::shared_ptr<MediaSample> sample2 = readSampleAndAdvance(extractor, nullptr);
+
+ while (sample1 != nullptr && sample2 != nullptr) {
+ EXPECT_EQ(sample1->info.presentationTimeUs, sample2->info.presentationTimeUs);
+ EXPECT_EQ(sample1->info.size, sample2->info.size);
+ EXPECT_EQ(sample1->info.flags, sample2->info.flags);
+
+ EXPECT_EQ(memcmp(sample1->buffer, sample2->buffer, sample1->info.size), 0);
+
+ sample1 = readSampleAndAdvance(mediaSource.mExtractor, nullptr);
+ sample2 = readSampleAndAdvance(extractor, nullptr);
+ }
+ EXPECT_EQ(sample1, nullptr);
+ EXPECT_EQ(sample2, nullptr);
+
+ AMediaExtractor_delete(extractor);
+}
+
+} // namespace android
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
new file mode 100644
index 0000000..83f0a4a
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/MediaTrackTranscoderTests.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaTrackTranscoder
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaTrackTranscoderTests"
+
+#include <android-base/logging.h>
+#include <android/binder_process.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <media/MediaTrackTranscoder.h>
+#include <media/PassthroughTrackTranscoder.h>
+#include <media/VideoTrackTranscoder.h>
+
+#include "TrackTranscoderTestUtils.h"
+
+namespace android {
+
+/** TrackTranscoder types to test. */
+enum TrackTranscoderType {
+ VIDEO,
+ PASSTHROUGH,
+};
+
+class MediaTrackTranscoderTests : public ::testing::TestWithParam<TrackTranscoderType> {
+public:
+ MediaTrackTranscoderTests() { LOG(DEBUG) << "MediaTrackTranscoderTests created"; }
+
+ void SetUp() override {
+ LOG(DEBUG) << "MediaTrackTranscoderTests set up";
+
+ // Need to start a thread pool to prevent AMediaExtractor binder calls from starving
+ // (b/155663561).
+ ABinderProcess_startThreadPool();
+
+ mCallback = std::make_shared<TestCallback>();
+
+ switch (GetParam()) {
+ case VIDEO:
+ mTranscoder = VideoTrackTranscoder::create(mCallback);
+ break;
+ case PASSTHROUGH:
+ mTranscoder = std::make_shared<PassthroughTrackTranscoder>(mCallback);
+ break;
+ }
+ ASSERT_NE(mTranscoder, nullptr);
+
+ initSampleReader();
+ }
+
+ void initSampleReader() {
+ const char* sourcePath =
+ "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+
+ const int sourceFd = open(sourcePath, O_RDONLY);
+ ASSERT_GT(sourceFd, 0);
+
+ const size_t fileSize = lseek(sourceFd, 0, SEEK_END);
+ lseek(sourceFd, 0, SEEK_SET);
+
+ mMediaSampleReader = MediaSampleReaderNDK::createFromFd(sourceFd, 0 /* offset */, fileSize);
+ ASSERT_NE(mMediaSampleReader, nullptr);
+ close(sourceFd);
+
+ for (size_t trackIndex = 0; trackIndex < mMediaSampleReader->getTrackCount();
+ ++trackIndex) {
+ AMediaFormat* trackFormat = mMediaSampleReader->getTrackFormat(trackIndex);
+ ASSERT_NE(trackFormat, nullptr);
+
+ const char* mime = nullptr;
+ AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+ ASSERT_NE(mime, nullptr);
+
+ if (GetParam() == VIDEO && strncmp(mime, "video/", 6) == 0) {
+ mTrackIndex = trackIndex;
+
+ mSourceFormat = std::shared_ptr<AMediaFormat>(trackFormat, &AMediaFormat_delete);
+ ASSERT_NE(mSourceFormat, nullptr);
+
+ mDestinationFormat =
+ TrackTranscoderTestUtils::getDefaultVideoDestinationFormat(trackFormat);
+ ASSERT_NE(mDestinationFormat, nullptr);
+ break;
+ } else if (GetParam() == PASSTHROUGH && strncmp(mime, "audio/", 6) == 0) {
+ // TODO(lnilsson): Test metadata track passthrough after hkuang@ provides sample.
+ mTrackIndex = trackIndex;
+
+ mSourceFormat = std::shared_ptr<AMediaFormat>(trackFormat, &AMediaFormat_delete);
+ ASSERT_NE(mSourceFormat, nullptr);
+ break;
+ }
+
+ AMediaFormat_delete(trackFormat);
+ }
+
+ ASSERT_NE(mSourceFormat, nullptr);
+ EXPECT_EQ(mMediaSampleReader->selectTrack(mTrackIndex), AMEDIA_OK);
+ }
+
+ // Drains the transcoder's output queue in a loop.
+ void drainOutputSamples(int numSamplesToSave = 0) {
+ mTranscoder->setSampleConsumer(
+ [this, numSamplesToSave](const std::shared_ptr<MediaSample>& sample) {
+ ASSERT_NE(sample, nullptr);
+
+ mGotEndOfStream = (sample->info.flags & SAMPLE_FLAG_END_OF_STREAM) != 0;
+
+ if (mSavedSamples.size() < numSamplesToSave) {
+ mSavedSamples.push_back(sample);
+ }
+
+ if (mSavedSamples.size() == numSamplesToSave || mGotEndOfStream) {
+ mSamplesSavedSemaphore.signal();
+ }
+ });
+ }
+
+ void TearDown() override { LOG(DEBUG) << "MediaTrackTranscoderTests tear down"; }
+
+ ~MediaTrackTranscoderTests() { LOG(DEBUG) << "MediaTrackTranscoderTests destroyed"; }
+
+protected:
+ std::shared_ptr<MediaTrackTranscoder> mTranscoder;
+ std::shared_ptr<TestCallback> mCallback;
+
+ std::shared_ptr<MediaSampleReader> mMediaSampleReader;
+ int mTrackIndex;
+
+ std::shared_ptr<AMediaFormat> mSourceFormat;
+ std::shared_ptr<AMediaFormat> mDestinationFormat;
+
+ std::vector<std::shared_ptr<MediaSample>> mSavedSamples;
+ OneShotSemaphore mSamplesSavedSemaphore;
+ bool mGotEndOfStream = false;
+};
+
+TEST_P(MediaTrackTranscoderTests, WaitNormalOperation) {
+ LOG(DEBUG) << "Testing WaitNormalOperation";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ ASSERT_TRUE(mTranscoder->start());
+ drainOutputSamples();
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
+ EXPECT_TRUE(mTranscoder->stop());
+ EXPECT_TRUE(mGotEndOfStream);
+}
+
+TEST_P(MediaTrackTranscoderTests, StopNormalOperation) {
+ LOG(DEBUG) << "Testing StopNormalOperation";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ EXPECT_TRUE(mTranscoder->start());
+ EXPECT_TRUE(mTranscoder->stop());
+}
+
+TEST_P(MediaTrackTranscoderTests, StartWithoutConfigure) {
+ LOG(DEBUG) << "Testing StartWithoutConfigure";
+ EXPECT_FALSE(mTranscoder->start());
+}
+
+TEST_P(MediaTrackTranscoderTests, StopWithoutStart) {
+ LOG(DEBUG) << "Testing StopWithoutStart";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ EXPECT_FALSE(mTranscoder->stop());
+}
+
+TEST_P(MediaTrackTranscoderTests, DoubleStartStop) {
+ LOG(DEBUG) << "Testing DoubleStartStop";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ EXPECT_TRUE(mTranscoder->start());
+ EXPECT_FALSE(mTranscoder->start());
+ EXPECT_TRUE(mTranscoder->stop());
+ EXPECT_FALSE(mTranscoder->stop());
+}
+
+TEST_P(MediaTrackTranscoderTests, DoubleConfigure) {
+ LOG(DEBUG) << "Testing DoubleConfigure";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_ERROR_UNSUPPORTED);
+}
+
+TEST_P(MediaTrackTranscoderTests, ConfigureAfterFail) {
+ LOG(DEBUG) << "Testing ConfigureAfterFail";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, -1, mDestinationFormat),
+ AMEDIA_ERROR_INVALID_PARAMETER);
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+}
+
+TEST_P(MediaTrackTranscoderTests, RestartAfterStop) {
+ LOG(DEBUG) << "Testing RestartAfterStop";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ EXPECT_TRUE(mTranscoder->start());
+ EXPECT_TRUE(mTranscoder->stop());
+ EXPECT_FALSE(mTranscoder->start());
+}
+
+TEST_P(MediaTrackTranscoderTests, RestartAfterFinish) {
+ LOG(DEBUG) << "Testing RestartAfterFinish";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ ASSERT_TRUE(mTranscoder->start());
+ drainOutputSamples();
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
+ EXPECT_TRUE(mTranscoder->stop());
+ EXPECT_FALSE(mTranscoder->start());
+ EXPECT_TRUE(mGotEndOfStream);
+}
+
+TEST_P(MediaTrackTranscoderTests, HoldSampleAfterTranscoderRelease) {
+ LOG(DEBUG) << "Testing HoldSampleAfterTranscoderRelease";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ ASSERT_TRUE(mTranscoder->start());
+ drainOutputSamples(1 /* numSamplesToSave */);
+ EXPECT_EQ(mCallback->waitUntilFinished(), AMEDIA_OK);
+ EXPECT_TRUE(mTranscoder->stop());
+ EXPECT_TRUE(mGotEndOfStream);
+
+ mTranscoder.reset();
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(20));
+ mSavedSamples.clear();
+}
+
+TEST_P(MediaTrackTranscoderTests, HoldSampleAfterTranscoderStop) {
+ LOG(DEBUG) << "Testing HoldSampleAfterTranscoderStop";
+ EXPECT_EQ(mTranscoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ ASSERT_TRUE(mTranscoder->start());
+ drainOutputSamples(1 /* numSamplesToSave */);
+ mSamplesSavedSemaphore.wait();
+ EXPECT_TRUE(mTranscoder->stop());
+
+ std::this_thread::sleep_for(std::chrono::milliseconds(20));
+ mSavedSamples.clear();
+}
+
+TEST_P(MediaTrackTranscoderTests, NullSampleReader) {
+ LOG(DEBUG) << "Testing NullSampleReader";
+ std::shared_ptr<MediaSampleReader> nullSampleReader;
+ EXPECT_NE(mTranscoder->configure(nullSampleReader, mTrackIndex, mDestinationFormat), AMEDIA_OK);
+ ASSERT_FALSE(mTranscoder->start());
+}
+
+TEST_P(MediaTrackTranscoderTests, InvalidTrackIndex) {
+ LOG(DEBUG) << "Testing InvalidTrackIndex";
+ EXPECT_NE(mTranscoder->configure(mMediaSampleReader, -1, mDestinationFormat), AMEDIA_OK);
+ EXPECT_NE(mTranscoder->configure(mMediaSampleReader, mMediaSampleReader->getTrackCount(),
+ mDestinationFormat),
+ AMEDIA_OK);
+}
+
+}  // namespace android
+
+using namespace android;
+
+INSTANTIATE_TEST_SUITE_P(MediaTrackTranscoderTestsAll, MediaTrackTranscoderTests,
+ ::testing::Values(VIDEO, PASSTHROUGH));
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
new file mode 100644
index 0000000..7a968eb
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/MediaTranscoderTests.cpp
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaTranscoder
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaTranscoderTests"
+
+#include <android-base/logging.h>
+#include <android/binder_process.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <media/MediaTranscoder.h>
+#include <media/NdkCommon.h>
+
+namespace android {
+
+#define DEFINE_FORMAT_VALUE_EQUAL_FUNC(_type, _typeName) \
+ static bool equal##_typeName(const char* key, AMediaFormat* src, AMediaFormat* dst) { \
+ _type srcVal, dstVal; \
+ bool srcPresent = AMediaFormat_get##_typeName(src, key, &srcVal); \
+ bool dstPresent = AMediaFormat_get##_typeName(dst, key, &dstVal); \
+ return (srcPresent == dstPresent) && (!srcPresent || (srcVal == dstVal)); \
+ }
+
+DEFINE_FORMAT_VALUE_EQUAL_FUNC(int64_t, Int64);
+DEFINE_FORMAT_VALUE_EQUAL_FUNC(int32_t, Int32);
+
+struct FormatVerifierEntry {
+ const char* key;
+ std::function<bool(const char*, AMediaFormat*, AMediaFormat*)> equal;
+};
+
+static const FormatVerifierEntry kFieldsToPreserve[] = {
+ {AMEDIAFORMAT_KEY_DURATION, equalInt64}, {AMEDIAFORMAT_KEY_WIDTH, equalInt32},
+ {AMEDIAFORMAT_KEY_HEIGHT, equalInt32}, {AMEDIAFORMAT_KEY_FRAME_RATE, equalInt32},
+ {AMEDIAFORMAT_KEY_FRAME_COUNT, equalInt32}, {AMEDIAFORMAT_KEY_DISPLAY_WIDTH, equalInt32},
+ {AMEDIAFORMAT_KEY_DISPLAY_HEIGHT, equalInt32}, {AMEDIAFORMAT_KEY_SAR_WIDTH, equalInt32},
+ {AMEDIAFORMAT_KEY_SAR_HEIGHT, equalInt32}, {AMEDIAFORMAT_KEY_ROTATION, equalInt32},
+};
+
+class TestCallbacks : public MediaTranscoder::CallbackInterface {
+public:
+ virtual void onFinished(const MediaTranscoder* transcoder __unused) override {
+ std::unique_lock<std::mutex> lock(mMutex);
+ EXPECT_FALSE(mFinished);
+ mFinished = true;
+ mCondition.notify_all();
+ }
+
+ virtual void onError(const MediaTranscoder* transcoder __unused,
+ media_status_t error) override {
+ std::unique_lock<std::mutex> lock(mMutex);
+ EXPECT_NE(error, AMEDIA_OK);
+ EXPECT_FALSE(mFinished);
+ mFinished = true;
+ mStatus = error;
+ mCondition.notify_all();
+ }
+
+ virtual void onProgressUpdate(const MediaTranscoder* transcoder __unused,
+ int32_t progress) override {
+ std::unique_lock<std::mutex> lock(mMutex);
+ if (progress > 0 && !mProgressMade) {
+ mProgressMade = true;
+ mCondition.notify_all();
+ }
+ }
+
+ virtual void onCodecResourceLost(const MediaTranscoder* transcoder __unused,
+ const std::shared_ptr<const Parcel>& pausedState
+ __unused) override {}
+
+ void waitForTranscodingFinished() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (!mFinished) {
+ mCondition.wait(lock);
+ }
+ }
+
+ void waitForProgressMade() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (!mProgressMade && !mFinished) {
+ mCondition.wait(lock);
+ }
+ }
+ media_status_t mStatus = AMEDIA_OK;
+
+private:
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ bool mFinished = false;
+ bool mProgressMade = false;
+};
+
+// Write-only, create file if non-existent, don't overwrite existing file.
+static constexpr int kOpenFlags = O_WRONLY | O_CREAT | O_EXCL;
+// User R+W permission.
+static constexpr int kFileMode = S_IRUSR | S_IWUSR;
+
+class MediaTranscoderTests : public ::testing::Test {
+public:
+ MediaTranscoderTests() { LOG(DEBUG) << "MediaTranscoderTests created"; }
+ ~MediaTranscoderTests() { LOG(DEBUG) << "MediaTranscoderTests destroyed"; }
+
+ void SetUp() override {
+ LOG(DEBUG) << "MediaTranscoderTests set up";
+ mCallbacks = std::make_shared<TestCallbacks>();
+ ABinderProcess_startThreadPool();
+ }
+
+ void TearDown() override {
+ LOG(DEBUG) << "MediaTranscoderTests tear down";
+ mCallbacks.reset();
+ }
+
+ void deleteFile(const char* path) { unlink(path); }
+
+ float getFileSizeDiffPercent(const char* path1, const char* path2, bool absolute = false) {
+ struct stat s1, s2;
+ EXPECT_EQ(stat(path1, &s1), 0);
+ EXPECT_EQ(stat(path2, &s2), 0);
+
+ int64_t diff = s2.st_size - s1.st_size;
+ if (absolute && diff < 0) diff = -diff;
+
+ return (float)diff * 100.0f / s1.st_size;
+ }
+
+ typedef enum {
+ kRunToCompletion,
+ kCancelAfterProgress,
+ kCancelAfterStart,
+ } TranscodeExecutionControl;
+
+ using FormatConfigurationCallback = std::function<AMediaFormat*(AMediaFormat*)>;
+ media_status_t transcodeHelper(const char* srcPath, const char* destPath,
+ FormatConfigurationCallback formatCallback,
+ TranscodeExecutionControl executionControl = kRunToCompletion) {
+ auto transcoder = MediaTranscoder::create(mCallbacks, nullptr);
+ EXPECT_NE(transcoder, nullptr);
+
+ const int srcFd = open(srcPath, O_RDONLY);
+ EXPECT_EQ(transcoder->configureSource(srcFd), AMEDIA_OK);
+
+ std::vector<std::shared_ptr<AMediaFormat>> trackFormats = transcoder->getTrackFormats();
+ EXPECT_GT(trackFormats.size(), 0);
+
+ for (int i = 0; i < trackFormats.size(); ++i) {
+ AMediaFormat* format = formatCallback(trackFormats[i].get());
+ EXPECT_EQ(transcoder->configureTrackFormat(i, format), AMEDIA_OK);
+
+ // Save original video track format for verification.
+ const char* mime = nullptr;
+ AMediaFormat_getString(trackFormats[i].get(), AMEDIAFORMAT_KEY_MIME, &mime);
+ if (strncmp(mime, "video/", 6) == 0) {
+ mSourceVideoFormat = trackFormats[i];
+ }
+
+ if (format != nullptr) {
+ AMediaFormat_delete(format);
+ }
+ }
+ deleteFile(destPath);
+ const int dstFd = open(destPath, kOpenFlags, kFileMode);
+ EXPECT_EQ(transcoder->configureDestination(dstFd), AMEDIA_OK);
+
+ media_status_t startStatus = transcoder->start();
+ EXPECT_EQ(startStatus, AMEDIA_OK);
+ if (startStatus == AMEDIA_OK) {
+ switch (executionControl) {
+ case kCancelAfterProgress:
+ mCallbacks->waitForProgressMade();
+ FALLTHROUGH_INTENDED;
+ case kCancelAfterStart:
+ transcoder->cancel();
+ break;
+ case kRunToCompletion:
+ default:
+ mCallbacks->waitForTranscodingFinished();
+ break;
+ }
+ }
+ close(srcFd);
+ close(dstFd);
+
+ return mCallbacks->mStatus;
+ }
+
+ void testTranscodeVideo(const char* srcPath, const char* destPath, const char* dstMime,
+ int32_t bitrate = 0) {
+ EXPECT_EQ(transcodeHelper(srcPath, destPath,
+ [dstMime, bitrate](AMediaFormat* sourceFormat) {
+ AMediaFormat* format = nullptr;
+ const char* mime = nullptr;
+ AMediaFormat_getString(sourceFormat, AMEDIAFORMAT_KEY_MIME,
+ &mime);
+
+ if (strncmp(mime, "video/", 6) == 0 &&
+ (bitrate > 0 || dstMime != nullptr)) {
+ format = AMediaFormat_new();
+
+ if (bitrate > 0) {
+ AMediaFormat_setInt32(
+ format, AMEDIAFORMAT_KEY_BIT_RATE, bitrate);
+ }
+
+ if (dstMime != nullptr) {
+ AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME,
+ dstMime);
+ }
+ }
+ return format;
+ }),
+ AMEDIA_OK);
+
+ if (dstMime != nullptr) {
+ std::vector<FormatVerifierEntry> extraVerifiers = {
+ {AMEDIAFORMAT_KEY_MIME,
+ [dstMime](const char* key, AMediaFormat* src __unused, AMediaFormat* dst) {
+ const char* mime = nullptr;
+ AMediaFormat_getString(dst, key, &mime);
+ return !strcmp(mime, dstMime);
+ }},
+ };
+ verifyOutputFormat(destPath, &extraVerifiers);
+ } else {
+ verifyOutputFormat(destPath);
+ }
+ }
+
+ void verifyOutputFormat(const char* destPath,
+ const std::vector<FormatVerifierEntry>* extraVerifiers = nullptr) {
+ int dstFd = open(destPath, O_RDONLY);
+ EXPECT_GT(dstFd, 0);
+ ssize_t fileSize = lseek(dstFd, 0, SEEK_END);
+ lseek(dstFd, 0, SEEK_SET);
+
+ std::shared_ptr<MediaSampleReader> sampleReader =
+ MediaSampleReaderNDK::createFromFd(dstFd, 0, fileSize);
+ ASSERT_NE(sampleReader, nullptr);
+
+ std::shared_ptr<AMediaFormat> videoFormat;
+ const size_t trackCount = sampleReader->getTrackCount();
+ for (size_t trackIndex = 0; trackIndex < trackCount; ++trackIndex) {
+ AMediaFormat* trackFormat = sampleReader->getTrackFormat(static_cast<int>(trackIndex));
+ if (trackFormat != nullptr) {
+ const char* mime = nullptr;
+ AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+
+ if (strncmp(mime, "video/", 6) == 0) {
+ LOG(INFO) << "Track # " << trackIndex << ": "
+ << AMediaFormat_toString(trackFormat);
+ videoFormat = std::shared_ptr<AMediaFormat>(trackFormat, &AMediaFormat_delete);
+ break;
+ }
+ }
+ }
+
+ EXPECT_NE(videoFormat, nullptr);
+
+ LOG(INFO) << "source video format: " << AMediaFormat_toString(mSourceVideoFormat.get());
+ LOG(INFO) << "transcoded video format: " << AMediaFormat_toString(videoFormat.get());
+
+ for (int i = 0; i < (sizeof(kFieldsToPreserve) / sizeof(kFieldsToPreserve[0])); ++i) {
+ EXPECT_TRUE(kFieldsToPreserve[i].equal(kFieldsToPreserve[i].key,
+ mSourceVideoFormat.get(), videoFormat.get()))
+ << "Failed at key " << kFieldsToPreserve[i].key;
+ }
+
+ if (extraVerifiers != nullptr) {
+ for (int i = 0; i < extraVerifiers->size(); ++i) {
+ const FormatVerifierEntry& entry = (*extraVerifiers)[i];
+ EXPECT_TRUE(entry.equal(entry.key, mSourceVideoFormat.get(), videoFormat.get()));
+ }
+ }
+
+ close(dstFd);
+ }
+
+ std::shared_ptr<TestCallbacks> mCallbacks;
+ std::shared_ptr<AMediaFormat> mSourceVideoFormat;
+};
+
+TEST_F(MediaTranscoderTests, TestPassthrough) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_Passthrough.MP4";
+ testTranscodeVideo(srcPath, destPath, nullptr);
+}
+
+TEST_F(MediaTranscoderTests, TestVideoTranscode_AvcToAvc_Basic) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_VideoTranscode_AvcToAvc_Basic.MP4";
+ testTranscodeVideo(srcPath, destPath, AMEDIA_MIMETYPE_VIDEO_AVC);
+}
+
+TEST_F(MediaTranscoderTests, TestVideoTranscode_HevcToAvc_Basic) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/jets_hevc_1280x720_20Mbps.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_VideoTranscode_HevcToAvc_Basic.MP4";
+ testTranscodeVideo(srcPath, destPath, AMEDIA_MIMETYPE_VIDEO_AVC);
+}
+
+TEST_F(MediaTranscoderTests, TestVideoTranscode_HevcToAvc_Rotation) {
+ const char* srcPath =
+ "/data/local/tmp/TranscodingTestAssets/desk_hevc_1920x1080_aac_48KHz_rot90.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_VideoTranscode_HevcToAvc_Rotation.MP4";
+ testTranscodeVideo(srcPath, destPath, AMEDIA_MIMETYPE_VIDEO_AVC);
+}
+
+TEST_F(MediaTranscoderTests, TestPreserveBitrate) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_PreserveBitrate.MP4";
+ testTranscodeVideo(srcPath, destPath, AMEDIA_MIMETYPE_VIDEO_AVC);
+
+ // Require maximum of 10% difference in file size.
+    EXPECT_LT(getFileSizeDiffPercent(srcPath, destPath, true /* absolute */), 10);
+}
+
+TEST_F(MediaTranscoderTests, TestCustomBitrate) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+ const char* destPath1 = "/data/local/tmp/MediaTranscoder_CustomBitrate_2Mbps.MP4";
+ const char* destPath2 = "/data/local/tmp/MediaTranscoder_CustomBitrate_8Mbps.MP4";
+ testTranscodeVideo(srcPath, destPath1, AMEDIA_MIMETYPE_VIDEO_AVC, 2 * 1000 * 1000);
+ mCallbacks = std::make_shared<TestCallbacks>();
+ testTranscodeVideo(srcPath, destPath2, AMEDIA_MIMETYPE_VIDEO_AVC, 8 * 1000 * 1000);
+
+ // The source asset is very short and heavily compressed from the beginning so don't expect the
+ // requested bitrate to be exactly matched. However 40% difference seems reasonable.
+ EXPECT_GT(getFileSizeDiffPercent(destPath1, destPath2), 40);
+}
+
+static AMediaFormat* getAVCVideoFormat(AMediaFormat* sourceFormat) {
+ AMediaFormat* format = nullptr;
+ const char* mime = nullptr;
+ AMediaFormat_getString(sourceFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+
+ if (strncmp(mime, "video/", 6) == 0) {
+ format = AMediaFormat_new();
+ AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, AMEDIA_MIMETYPE_VIDEO_AVC);
+ }
+
+ return format;
+}
+
+TEST_F(MediaTranscoderTests, TestCancelAfterProgress) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_Cancel.MP4";
+
+ for (int i = 0; i < 32; ++i) {
+ EXPECT_EQ(transcodeHelper(srcPath, destPath, getAVCVideoFormat, kCancelAfterProgress),
+ AMEDIA_OK);
+ mCallbacks = std::make_shared<TestCallbacks>();
+ }
+}
+
+TEST_F(MediaTranscoderTests, TestCancelAfterStart) {
+ const char* srcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
+ const char* destPath = "/data/local/tmp/MediaTranscoder_Cancel.MP4";
+
+ for (int i = 0; i < 32; ++i) {
+ EXPECT_EQ(transcodeHelper(srcPath, destPath, getAVCVideoFormat, kCancelAfterStart),
+ AMEDIA_OK);
+ mCallbacks = std::make_shared<TestCallbacks>();
+ }
+}
+
+} // namespace android
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libmediatranscoding/transcoder/tests/PassthroughTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/PassthroughTrackTranscoderTests.cpp
new file mode 100644
index 0000000..9713e17
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/PassthroughTrackTranscoderTests.cpp
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for PassthroughTrackTranscoder
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "PassthroughTrackTranscoderTests"
+
+#include <android-base/logging.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <media/NdkMediaExtractor.h>
+#include <media/PassthroughTrackTranscoder.h>
+#include <openssl/md5.h>
+
+#include <vector>
+
+#include "TrackTranscoderTestUtils.h"
+
+namespace android {
+
+class PassthroughTrackTranscoderTests : public ::testing::Test {
+public:
+ PassthroughTrackTranscoderTests() { LOG(DEBUG) << "PassthroughTrackTranscoderTests created"; }
+
+ void SetUp() override { LOG(DEBUG) << "PassthroughTrackTranscoderTests set up"; }
+
+ void initSourceAndExtractor() {
+ const char* sourcePath =
+ "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+
+ mExtractor = AMediaExtractor_new();
+ ASSERT_NE(mExtractor, nullptr);
+
+ mSourceFd = open(sourcePath, O_RDONLY);
+ ASSERT_GT(mSourceFd, 0);
+
+ mSourceFileSize = lseek(mSourceFd, 0, SEEK_END);
+ lseek(mSourceFd, 0, SEEK_SET);
+
+ media_status_t status =
+ AMediaExtractor_setDataSourceFd(mExtractor, mSourceFd, 0, mSourceFileSize);
+ ASSERT_EQ(status, AMEDIA_OK);
+
+ const size_t trackCount = AMediaExtractor_getTrackCount(mExtractor);
+ for (size_t trackIndex = 0; trackIndex < trackCount; trackIndex++) {
+ AMediaFormat* trackFormat = AMediaExtractor_getTrackFormat(mExtractor, trackIndex);
+ ASSERT_NE(trackFormat, nullptr);
+
+ const char* mime = nullptr;
+ AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+ ASSERT_NE(mime, nullptr);
+
+ if (strncmp(mime, "audio/", 6) == 0) {
+ mTrackIndex = trackIndex;
+ AMediaExtractor_selectTrack(mExtractor, trackIndex);
+ break;
+ }
+
+ AMediaFormat_delete(trackFormat);
+ }
+ }
+
+ void TearDown() override {
+ LOG(DEBUG) << "PassthroughTrackTranscoderTests tear down";
+ if (mExtractor != nullptr) {
+ AMediaExtractor_delete(mExtractor);
+ mExtractor = nullptr;
+ }
+ if (mSourceFd > 0) {
+ close(mSourceFd);
+ mSourceFd = -1;
+ }
+ }
+
+ ~PassthroughTrackTranscoderTests() {
+ LOG(DEBUG) << "PassthroughTrackTranscoderTests destroyed";
+ }
+
+ int mSourceFd = -1;
+ size_t mSourceFileSize;
+ int mTrackIndex;
+ AMediaExtractor* mExtractor = nullptr;
+};
+
+/** Helper class for comparing sample data using checksums. */
+class SampleID {
+public:
+ SampleID(const uint8_t* sampleData, ssize_t sampleSize) : mSize{sampleSize} {
+ MD5_CTX md5Ctx;
+ MD5_Init(&md5Ctx);
+ MD5_Update(&md5Ctx, sampleData, sampleSize);
+ MD5_Final(mChecksum, &md5Ctx);
+ }
+
+ bool operator==(const SampleID& rhs) const {
+ return mSize == rhs.mSize && memcmp(mChecksum, rhs.mChecksum, MD5_DIGEST_LENGTH) == 0;
+ }
+
+ uint8_t mChecksum[MD5_DIGEST_LENGTH];
+ ssize_t mSize;
+};
+
+/**
+ * Tests that the output samples of PassthroughTrackTranscoder are identical to the source samples
+ * and in correct order.
+ */
+TEST_F(PassthroughTrackTranscoderTests, SampleEquality) {
+ LOG(DEBUG) << "Testing SampleEquality";
+
+ ssize_t bufferSize = 1024;
+ auto buffer = std::make_unique<uint8_t[]>(bufferSize);
+
+ initSourceAndExtractor();
+
+ // Loop through all samples of a track and store size and checksums.
+ std::vector<SampleID> sampleChecksums;
+
+ int64_t sampleTime = AMediaExtractor_getSampleTime(mExtractor);
+ while (sampleTime != -1) {
+ if (AMediaExtractor_getSampleTrackIndex(mExtractor) == mTrackIndex) {
+ ssize_t sampleSize = AMediaExtractor_getSampleSize(mExtractor);
+ if (bufferSize < sampleSize) {
+ bufferSize = sampleSize;
+ buffer = std::make_unique<uint8_t[]>(bufferSize);
+ }
+
+ ssize_t bytesRead =
+ AMediaExtractor_readSampleData(mExtractor, buffer.get(), bufferSize);
+ ASSERT_EQ(bytesRead, sampleSize);
+
+ SampleID sampleId{buffer.get(), sampleSize};
+ sampleChecksums.push_back(sampleId);
+ }
+
+ AMediaExtractor_advance(mExtractor);
+ sampleTime = AMediaExtractor_getSampleTime(mExtractor);
+ }
+
+ // Create and start the transcoder.
+ std::shared_ptr<TestCallback> callback = std::make_shared<TestCallback>();
+ PassthroughTrackTranscoder transcoder{callback};
+
+ std::shared_ptr<MediaSampleReader> mediaSampleReader =
+ MediaSampleReaderNDK::createFromFd(mSourceFd, 0, mSourceFileSize);
+ EXPECT_NE(mediaSampleReader, nullptr);
+
+ EXPECT_EQ(mediaSampleReader->selectTrack(mTrackIndex), AMEDIA_OK);
+ EXPECT_EQ(transcoder.configure(mediaSampleReader, mTrackIndex, nullptr /* destinationFormat */),
+ AMEDIA_OK);
+ ASSERT_TRUE(transcoder.start());
+
+ // Pull transcoder's output samples and compare against input checksums.
+ bool eos = false;
+ uint64_t sampleCount = 0;
+ transcoder.setSampleConsumer(
+ [&sampleCount, &sampleChecksums, &eos](const std::shared_ptr<MediaSample>& sample) {
+ ASSERT_NE(sample, nullptr);
+ EXPECT_FALSE(eos);
+
+ if (sample->info.flags & SAMPLE_FLAG_END_OF_STREAM) {
+ eos = true;
+ } else {
+ SampleID sampleId{sample->buffer, static_cast<ssize_t>(sample->info.size)};
+ EXPECT_TRUE(sampleId == sampleChecksums[sampleCount]);
+ ++sampleCount;
+ }
+ });
+
+ callback->waitUntilFinished();
+ EXPECT_EQ(sampleCount, sampleChecksums.size());
+ EXPECT_TRUE(transcoder.stop());
+}
+
+/** Class for testing PassthroughTrackTranscoder's built in buffer pool. */
+class BufferPoolTests : public ::testing::Test {
+public:
+ static constexpr int kMaxBuffers = 5;
+
+ void SetUp() override {
+ LOG(DEBUG) << "BufferPoolTests set up";
+ mBufferPool = std::make_shared<PassthroughTrackTranscoder::BufferPool>(kMaxBuffers);
+ }
+
+ void TearDown() override {
+ LOG(DEBUG) << "BufferPoolTests tear down";
+ mBufferPool.reset();
+ }
+
+ std::shared_ptr<PassthroughTrackTranscoder::BufferPool> mBufferPool;
+};
+
+TEST_F(BufferPoolTests, BufferReuse) {
+ LOG(DEBUG) << "Testing BufferReuse";
+
+ uint8_t* buffer1 = mBufferPool->getBufferWithSize(10);
+ EXPECT_NE(buffer1, nullptr);
+
+ uint8_t* buffer2 = mBufferPool->getBufferWithSize(10);
+ EXPECT_NE(buffer2, nullptr);
+ EXPECT_NE(buffer2, buffer1);
+
+ mBufferPool->returnBuffer(buffer1);
+
+ uint8_t* buffer3 = mBufferPool->getBufferWithSize(10);
+ EXPECT_NE(buffer3, nullptr);
+ EXPECT_NE(buffer3, buffer2);
+ EXPECT_EQ(buffer3, buffer1);
+
+ mBufferPool->returnBuffer(buffer2);
+
+ uint8_t* buffer4 = mBufferPool->getBufferWithSize(10);
+ EXPECT_NE(buffer4, nullptr);
+ EXPECT_NE(buffer4, buffer1);
+ EXPECT_EQ(buffer4, buffer2);
+}
+
+TEST_F(BufferPoolTests, SmallestAvailableBuffer) {
+ LOG(DEBUG) << "Testing SmallestAvailableBuffer";
+
+ uint8_t* buffer1 = mBufferPool->getBufferWithSize(10);
+ EXPECT_NE(buffer1, nullptr);
+
+ uint8_t* buffer2 = mBufferPool->getBufferWithSize(15);
+ EXPECT_NE(buffer2, nullptr);
+ EXPECT_NE(buffer2, buffer1);
+
+ uint8_t* buffer3 = mBufferPool->getBufferWithSize(20);
+ EXPECT_NE(buffer3, nullptr);
+ EXPECT_NE(buffer3, buffer1);
+ EXPECT_NE(buffer3, buffer2);
+
+ mBufferPool->returnBuffer(buffer1);
+ mBufferPool->returnBuffer(buffer2);
+ mBufferPool->returnBuffer(buffer3);
+
+ uint8_t* buffer4 = mBufferPool->getBufferWithSize(11);
+ EXPECT_NE(buffer4, nullptr);
+ EXPECT_EQ(buffer4, buffer2);
+
+ uint8_t* buffer5 = mBufferPool->getBufferWithSize(11);
+ EXPECT_NE(buffer5, nullptr);
+ EXPECT_EQ(buffer5, buffer3);
+}
+
+TEST_F(BufferPoolTests, AddAfterAbort) {
+ LOG(DEBUG) << "Testing AddAfterAbort";
+
+ uint8_t* buffer1 = mBufferPool->getBufferWithSize(10);
+ EXPECT_NE(buffer1, nullptr);
+ mBufferPool->returnBuffer(buffer1);
+
+ mBufferPool->abort();
+ uint8_t* buffer2 = mBufferPool->getBufferWithSize(10);
+ EXPECT_EQ(buffer2, nullptr);
+}
+
+TEST_F(BufferPoolTests, MaximumBuffers) {
+ LOG(DEBUG) << "Testing MaximumBuffers";
+
+ static constexpr size_t kBufferBaseSize = 10;
+ std::unordered_map<uint8_t*, size_t> addressSizeMap;
+
+ // Get kMaxBuffers * 2 new buffers with increasing size.
+ // (Note: Once kMaxBuffers have been allocated, the pool will delete old buffers to accommodate
+ // new ones making the deleted buffers free to be reused by the system's heap memory allocator.
+ // So we cannot test that each new pointer is unique here.)
+ for (int i = 0; i < kMaxBuffers * 2; i++) {
+ size_t size = kBufferBaseSize + i;
+ uint8_t* buffer = mBufferPool->getBufferWithSize(size);
+ EXPECT_NE(buffer, nullptr);
+ addressSizeMap[buffer] = size;
+ mBufferPool->returnBuffer(buffer);
+ }
+
+ // Verify that the pool now contains the kMaxBuffers largest buffers allocated above and that
+ // the buffer of matching size is returned.
+ for (int i = kMaxBuffers; i < kMaxBuffers * 2; i++) {
+ size_t size = kBufferBaseSize + i;
+ uint8_t* buffer = mBufferPool->getBufferWithSize(size);
+ EXPECT_NE(buffer, nullptr);
+
+ auto it = addressSizeMap.find(buffer);
+ ASSERT_NE(it, addressSizeMap.end());
+ EXPECT_EQ(it->second, size);
+ mBufferPool->returnBuffer(buffer);
+ }
+}
+
+} // namespace android
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libmediatranscoding/transcoder/tests/README.md b/media/libmediatranscoding/transcoder/tests/README.md
new file mode 100644
index 0000000..59417b0
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/README.md
@@ -0,0 +1,14 @@
+## Transcoder Testing ##
+---
+#### Transcoder unit tests :
+To run all transcoder unit tests, run the supplied script from this folder:
+
+```
+./build_and_run_all_unit_tests.sh
+```
+
+To run individual unit test modules, use atest:
+
+```
+atest MediaSampleReaderNDK
+```
diff --git a/media/libmediatranscoding/transcoder/tests/TrackTranscoderTestUtils.h b/media/libmediatranscoding/transcoder/tests/TrackTranscoderTestUtils.h
new file mode 100644
index 0000000..8d05353
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/TrackTranscoderTestUtils.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/MediaTrackTranscoder.h>
+#include <media/MediaTrackTranscoderCallback.h>
+
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+
+namespace android {
+
+//
+// This file contains test utilities used by more than one track transcoder test.
+//
+
+class TrackTranscoderTestUtils {
+public:
+ static std::shared_ptr<AMediaFormat> getDefaultVideoDestinationFormat(
+ AMediaFormat* sourceFormat, bool includeBitrate = true) {
+ // Default video destination format setup.
+ static constexpr float kFrameRate = 30.0f;
+ static constexpr float kIFrameInterval = 30.0f;
+ static constexpr int32_t kBitRate = 2 * 1000 * 1000;
+ static constexpr int32_t kColorFormatSurface = 0x7f000789;
+
+ AMediaFormat* destinationFormat = AMediaFormat_new();
+ AMediaFormat_copy(destinationFormat, sourceFormat);
+ AMediaFormat_setFloat(destinationFormat, AMEDIAFORMAT_KEY_FRAME_RATE, kFrameRate);
+ AMediaFormat_setFloat(destinationFormat, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL,
+ kIFrameInterval);
+ if (includeBitrate) {
+ AMediaFormat_setInt32(destinationFormat, AMEDIAFORMAT_KEY_BIT_RATE, kBitRate);
+ }
+ AMediaFormat_setInt32(destinationFormat, AMEDIAFORMAT_KEY_COLOR_FORMAT,
+ kColorFormatSurface);
+
+ return std::shared_ptr<AMediaFormat>(destinationFormat, &AMediaFormat_delete);
+ }
+};
+
+class TestCallback : public MediaTrackTranscoderCallback {
+public:
+ TestCallback() = default;
+ ~TestCallback() = default;
+
+ // MediaTrackTranscoderCallback
+ void onTrackFormatAvailable(const MediaTrackTranscoder* transcoder __unused) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mTrackFormatAvailable = true;
+ mTrackFormatAvailableCondition.notify_all();
+ }
+
+ void onTrackFinished(const MediaTrackTranscoder* transcoder __unused) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mTranscodingFinished = true;
+ mTranscodingFinishedCondition.notify_all();
+ }
+
+ void onTrackError(const MediaTrackTranscoder* transcoder __unused, media_status_t status) {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mTranscodingFinished = true;
+ mStatus = status;
+ mTranscodingFinishedCondition.notify_all();
+ }
+ // ~MediaTrackTranscoderCallback
+
+ media_status_t waitUntilFinished() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (!mTranscodingFinished) {
+ mTranscodingFinishedCondition.wait(lock);
+ }
+ return mStatus;
+ }
+
+ void waitUntilTrackFormatAvailable() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (!mTrackFormatAvailable) {
+ mTrackFormatAvailableCondition.wait(lock);
+ }
+ }
+
+private:
+ media_status_t mStatus = AMEDIA_OK;
+ std::mutex mMutex;
+ std::condition_variable mTranscodingFinishedCondition;
+ std::condition_variable mTrackFormatAvailableCondition;
+ bool mTranscodingFinished = false;
+ bool mTrackFormatAvailable = false;
+};
+
+class OneShotSemaphore {
+public:
+ void wait() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ while (!mSignaled) {
+ mCondition.wait(lock);
+ }
+ }
+
+ void signal() {
+ std::unique_lock<std::mutex> lock(mMutex);
+ mSignaled = true;
+ mCondition.notify_all();
+ }
+
+private:
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ bool mSignaled = false;
+};
+
+}; // namespace android
diff --git a/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
new file mode 100644
index 0000000..1b5bd13
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/VideoTrackTranscoderTests.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for VideoTrackTranscoder
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "VideoTrackTranscoderTests"
+
+#include <android-base/logging.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <media/MediaSampleReaderNDK.h>
+#include <media/NdkCommon.h>
+#include <media/VideoTrackTranscoder.h>
+#include <utils/Timers.h>
+
+#include "TrackTranscoderTestUtils.h"
+
+namespace android {
+
+// TODO(b/155304421): Implement more advanced video specific tests:
+// - Codec conversions (HEVC -> AVC).
+// - Bitrate validation.
+// - Output frame validation through PSNR.
+
+class VideoTrackTranscoderTests : public ::testing::Test {
+public:
+ VideoTrackTranscoderTests() { LOG(DEBUG) << "VideoTrackTranscoderTests created"; }
+
+ void SetUp() override {
+ LOG(DEBUG) << "VideoTrackTranscoderTests set up";
+ const char* sourcePath =
+ "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+
+ const int sourceFd = open(sourcePath, O_RDONLY);
+ ASSERT_GT(sourceFd, 0);
+
+ const off_t fileSize = lseek(sourceFd, 0, SEEK_END);
+ lseek(sourceFd, 0, SEEK_SET);
+
+ mMediaSampleReader = MediaSampleReaderNDK::createFromFd(sourceFd, 0, fileSize);
+ ASSERT_NE(mMediaSampleReader, nullptr);
+ close(sourceFd);
+
+ for (size_t trackIndex = 0; trackIndex < mMediaSampleReader->getTrackCount();
+ ++trackIndex) {
+ AMediaFormat* trackFormat = mMediaSampleReader->getTrackFormat(trackIndex);
+ ASSERT_NE(trackFormat, nullptr);
+
+ const char* mime = nullptr;
+ AMediaFormat_getString(trackFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+ ASSERT_NE(mime, nullptr);
+
+ if (strncmp(mime, "video/", 6) == 0) {
+ mTrackIndex = trackIndex;
+
+ mSourceFormat = std::shared_ptr<AMediaFormat>(trackFormat, &AMediaFormat_delete);
+ ASSERT_NE(mSourceFormat, nullptr);
+
+ mDestinationFormat =
+ TrackTranscoderTestUtils::getDefaultVideoDestinationFormat(trackFormat);
+ ASSERT_NE(mDestinationFormat, nullptr);
+ break;
+ }
+
+ AMediaFormat_delete(trackFormat);
+ }
+
+ ASSERT_NE(mSourceFormat, nullptr);
+ }
+
+ void TearDown() override { LOG(DEBUG) << "VideoTrackTranscoderTests tear down"; }
+
+ ~VideoTrackTranscoderTests() { LOG(DEBUG) << "VideoTrackTranscoderTests destroyed"; }
+
+ std::shared_ptr<MediaSampleReader> mMediaSampleReader;
+ int mTrackIndex;
+ std::shared_ptr<AMediaFormat> mSourceFormat;
+ std::shared_ptr<AMediaFormat> mDestinationFormat;
+};
+
+TEST_F(VideoTrackTranscoderTests, SampleSoundness) {
+ LOG(DEBUG) << "Testing SampleSoundness";
+ std::shared_ptr<TestCallback> callback = std::make_shared<TestCallback>();
+ auto transcoder = VideoTrackTranscoder::create(callback);
+
+ EXPECT_EQ(mMediaSampleReader->selectTrack(mTrackIndex), AMEDIA_OK);
+ EXPECT_EQ(transcoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ ASSERT_TRUE(transcoder->start());
+
+ bool eos = false;
+ uint64_t sampleCount = 0;
+ transcoder->setSampleConsumer([&sampleCount, &eos](const std::shared_ptr<MediaSample>& sample) {
+ ASSERT_NE(sample, nullptr);
+ const uint32_t flags = sample->info.flags;
+
+ if (sampleCount == 0) {
+ // Expect first sample to be a codec config.
+ EXPECT_TRUE((flags & SAMPLE_FLAG_CODEC_CONFIG) != 0);
+ EXPECT_TRUE((flags & SAMPLE_FLAG_SYNC_SAMPLE) == 0);
+ EXPECT_TRUE((flags & SAMPLE_FLAG_END_OF_STREAM) == 0);
+ EXPECT_TRUE((flags & SAMPLE_FLAG_PARTIAL_FRAME) == 0);
+ } else if (sampleCount == 1) {
+ // Expect second sample to be a sync sample.
+ EXPECT_TRUE((flags & SAMPLE_FLAG_CODEC_CONFIG) == 0);
+ EXPECT_TRUE((flags & SAMPLE_FLAG_SYNC_SAMPLE) != 0);
+ EXPECT_TRUE((flags & SAMPLE_FLAG_END_OF_STREAM) == 0);
+ }
+
+ if (!(flags & SAMPLE_FLAG_END_OF_STREAM)) {
+ // Expect a valid buffer unless it is EOS.
+ EXPECT_NE(sample->buffer, nullptr);
+ EXPECT_NE(sample->bufferId, 0xBAADF00D);
+ EXPECT_GT(sample->info.size, 0);
+ } else {
+ EXPECT_FALSE(eos);
+ eos = true;
+ }
+
+ ++sampleCount;
+ });
+
+ EXPECT_EQ(callback->waitUntilFinished(), AMEDIA_OK);
+ EXPECT_TRUE(transcoder->stop());
+}
+
+TEST_F(VideoTrackTranscoderTests, PreserveBitrate) {
+ LOG(DEBUG) << "Testing PreserveBitrate";
+ std::shared_ptr<TestCallback> callback = std::make_shared<TestCallback>();
+ std::shared_ptr<MediaTrackTranscoder> transcoder = VideoTrackTranscoder::create(callback);
+
+ auto destFormat = TrackTranscoderTestUtils::getDefaultVideoDestinationFormat(
+ mSourceFormat.get(), false /* includeBitrate*/);
+ EXPECT_NE(destFormat, nullptr);
+
+ EXPECT_EQ(mMediaSampleReader->selectTrack(mTrackIndex), AMEDIA_OK);
+
+ int32_t srcBitrate;
+ EXPECT_EQ(mMediaSampleReader->getEstimatedBitrateForTrack(mTrackIndex, &srcBitrate), AMEDIA_OK);
+
+ ASSERT_EQ(transcoder->configure(mMediaSampleReader, mTrackIndex, destFormat), AMEDIA_OK);
+ ASSERT_TRUE(transcoder->start());
+
+ callback->waitUntilTrackFormatAvailable();
+
+ auto outputFormat = transcoder->getOutputFormat();
+ ASSERT_NE(outputFormat, nullptr);
+
+ ASSERT_TRUE(transcoder->stop());
+
+ int32_t outBitrate;
+ EXPECT_TRUE(AMediaFormat_getInt32(outputFormat.get(), AMEDIAFORMAT_KEY_BIT_RATE, &outBitrate));
+
+ EXPECT_EQ(srcBitrate, outBitrate);
+}
+
+// VideoTrackTranscoder needs a valid destination format.
+TEST_F(VideoTrackTranscoderTests, NullDestinationFormat) {
+ LOG(DEBUG) << "Testing NullDestinationFormat";
+ std::shared_ptr<TestCallback> callback = std::make_shared<TestCallback>();
+ std::shared_ptr<AMediaFormat> nullFormat;
+
+ auto transcoder = VideoTrackTranscoder::create(callback);
+ EXPECT_EQ(transcoder->configure(mMediaSampleReader, 0 /* trackIndex */, nullFormat),
+ AMEDIA_ERROR_INVALID_PARAMETER);
+}
+
+TEST_F(VideoTrackTranscoderTests, LingeringEncoder) {
+ OneShotSemaphore semaphore;
+ auto callback = std::make_shared<TestCallback>();
+ auto transcoder = VideoTrackTranscoder::create(callback);
+
+ EXPECT_EQ(mMediaSampleReader->selectTrack(mTrackIndex), AMEDIA_OK);
+ EXPECT_EQ(transcoder->configure(mMediaSampleReader, mTrackIndex, mDestinationFormat),
+ AMEDIA_OK);
+ ASSERT_TRUE(transcoder->start());
+
+ std::vector<std::shared_ptr<MediaSample>> samples;
+ transcoder->setSampleConsumer(
+ [&samples, &semaphore](const std::shared_ptr<MediaSample>& sample) {
+ if (samples.size() >= 4) return;
+
+ ASSERT_NE(sample, nullptr);
+ samples.push_back(sample);
+
+ if (samples.size() == 4 || sample->info.flags & SAMPLE_FLAG_END_OF_STREAM) {
+ semaphore.signal();
+ }
+ });
+
+ // Wait for the encoder to output samples before stopping and releasing the transcoder.
+ semaphore.wait();
+
+ EXPECT_TRUE(transcoder->stop());
+ transcoder.reset();
+
+ // Return buffers to the codec so that it can resume processing, but keep one buffer to avoid
+ // the codec being released.
+ samples.resize(1);
+
+ // Wait for async codec events.
+ std::this_thread::sleep_for(std::chrono::seconds(1));
+}
+
+} // namespace android
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libmediatranscoding/transcoder/tests/build_and_run_all_unit_tests.sh b/media/libmediatranscoding/transcoder/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..b848b4c
--- /dev/null
+++ b/media/libmediatranscoding/transcoder/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+#
+# Run tests in this directory.
+#
+
+if [ "$SYNC_FINISHED" != true ]; then
+ if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+ fi
+
+ # ensure we have mm
+ . $ANDROID_BUILD_TOP/build/envsetup.sh
+
+ mm
+
+ echo "waiting for device"
+
+ adb root && adb wait-for-device remount && adb sync
+fi
+
+# Push the files onto the device.
+. $ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/assets/push_assets.sh
+
+echo "========================================"
+
+echo "testing MediaSampleReaderNDK"
+adb shell ASAN_OPTIONS=detect_container_overflow=0 /data/nativetest64/MediaSampleReaderNDKTests/MediaSampleReaderNDKTests
+
+echo "testing MediaSampleQueue"
+adb shell ASAN_OPTIONS=detect_container_overflow=0 /data/nativetest64/MediaSampleQueueTests/MediaSampleQueueTests
+
+echo "testing MediaTrackTranscoder"
+adb shell ASAN_OPTIONS=detect_container_overflow=0 /data/nativetest64/MediaTrackTranscoderTests/MediaTrackTranscoderTests
+
+echo "testing VideoTrackTranscoder"
+adb shell ASAN_OPTIONS=detect_container_overflow=0 /data/nativetest64/VideoTrackTranscoderTests/VideoTrackTranscoderTests
+
+echo "testing PassthroughTrackTranscoder"
+adb shell ASAN_OPTIONS=detect_container_overflow=0 /data/nativetest64/PassthroughTrackTranscoderTests/PassthroughTrackTranscoderTests
+
+echo "testing MediaSampleWriter"
+adb shell ASAN_OPTIONS=detect_container_overflow=0 /data/nativetest64/MediaSampleWriterTests/MediaSampleWriterTests
+
+echo "testing MediaTranscoder"
+adb shell ASAN_OPTIONS=detect_container_overflow=0 /data/nativetest64/MediaTranscoderTests/MediaTranscoderTests
diff --git a/media/libshmem/Android.bp b/media/libshmem/Android.bp
new file mode 100644
index 0000000..fae98ed
--- /dev/null
+++ b/media/libshmem/Android.bp
@@ -0,0 +1,50 @@
+aidl_interface {
+ name: "shared-file-region-aidl",
+ unstable: true,
+ local_include_dir: "aidl",
+ srcs: [
+ "aidl/android/media/SharedFileRegion.aidl",
+ ],
+}
+
+cc_library {
+ name: "libshmemcompat",
+ export_include_dirs: ["include"],
+ srcs: ["ShmemCompat.cpp"],
+ shared_libs: [
+ "libbinder",
+ "libshmemutil",
+ "libutils",
+ "shared-file-region-aidl-unstable-cpp",
+ ],
+ export_shared_lib_headers: [
+ "libbinder",
+ "libutils",
+ "shared-file-region-aidl-unstable-cpp",
+ ],
+}
+
+cc_library {
+ name: "libshmemutil",
+ export_include_dirs: ["include"],
+ srcs: ["ShmemUtil.cpp"],
+ shared_libs: [
+ "shared-file-region-aidl-unstable-cpp",
+ ],
+ export_shared_lib_headers: [
+ "shared-file-region-aidl-unstable-cpp",
+ ],
+}
+
+cc_test {
+ name: "shmemTest",
+ srcs: ["ShmemTest.cpp"],
+ shared_libs: [
+ "libbinder",
+ "libshmemcompat",
+ "libshmemutil",
+ "libutils",
+ "shared-file-region-aidl-unstable-cpp",
+ ],
+ test_suites: ["device-tests"],
+}
diff --git a/media/libshmem/OWNERS b/media/libshmem/OWNERS
new file mode 100644
index 0000000..29fa2f5
--- /dev/null
+++ b/media/libshmem/OWNERS
@@ -0,0 +1,3 @@
+ytai@google.com
+mnaganov@google.com
+elaurent@google.com
diff --git a/media/libshmem/README.md b/media/libshmem/README.md
new file mode 100644
index 0000000..c25fa7f
--- /dev/null
+++ b/media/libshmem/README.md
@@ -0,0 +1,6 @@
+# libshmem
+
+This library provides facilities for sharing memory across processes over (stable) AIDL. The main
+feature is the definition of the `android.media.SharedFileRegion` AIDL type, which represents a block of
+memory that can be shared between processes. In addition, a few utilities are provided to facilitate
+the use of shared memory and to integrate with legacy code that uses older facilities.
\ No newline at end of file
diff --git a/media/libshmem/ShmemCompat.cpp b/media/libshmem/ShmemCompat.cpp
new file mode 100644
index 0000000..5dd83f4
--- /dev/null
+++ b/media/libshmem/ShmemCompat.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "media/ShmemCompat.h"
+
+#include "binder/MemoryBase.h"
+#include "binder/MemoryHeapBase.h"
+#include "media/ShmemUtil.h"
+
+namespace android {
+namespace media {
+
+bool convertSharedFileRegionToIMemory(const SharedFileRegion& shmem,
+ sp<IMemory>* result) {
+ if (!validateSharedFileRegion(shmem)) {
+ return false;
+ }
+
+ if (shmem.fd.get() < 0) {
+ *result = nullptr;
+ return true;
+ }
+
+ // Heap offset and size must be page aligned.
+ const size_t pageSize = getpagesize();
+ const size_t pageMask = ~(pageSize - 1);
+
+ // OK if this wraps.
+ const uint64_t endOffset = static_cast<uint64_t>(shmem.offset) +
+ static_cast<uint64_t>(shmem.size);
+
+ // Round down to page boundary.
+ const uint64_t heapStartOffset = shmem.offset & pageMask;
+ // Round up to page boundary.
+ const uint64_t heapEndOffset = (endOffset + pageSize - 1) & pageMask;
+ const uint64_t heapSize = heapEndOffset - heapStartOffset;
+
+ if (heapStartOffset > std::numeric_limits<size_t>::max() ||
+ heapSize > std::numeric_limits<size_t>::max()) {
+ return false;
+ }
+
+ const sp<MemoryHeapBase> heap =
+ new MemoryHeapBase(shmem.fd.get(), heapSize, 0, heapStartOffset);
+ *result = sp<MemoryBase>::make(heap,
+ shmem.offset - heapStartOffset,
+ shmem.size);
+ return true;
+}
+
+bool convertIMemoryToSharedFileRegion(const sp<IMemory>& mem,
+ SharedFileRegion* result) {
+ *result = SharedFileRegion();
+ if (mem == nullptr) {
+ return true;
+ }
+
+ ssize_t offset;
+ size_t size;
+
+ sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+ if (heap != nullptr) {
+ // Make sure the offset and size do not overflow from int64 boundaries.
+ if (size > std::numeric_limits<int64_t>::max() ||
+ offset > std::numeric_limits<int64_t>::max() ||
+ heap->getOffset() > std::numeric_limits<int64_t>::max() ||
+ static_cast<uint64_t>(heap->getOffset()) +
+ static_cast<uint64_t>(offset)
+ > std::numeric_limits<int64_t>::max()) {
+ return false;
+ }
+
+ const int fd = fcntl(heap->getHeapID(), F_DUPFD_CLOEXEC, 0);
+ if (fd < 0) {
+ return false;
+ }
+ result->fd.reset(base::unique_fd(fd));
+ result->size = size;
+ result->offset = heap->getOffset() + offset;
+ }
+
+ return true;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libshmem/ShmemTest.cpp b/media/libshmem/ShmemTest.cpp
new file mode 100644
index 0000000..4f11b51
--- /dev/null
+++ b/media/libshmem/ShmemTest.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <gtest/gtest.h>
+
+#include "binder/MemoryBase.h"
+#include "binder/MemoryHeapBase.h"
+#include "media/ShmemCompat.h"
+#include "media/ShmemUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+// Creates a SharedFileRegion instance with a null FD.
+SharedFileRegion makeSharedFileRegion(int64_t offset, int64_t size) {
+ SharedFileRegion shmem;
+ shmem.offset = offset;
+ shmem.size = size;
+ return shmem;
+}
+
+sp<IMemory> makeIMemory(const std::vector<uint8_t>& content) {
+ constexpr size_t kOffset = 19;
+
+ sp<MemoryHeapBase> heap = new MemoryHeapBase(content.size());
+ sp<IMemory> result = sp<MemoryBase>::make(heap, kOffset, content.size());
+ memcpy(result->unsecurePointer(), content.data(), content.size());
+ return result;
+}
+
+TEST(ShmemTest, Validate) {
+ EXPECT_TRUE(validateSharedFileRegion(makeSharedFileRegion(0, 0)));
+ EXPECT_TRUE(validateSharedFileRegion(makeSharedFileRegion(1, 2)));
+ EXPECT_FALSE(validateSharedFileRegion(makeSharedFileRegion(-1, 2)));
+ EXPECT_FALSE(validateSharedFileRegion(makeSharedFileRegion(2, -1)));
+ EXPECT_TRUE(validateSharedFileRegion(makeSharedFileRegion(
+ std::numeric_limits<int64_t>::max(),
+ std::numeric_limits<int64_t>::max())));
+}
+
+TEST(ShmemTest, Conversion) {
+ sp<IMemory> reconstructed;
+ {
+ SharedFileRegion shmem;
+ sp<IMemory> imem = makeIMemory({6, 5, 3});
+ ASSERT_TRUE(convertIMemoryToSharedFileRegion(imem, &shmem));
+ ASSERT_EQ(3, shmem.size);
+ ASSERT_GE(shmem.fd.get(), 0);
+ ASSERT_TRUE(convertSharedFileRegionToIMemory(shmem, &reconstructed));
+ }
+ ASSERT_EQ(3, reconstructed->size());
+ const uint8_t* p =
+ reinterpret_cast<const uint8_t*>(reconstructed->unsecurePointer());
+ EXPECT_EQ(6, p[0]);
+ EXPECT_EQ(5, p[1]);
+ EXPECT_EQ(3, p[2]);
+}
+
+TEST(ShmemTest, NullConversion) {
+ sp<IMemory> reconstructed;
+ {
+ SharedFileRegion shmem;
+ sp<IMemory> imem;
+ ASSERT_TRUE(convertIMemoryToSharedFileRegion(imem, &shmem));
+ ASSERT_EQ(0, shmem.size);
+ ASSERT_LT(shmem.fd.get(), 0);
+ ASSERT_TRUE(convertSharedFileRegionToIMemory(shmem, &reconstructed));
+ }
+ ASSERT_EQ(nullptr, reconstructed);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libshmem/ShmemUtil.cpp b/media/libshmem/ShmemUtil.cpp
new file mode 100644
index 0000000..a6d047f
--- /dev/null
+++ b/media/libshmem/ShmemUtil.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "media/ShmemUtil.h"
+
+namespace android {
+namespace media {
+
+bool validateSharedFileRegion(const SharedFileRegion& shmem) {
+ // Size and offset must be non-negative.
+ if (shmem.size < 0 || shmem.offset < 0) {
+ return false;
+ }
+
+ uint64_t size = shmem.size;
+ uint64_t offset = shmem.offset;
+
+ // Must not wrap.
+ if (offset > offset + size) {
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libshmem/aidl/android/media/SharedFileRegion.aidl b/media/libshmem/aidl/android/media/SharedFileRegion.aidl
new file mode 100644
index 0000000..c99ad95
--- /dev/null
+++ b/media/libshmem/aidl/android/media/SharedFileRegion.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * A shared file region.
+ *
+ * This type contains the required information to share a region of a file between processes over
+ * AIDL. An invalid (null) region may be represented using a negative file descriptor.
+ * Primarily, this is intended for shared memory blocks.
+ *
+ * @hide
+ */
+parcelable SharedFileRegion {
+ /** File descriptor of the region. */
+ ParcelFileDescriptor fd;
+ /** Offset, in bytes, of the start of the region within the file. Must be non-negative. */
+ long offset;
+ /** Size, in bytes, of the memory region. Must be non-negative. */
+ long size;
+}
diff --git a/media/libshmem/include/media/ShmemCompat.h b/media/libshmem/include/media/ShmemCompat.h
new file mode 100644
index 0000000..3bf7f67
--- /dev/null
+++ b/media/libshmem/include/media/ShmemCompat.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+// This module contains utilities for interfacing between legacy code that is using IMemory and new
+// code that is using android.media.SharedFileRegion.
+
+#include "android/media/SharedFileRegion.h"
+#include "binder/IMemory.h"
+#include "utils/StrongPointer.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a SharedFileRegion parcelable to an IMemory instance.
+ * @param shmem The SharedFileRegion instance.
+ * @param result The resulting IMemory instance, or null if the SharedFileRegion is null (has a
+ * negative FD).
+ * @return true if the conversion is successful (should always succeed under normal circumstances,
+ * failure usually means corrupt data).
+ */
+bool convertSharedFileRegionToIMemory(const SharedFileRegion& shmem,
+ sp<IMemory>* result);
+
+/**
+ * Converts an IMemory instance to SharedFileRegion.
+ * @param mem The IMemory instance. May be null.
+ * @param result The resulting SharedFileRegion instance.
+ * @return true if the conversion is successful (should always succeed under normal circumstances,
+ * failure usually means corrupt data).
+ */
+bool convertIMemoryToSharedFileRegion(const sp<IMemory>& mem,
+ SharedFileRegion* result);
+
+} // namespace media
+} // namespace android
diff --git a/media/libshmem/include/media/ShmemUtil.h b/media/libshmem/include/media/ShmemUtil.h
new file mode 100644
index 0000000..563cb71
--- /dev/null
+++ b/media/libshmem/include/media/ShmemUtil.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+// This module contains utilities for working with android.media.SharedFileRegion.
+
+#include "android/media/SharedFileRegion.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Checks whether a SharedFileRegion instance is valid (all the fields have sane values).
+ * A null SharedFileRegion (having a negative FD) is considered valid.
+ */
+bool validateSharedFileRegion(const SharedFileRegion& shmem);
+
+} // namespace media
+} // namespace android
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d677744..b9f9173 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -564,6 +564,7 @@
mShutdownInProgress(false),
mExplicitShutdown(false),
mIsLegacyVP9Decoder(false),
+ mIsStreamCorruptFree(false),
mIsLowLatency(false),
mEncoderDelay(0),
mEncoderPadding(0),
@@ -2339,6 +2340,12 @@
mChannelMaskPresent = false;
}
+ int32_t isCorruptFree = 0;
+ if (msg->findInt32("corrupt-free", &isCorruptFree)) {
+ mIsStreamCorruptFree = isCorruptFree == 1 ? true : false;
+ ALOGV("corrupt-free=[%d]", mIsStreamCorruptFree);
+ }
+
int32_t maxInputSize;
if (msg->findInt32("max-input-size", &maxInputSize)) {
err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
@@ -4164,6 +4171,29 @@
ALOGI("setupVideoEncoder succeeded");
}
+ // Video should be encoded upright because the RTP protocol can
+ // provide rotation information only if CVO is supported.
+ // This is needed to support the non-CVO case for the video streaming scenario.
+ int32_t rotation = 0;
+ if (msg->findInt32("rotation-degrees", &rotation)) {
+ OMX_CONFIG_ROTATIONTYPE config;
+ InitOMXParams(&config);
+ config.nPortIndex = kPortIndexOutput;
+ status_t err = mOMXNode->getConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigCommonRotate, &config, sizeof(config));
+ if (err != OK) {
+ ALOGW("Failed to getConfig of OMX_IndexConfigCommonRotate(err %d)", err);
+ }
+ config.nRotation = rotation;
+ err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigCommonRotate, &config, sizeof(config));
+
+ ALOGD("Applying encoder-rotation=[%d] to video encoder.", config.nRotation);
+ if (err != OK) {
+ ALOGW("Failed to setConfig of OMX_IndexConfigCommonRotate(err %d)", err);
+ }
+ }
+
return err;
}
@@ -6027,6 +6057,12 @@
return false;
}
+ if (mCodec->mIsStreamCorruptFree && data1 == (OMX_U32)OMX_ErrorStreamCorrupt) {
+ ALOGV("[%s] handle OMX_ErrorStreamCorrupt as a normal operation",
+ mCodec->mComponentName.c_str());
+ return true;
+ }
+
ALOGE("[%s] ERROR(0x%08x)", mCodec->mComponentName.c_str(), data1);
// verify OMX component sends back an error we expect.
@@ -6134,6 +6170,13 @@
return;
}
+ int32_t cvo;
+ if (mCodec->mNativeWindow != NULL && buffer != NULL &&
+ buffer->meta()->findInt32("cvo", &cvo)) {
+ ALOGV("cvo(%d) found in buffer #%u", cvo, bufferID);
+ setNativeWindowRotation(mCodec->mNativeWindow.get(), cvo);
+ }
+
info->mStatus = BufferInfo::OWNED_BY_US;
info->mData = buffer;
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index c180edf..16977d7 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -211,7 +211,7 @@
],
static_libs: [
- "librenderengine",
+ "librenderfright",
],
export_include_dirs: [
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 9b3f420..bcf418a 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -46,88 +46,6 @@
static const int64_t CAMERA_SOURCE_TIMEOUT_NS = 3000000000LL;
-struct CameraSourceListener : public CameraListener {
- explicit CameraSourceListener(const sp<CameraSource> &source);
-
- virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2);
- virtual void postData(int32_t msgType, const sp<IMemory> &dataPtr,
- camera_frame_metadata_t *metadata);
-
- virtual void postDataTimestamp(
- nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
-
- virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle);
-
- virtual void postRecordingFrameHandleTimestampBatch(
- const std::vector<nsecs_t>& timestamps,
- const std::vector<native_handle_t*>& handles);
-
-protected:
- virtual ~CameraSourceListener();
-
-private:
- wp<CameraSource> mSource;
-
- CameraSourceListener(const CameraSourceListener &);
- CameraSourceListener &operator=(const CameraSourceListener &);
-};
-
-CameraSourceListener::CameraSourceListener(const sp<CameraSource> &source)
- : mSource(source) {
-}
-
-CameraSourceListener::~CameraSourceListener() {
-}
-
-void CameraSourceListener::notify(int32_t msgType, int32_t ext1, int32_t ext2) {
- UNUSED_UNLESS_VERBOSE(msgType);
- UNUSED_UNLESS_VERBOSE(ext1);
- UNUSED_UNLESS_VERBOSE(ext2);
- ALOGV("notify(%d, %d, %d)", msgType, ext1, ext2);
-}
-
-void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr,
- camera_frame_metadata_t * /* metadata */) {
- ALOGV("postData(%d, ptr:%p, size:%zu)",
- msgType, dataPtr->unsecurePointer(), dataPtr->size());
-
- sp<CameraSource> source = mSource.promote();
- if (source.get() != NULL) {
- source->dataCallback(msgType, dataPtr);
- }
-}
-
-void CameraSourceListener::postDataTimestamp(
- nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) {
-
- sp<CameraSource> source = mSource.promote();
- if (source.get() != NULL) {
- source->dataCallbackTimestamp(timestamp/1000, msgType, dataPtr);
- }
-}
-
-void CameraSourceListener::postRecordingFrameHandleTimestamp(nsecs_t timestamp,
- native_handle_t* handle) {
- sp<CameraSource> source = mSource.promote();
- if (source.get() != nullptr) {
- source->recordingFrameHandleCallbackTimestamp(timestamp/1000, handle);
- }
-}
-
-void CameraSourceListener::postRecordingFrameHandleTimestampBatch(
- const std::vector<nsecs_t>& timestamps,
- const std::vector<native_handle_t*>& handles) {
- sp<CameraSource> source = mSource.promote();
- if (source.get() != nullptr) {
- int n = timestamps.size();
- std::vector<nsecs_t> modifiedTimestamps(n);
- for (int i = 0; i < n; i++) {
- modifiedTimestamps[i] = timestamps[i] / 1000;
- }
- source->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
- }
-}
-
static int32_t getColorFormat(const char* colorFormat) {
if (!colorFormat) {
ALOGE("Invalid color format");
@@ -169,16 +87,6 @@
return -1;
}
-CameraSource *CameraSource::Create(const String16 &clientName) {
- Size size;
- size.width = -1;
- size.height = -1;
-
- sp<hardware::ICamera> camera;
- return new CameraSource(camera, NULL, 0, clientName, Camera::USE_CALLING_UID,
- Camera::USE_CALLING_PID, size, -1, NULL, false);
-}
-
// static
CameraSource *CameraSource::CreateFromCamera(
const sp<hardware::ICamera>& camera,
@@ -189,12 +97,10 @@
pid_t clientPid,
Size videoSize,
int32_t frameRate,
- const sp<IGraphicBufferProducer>& surface,
- bool storeMetaDataInVideoBuffers) {
+ const sp<IGraphicBufferProducer>& surface) {
CameraSource *source = new CameraSource(camera, proxy, cameraId,
- clientName, clientUid, clientPid, videoSize, frameRate, surface,
- storeMetaDataInVideoBuffers);
+ clientName, clientUid, clientPid, videoSize, frameRate, surface);
return source;
}
@@ -207,8 +113,7 @@
pid_t clientPid,
Size videoSize,
int32_t frameRate,
- const sp<IGraphicBufferProducer>& surface,
- bool storeMetaDataInVideoBuffers)
+ const sp<IGraphicBufferProducer>& surface)
: mCameraFlags(0),
mNumInputBuffers(0),
mVideoFrameRate(-1),
@@ -231,8 +136,7 @@
mInitCheck = init(camera, proxy, cameraId,
clientName, clientUid, clientPid,
- videoSize, frameRate,
- storeMetaDataInVideoBuffers);
+ videoSize, frameRate);
if (mInitCheck != OK) releaseCamera();
}
@@ -531,15 +435,13 @@
uid_t clientUid,
pid_t clientPid,
Size videoSize,
- int32_t frameRate,
- bool storeMetaDataInVideoBuffers) {
+ int32_t frameRate) {
ALOGV("init");
status_t err = OK;
int64_t token = IPCThreadState::self()->clearCallingIdentity();
err = initWithCameraAccess(camera, proxy, cameraId, clientName, clientUid, clientPid,
- videoSize, frameRate,
- storeMetaDataInVideoBuffers);
+ videoSize, frameRate);
IPCThreadState::self()->restoreCallingIdentity(token);
return err;
}
@@ -626,8 +528,7 @@
uid_t clientUid,
pid_t clientPid,
Size videoSize,
- int32_t frameRate,
- bool storeMetaDataInVideoBuffers) {
+ int32_t frameRate) {
ALOGV("initWithCameraAccess");
status_t err = OK;
@@ -667,24 +568,12 @@
CHECK_EQ((status_t)OK, mCamera->setPreviewTarget(mSurface));
}
- // By default, store real data in video buffers.
- mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
- if (storeMetaDataInVideoBuffers) {
- if (OK == mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE)) {
- mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE;
- } else if (OK == mCamera->setVideoBufferMode(
- hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA)) {
- mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA;
- }
- }
-
- if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV) {
- err = mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV);
- if (err != OK) {
- ALOGE("%s: Setting video buffer mode to VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV failed: "
- "%s (err=%d)", __FUNCTION__, strerror(-err), err);
- return err;
- }
+ // Use buffer queue to receive video buffers from camera
+ err = mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
+ if (err != OK) {
+ ALOGE("%s: Setting video buffer mode to VIDEO_BUFFER_MODE_BUFFER_QUEUE failed: "
+ "%s (err=%d)", __FUNCTION__, strerror(-err), err);
+ return err;
}
int64_t glitchDurationUs = (1000000LL / mVideoFrameRate);
@@ -724,54 +613,26 @@
int64_t token = IPCThreadState::self()->clearCallingIdentity();
status_t err;
- if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
- // Initialize buffer queue.
- err = initBufferQueue(mVideoSize.width, mVideoSize.height, mEncoderFormat,
- (android_dataspace_t)mEncoderDataSpace,
- mNumInputBuffers > 0 ? mNumInputBuffers : 1);
- if (err != OK) {
- ALOGE("%s: Failed to initialize buffer queue: %s (err=%d)", __FUNCTION__,
- strerror(-err), err);
- return err;
- }
- } else {
- if (mNumInputBuffers > 0) {
- err = mCamera->sendCommand(
- CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);
-
- // This could happen for CameraHAL1 clients; thus the failure is
- // not a fatal error
- if (err != OK) {
- ALOGW("Failed to set video buffer count to %d due to %d",
- mNumInputBuffers, err);
- }
- }
-
- err = mCamera->sendCommand(
- CAMERA_CMD_SET_VIDEO_FORMAT, mEncoderFormat, mEncoderDataSpace);
-
- // This could happen for CameraHAL1 clients; thus the failure is
- // not a fatal error
- if (err != OK) {
- ALOGW("Failed to set video encoder format/dataspace to %d, %d due to %d",
- mEncoderFormat, mEncoderDataSpace, err);
- }
-
- // Create memory heap to store buffers as VideoNativeMetadata.
- createVideoBufferMemoryHeap(sizeof(VideoNativeHandleMetadata), kDefaultVideoBufferCount);
+ // Initialize buffer queue.
+ err = initBufferQueue(mVideoSize.width, mVideoSize.height, mEncoderFormat,
+ (android_dataspace_t)mEncoderDataSpace,
+ mNumInputBuffers > 0 ? mNumInputBuffers : 1);
+ if (err != OK) {
+ ALOGE("%s: Failed to initialize buffer queue: %s (err=%d)", __FUNCTION__,
+ strerror(-err), err);
+ return err;
}
+ // Start data flow
err = OK;
if (mCameraFlags & FLAGS_HOT_CAMERA) {
mCamera->unlock();
mCamera.clear();
- if ((err = mCameraRecordingProxy->startRecording(
- new ProxyListener(this))) != OK) {
+ if ((err = mCameraRecordingProxy->startRecording()) != OK) {
ALOGE("Failed to start recording, received error: %s (%d)",
strerror(-err), err);
}
} else {
- mCamera->setListener(new CameraSourceListener(this));
mCamera->startRecording();
if (!mCamera->recordingEnabled()) {
err = -EINVAL;
@@ -836,7 +697,6 @@
}
} else {
if (mCamera != 0) {
- mCamera->setListener(NULL);
mCamera->stopRecording();
}
}
@@ -935,97 +795,31 @@
void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
ALOGV("releaseRecordingFrame");
- if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
- // Return the buffer to buffer queue in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = frame->getMemory(&offset, &size);
- if (heap->getHeapID() != mMemoryHeapBase->getHeapID()) {
- ALOGE("%s: Mismatched heap ID, ignoring release (got %x, expected %x)", __FUNCTION__,
- heap->getHeapID(), mMemoryHeapBase->getHeapID());
- return;
- }
-
- VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
- (uint8_t*)heap->getBase() + offset);
-
- // Find the corresponding buffer item for the native window buffer.
- ssize_t index = mReceivedBufferItemMap.indexOfKey(payload->pBuffer);
- if (index == NAME_NOT_FOUND) {
- ALOGE("%s: Couldn't find buffer item for %p", __FUNCTION__, payload->pBuffer);
- return;
- }
-
- BufferItem buffer = mReceivedBufferItemMap.valueAt(index);
- mReceivedBufferItemMap.removeItemsAt(index);
- mVideoBufferConsumer->releaseBuffer(buffer);
- mMemoryBases.push_back(frame);
- mMemoryBaseAvailableCond.signal();
- } else {
- native_handle_t* handle = nullptr;
-
- // Check if frame contains a VideoNativeHandleMetadata.
- if (frame->size() == sizeof(VideoNativeHandleMetadata)) {
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata *metadata =
- (VideoNativeHandleMetadata*)(frame->unsecurePointer());
- if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
- handle = metadata->pHandle;
- }
- }
-
- if (handle != nullptr) {
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = frame->getMemory(&offset, &size);
- if (heap->getHeapID() != mMemoryHeapBase->getHeapID()) {
- ALOGE("%s: Mismatched heap ID, ignoring release (got %x, expected %x)",
- __FUNCTION__, heap->getHeapID(), mMemoryHeapBase->getHeapID());
- return;
- }
- uint32_t batchSize = 0;
- {
- Mutex::Autolock autoLock(mBatchLock);
- if (mInflightBatchSizes.size() > 0) {
- batchSize = mInflightBatchSizes[0];
- }
- }
- if (batchSize == 0) { // return buffers one by one
- // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
- releaseRecordingFrameHandle(handle);
- mMemoryBases.push_back(frame);
- mMemoryBaseAvailableCond.signal();
- } else { // Group buffers in batch then return
- Mutex::Autolock autoLock(mBatchLock);
- mInflightReturnedHandles.push_back(handle);
- mInflightReturnedMemorys.push_back(frame);
- if (mInflightReturnedHandles.size() == batchSize) {
- releaseRecordingFrameHandleBatch(mInflightReturnedHandles);
-
- mInflightBatchSizes.pop_front();
- mInflightReturnedHandles.clear();
- for (const auto& mem : mInflightReturnedMemorys) {
- mMemoryBases.push_back(mem);
- mMemoryBaseAvailableCond.signal();
- }
- mInflightReturnedMemorys.clear();
- }
- }
-
- } else if (mCameraRecordingProxy != nullptr) {
- // mCamera is created by application. Return the frame back to camera via camera
- // recording proxy.
- mCameraRecordingProxy->releaseRecordingFrame(frame);
- } else if (mCamera != nullptr) {
- // mCamera is created by CameraSource. Return the frame directly back to camera.
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->releaseRecordingFrame(frame);
- IPCThreadState::self()->restoreCallingIdentity(token);
- }
+ // Return the buffer to buffer queue in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
+ ssize_t offset;
+ size_t size;
+ sp<IMemoryHeap> heap = frame->getMemory(&offset, &size);
+ if (heap->getHeapID() != mMemoryHeapBase->getHeapID()) {
+ ALOGE("%s: Mismatched heap ID, ignoring release (got %x, expected %x)", __FUNCTION__,
+ heap->getHeapID(), mMemoryHeapBase->getHeapID());
+ return;
}
+
+ VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
+ (uint8_t*)heap->getBase() + offset);
+
+ // Find the corresponding buffer item for the native window buffer.
+ ssize_t index = mReceivedBufferItemMap.indexOfKey(payload->pBuffer);
+ if (index == NAME_NOT_FOUND) {
+ ALOGE("%s: Couldn't find buffer item for %p", __FUNCTION__, payload->pBuffer);
+ return;
+ }
+
+ BufferItem buffer = mReceivedBufferItemMap.valueAt(index);
+ mReceivedBufferItemMap.removeItemsAt(index);
+ mVideoBufferConsumer->releaseBuffer(buffer);
+ mMemoryBases.push_back(frame);
+ mMemoryBaseAvailableCond.signal();
}
void CameraSource::releaseQueuedFrames() {
@@ -1181,152 +975,6 @@
return false;
}
-void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
- int32_t msgType __unused, const sp<IMemory> &data) {
- ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
- Mutex::Autolock autoLock(mLock);
-
- if (shouldSkipFrameLocked(timestampUs)) {
- releaseOneRecordingFrame(data);
- return;
- }
-
- ++mNumFramesReceived;
-
- CHECK(data != NULL && data->size() > 0);
- mFramesReceived.push_back(data);
- int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
- mFrameTimes.push_back(timeUs);
- ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
- mStartTimeUs, timeUs);
- mFrameAvailableCondition.signal();
-}
-
-void CameraSource::releaseRecordingFrameHandle(native_handle_t* handle) {
- if (mCameraRecordingProxy != nullptr) {
- mCameraRecordingProxy->releaseRecordingFrameHandle(handle);
- } else if (mCamera != nullptr) {
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->releaseRecordingFrameHandle(handle);
- IPCThreadState::self()->restoreCallingIdentity(token);
- } else {
- native_handle_close(handle);
- native_handle_delete(handle);
- }
-}
-
-void CameraSource::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
- if (mCameraRecordingProxy != nullptr) {
- mCameraRecordingProxy->releaseRecordingFrameHandleBatch(handles);
- } else if (mCamera != nullptr) {
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- mCamera->releaseRecordingFrameHandleBatch(handles);
- IPCThreadState::self()->restoreCallingIdentity(token);
- } else {
- for (auto& handle : handles) {
- native_handle_close(handle);
- native_handle_delete(handle);
- }
- }
-}
-
-void CameraSource::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
- native_handle_t* handle) {
- ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
- Mutex::Autolock autoLock(mLock);
- if (handle == nullptr) return;
-
- if (shouldSkipFrameLocked(timestampUs)) {
- releaseRecordingFrameHandle(handle);
- return;
- }
-
- while (mMemoryBases.empty()) {
- if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
- TIMED_OUT) {
- ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
- releaseRecordingFrameHandle(handle);
- return;
- }
- }
-
- ++mNumFramesReceived;
-
- sp<IMemory> data = *mMemoryBases.begin();
- mMemoryBases.erase(mMemoryBases.begin());
-
- // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->unsecurePointer());
- metadata->eType = kMetadataBufferTypeNativeHandleSource;
- metadata->pHandle = handle;
-
- mFramesReceived.push_back(data);
- int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
- mFrameTimes.push_back(timeUs);
- ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
- mFrameAvailableCondition.signal();
-}
-
-void CameraSource::recordingFrameHandleCallbackTimestampBatch(
- const std::vector<int64_t>& timestampsUs,
- const std::vector<native_handle_t*>& handles) {
- size_t n = timestampsUs.size();
- if (n != handles.size()) {
- ALOGE("%s: timestampsUs(%zu) and handles(%zu) size mismatch!",
- __FUNCTION__, timestampsUs.size(), handles.size());
- }
-
- Mutex::Autolock autoLock(mLock);
- int batchSize = 0;
- for (size_t i = 0; i < n; i++) {
- int64_t timestampUs = timestampsUs[i];
- native_handle_t* handle = handles[i];
-
- ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
- if (handle == nullptr) continue;
-
- if (shouldSkipFrameLocked(timestampUs)) {
- releaseRecordingFrameHandle(handle);
- continue;
- }
-
- while (mMemoryBases.empty()) {
- if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
- TIMED_OUT) {
- ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
- releaseRecordingFrameHandle(handle);
- continue;
- }
- }
- ++batchSize;
- ++mNumFramesReceived;
- sp<IMemory> data = *mMemoryBases.begin();
- mMemoryBases.erase(mMemoryBases.begin());
-
- // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->unsecurePointer());
- metadata->eType = kMetadataBufferTypeNativeHandleSource;
- metadata->pHandle = handle;
-
- mFramesReceived.push_back(data);
- int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
- mFrameTimes.push_back(timeUs);
- ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
-
- }
- if (batchSize > 0) {
- Mutex::Autolock autoLock(mBatchLock);
- mInflightBatchSizes.push_back(batchSize);
- }
- for (int i = 0; i < batchSize; i++) {
- mFrameAvailableCondition.signal();
- }
-}
-
CameraSource::BufferQueueListener::BufferQueueListener(const sp<BufferItemConsumer>& consumer,
const sp<CameraSource>& cameraSource) {
mConsumer = consumer;
@@ -1417,41 +1065,7 @@
MetadataBufferType CameraSource::metaDataStoredInVideoBuffers() const {
ALOGV("metaDataStoredInVideoBuffers");
- // Output buffers will contain metadata if camera sends us buffer in metadata mode or via
- // buffer queue.
- switch (mVideoBufferMode) {
- case hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA:
- return kMetadataBufferTypeNativeHandleSource;
- case hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE:
- return kMetadataBufferTypeANWBuffer;
- default:
- return kMetadataBufferTypeInvalid;
- }
-}
-
-CameraSource::ProxyListener::ProxyListener(const sp<CameraSource>& source) {
- mSource = source;
-}
-
-void CameraSource::ProxyListener::dataCallbackTimestamp(
- nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) {
- mSource->dataCallbackTimestamp(timestamp / 1000, msgType, dataPtr);
-}
-
-void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
- native_handle_t* handle) {
- mSource->recordingFrameHandleCallbackTimestamp(timestamp / 1000, handle);
-}
-
-void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestampBatch(
- const std::vector<int64_t>& timestampsUs,
- const std::vector<native_handle_t*>& handles) {
- int n = timestampsUs.size();
- std::vector<nsecs_t> modifiedTimestamps(n);
- for (int i = 0; i < n; i++) {
- modifiedTimestamps[i] = timestampsUs[i] / 1000;
- }
- mSource->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
+ return kMetadataBufferTypeANWBuffer;
}
void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index e0a6eb3..50a512f 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -45,15 +45,13 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenFrameCaptureUs,
- bool storeMetaDataInVideoBuffers) {
+ int64_t timeBetweenFrameCaptureUs) {
CameraSourceTimeLapse *source = new
CameraSourceTimeLapse(camera, proxy, cameraId,
clientName, clientUid, clientPid,
videoSize, videoFrameRate, surface,
- timeBetweenFrameCaptureUs,
- storeMetaDataInVideoBuffers);
+ timeBetweenFrameCaptureUs);
if (source != NULL) {
if (source->initCheck() != OK) {
@@ -74,11 +72,9 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenFrameCaptureUs,
- bool storeMetaDataInVideoBuffers)
+ int64_t timeBetweenFrameCaptureUs)
: CameraSource(camera, proxy, cameraId, clientName, clientUid, clientPid,
- videoSize, videoFrameRate, surface,
- storeMetaDataInVideoBuffers),
+ videoSize, videoFrameRate, surface),
mTimeBetweenTimeLapseVideoFramesUs(1E6/videoFrameRate),
mLastTimeLapseFrameRealTimestampUs(0),
mSkipCurrentFrame(false) {
@@ -173,12 +169,6 @@
ALOGV("signalBufferReturned");
Mutex::Autolock autoLock(mQuickStopLock);
if (mQuickStop && (buffer == mLastReadBufferCopy)) {
- if (metaDataStoredInVideoBuffers() == kMetadataBufferTypeNativeHandleSource) {
- native_handle_t* handle = (
- (VideoNativeHandleMetadata*)(mLastReadBufferCopy->data()))->pHandle;
- native_handle_close(handle);
- native_handle_delete(handle);
- }
buffer->setObserver(NULL);
buffer->release();
mLastReadBufferCopy = NULL;
@@ -191,8 +181,7 @@
void createMediaBufferCopy(
const MediaBufferBase& sourceBuffer,
int64_t frameTime,
- MediaBufferBase **newBuffer,
- int32_t videoBufferMode) {
+ MediaBufferBase **newBuffer) {
ALOGV("createMediaBufferCopy");
size_t sourceSize = sourceBuffer.size();
@@ -203,19 +192,13 @@
(*newBuffer)->meta_data().setInt64(kKeyTime, frameTime);
- if (videoBufferMode == kMetadataBufferTypeNativeHandleSource) {
- ((VideoNativeHandleMetadata*)((*newBuffer)->data()))->pHandle =
- native_handle_clone(
- ((VideoNativeHandleMetadata*)(sourceBuffer.data()))->pHandle);
- }
}
void CameraSourceTimeLapse::fillLastReadBufferCopy(MediaBufferBase& sourceBuffer) {
ALOGV("fillLastReadBufferCopy");
int64_t frameTime;
CHECK(sourceBuffer.meta_data().findInt64(kKeyTime, &frameTime));
- createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy,
- metaDataStoredInVideoBuffers());
+ createMediaBufferCopy(sourceBuffer, frameTime, &mLastReadBufferCopy);
mLastReadBufferCopy->add_ref();
mLastReadBufferCopy->setObserver(this);
}
@@ -240,19 +223,6 @@
}
}
-sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy(
- const sp<IMemory> &source_data) {
-
- ALOGV("createIMemoryCopy");
- size_t source_size = source_data->size();
- void* source_pointer = source_data->unsecurePointer();
-
- sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(source_size);
- sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, source_size);
- memcpy(newMemory->unsecurePointer(), source_pointer, source_size);
- return newMemory;
-}
-
bool CameraSourceTimeLapse::skipCurrentFrame(int64_t /* timestampUs */) {
ALOGV("skipCurrentFrame");
if (mSkipCurrentFrame) {
@@ -318,31 +288,6 @@
return false;
}
-void CameraSourceTimeLapse::dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
- const sp<IMemory> &data) {
- ALOGV("dataCallbackTimestamp");
- mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
- CameraSource::dataCallbackTimestamp(timestampUs, msgType, data);
-}
-
-void CameraSourceTimeLapse::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
- native_handle_t* handle) {
- ALOGV("recordingFrameHandleCallbackTimestamp");
- mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
- CameraSource::recordingFrameHandleCallbackTimestamp(timestampUs, handle);
-}
-
-void CameraSourceTimeLapse::recordingFrameHandleCallbackTimestampBatch(
- const std::vector<int64_t>& timestampsUs,
- const std::vector<native_handle_t*>& handles) {
- ALOGV("recordingFrameHandleCallbackTimestampBatch");
- int n = timestampsUs.size();
- for (int i = 0; i < n; i++) {
- // Don't do batching for CameraSourceTimeLapse for now
- recordingFrameHandleCallbackTimestamp(timestampsUs[i], handles[i]);
- }
-}
-
void CameraSourceTimeLapse::processBufferQueueFrame(BufferItem& buffer) {
ALOGV("processBufferQueueFrame");
int64_t timestampUs = buffer.mTimestamp / 1000;
diff --git a/media/libstagefright/FrameCaptureProcessor.cpp b/media/libstagefright/FrameCaptureProcessor.cpp
index 63238bc..8cd7f82 100644
--- a/media/libstagefright/FrameCaptureProcessor.cpp
+++ b/media/libstagefright/FrameCaptureProcessor.cpp
@@ -164,16 +164,15 @@
if (err != OK) {
ALOGE("drawLayers returned err %d", err);
- return err;
+ } else {
+ err = fence->wait(500);
+ if (err != OK) {
+ ALOGW("wait for fence returned err %d", err);
+ err = OK;
+ }
}
-
- err = fence->wait(500);
- if (err != OK) {
- ALOGW("wait for fence returned err %d", err);
- }
-
mRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL);
- return OK;
+ return err;
}
void FrameCaptureProcessor::onMessageReceived(const sp<AMessage> &msg) {
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 734f5bb..965b6dd 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -19,6 +19,7 @@
#include "include/FrameDecoder.h"
#include "include/FrameCaptureLayer.h"
+#include "include/HevcUtils.h"
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <gui/Surface.h>
@@ -456,7 +457,8 @@
const sp<IMediaSource> &source)
: FrameDecoder(componentName, trackMeta, source),
mFrame(NULL),
- mIsAvcOrHevc(false),
+ mIsAvc(false),
+ mIsHevc(false),
mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
mTargetTimeUs(-1LL),
mDefaultSampleDurationUs(0) {
@@ -479,8 +481,8 @@
return NULL;
}
- mIsAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
- || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+ mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
+ mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
if (frameTimeUs < 0) {
int64_t thumbNailTime = -1ll;
@@ -543,8 +545,10 @@
ALOGV("Seeking closest: targetTimeUs=%lld", (long long)mTargetTimeUs);
}
- if (mIsAvcOrHevc && !isSeekingClosest
- && IsIDR(codecBuffer->data(), codecBuffer->size())) {
+ if (!isSeekingClosest
+ && ((mIsAvc && IsIDR(codecBuffer->data(), codecBuffer->size()))
+ || (mIsHevc && IsIDR(
+ codecBuffer->data(), codecBuffer->size())))) {
// Only need to decode one IDR frame, unless we're seeking with CLOSEST
// option, in which case we need to actually decode to targetTimeUs.
*flags |= MediaCodec::BUFFER_FLAG_EOS;
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 0e4eae2..5f9c20e 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -34,7 +34,10 @@
namespace android {
-static const uint8_t kHevcNalUnitTypes[5] = {
+static const uint8_t kHevcNalUnitTypes[8] = {
+ kHevcNalUnitTypeCodedSliceIdr,
+ kHevcNalUnitTypeCodedSliceIdrNoLP,
+ kHevcNalUnitTypeCodedSliceCra,
kHevcNalUnitTypeVps,
kHevcNalUnitTypeSps,
kHevcNalUnitTypePps,
@@ -377,6 +380,54 @@
return reader.overRead() ? ERROR_MALFORMED : OK;
}
+void HevcParameterSets::FindHEVCDimensions(const sp<ABuffer> &SpsBuffer, int32_t *width, int32_t *height)
+{
+ ALOGD("FindHEVCDimensions");
+ // See Rec. ITU-T H.265 v3 (04/2015) Chapter 7.3.2.2 for reference
+ ABitReader reader(SpsBuffer->data() + 1, SpsBuffer->size() - 1);
+ // Skip sps_video_parameter_set_id
+ reader.skipBits(4);
+ uint8_t maxSubLayersMinus1 = reader.getBitsWithFallback(3, 0);
+ // Skip sps_temporal_id_nesting_flag;
+ reader.skipBits(1);
+ // Skip general profile
+ reader.skipBits(96);
+ if (maxSubLayersMinus1 > 0) {
+ bool subLayerProfilePresentFlag[8];
+ bool subLayerLevelPresentFlag[8];
+ for (int i = 0; i < maxSubLayersMinus1; ++i) {
+ subLayerProfilePresentFlag[i] = reader.getBitsWithFallback(1, 0);
+ subLayerLevelPresentFlag[i] = reader.getBitsWithFallback(1, 0);
+ }
+ // Skip reserved
+ reader.skipBits(2 * (8 - maxSubLayersMinus1));
+ for (int i = 0; i < maxSubLayersMinus1; ++i) {
+ if (subLayerProfilePresentFlag[i]) {
+ // Skip profile
+ reader.skipBits(88);
+ }
+ if (subLayerLevelPresentFlag[i]) {
+ // Skip sub_layer_level_idc[i]
+ reader.skipBits(8);
+ }
+ }
+ }
+ // Skip sps_seq_parameter_set_id
+ skipUE(&reader);
+ uint8_t chromaFormatIdc = parseUEWithFallback(&reader, 0);
+ if (chromaFormatIdc == 3) {
+ // Skip separate_colour_plane_flag
+ reader.skipBits(1);
+ }
+ skipUE(&reader);
+ skipUE(&reader);
+
+ // pic_width_in_luma_samples
+ *width = parseUEWithFallback(&reader, 0);
+ // pic_height_in_luma_samples
+ *height = parseUEWithFallback(&reader, 0);
+}
+
status_t HevcParameterSets::parsePps(
const uint8_t* data UNUSED_PARAM, size_t size UNUSED_PARAM) {
return OK;
@@ -491,4 +542,26 @@
return OK;
}
+bool HevcParameterSets::IsHevcIDR(const uint8_t *data, size_t size) {
+ bool foundIDR = false;
+ const uint8_t *nalStart;
+ size_t nalSize;
+ while (!foundIDR && getNextNALUnit(&data, &size, &nalStart, &nalSize, true) == OK) {
+ if (nalSize == 0) {
+ ALOGE("Encountered zero-length HEVC NAL");
+ return false;
+ }
+
+ uint8_t nalType = (nalStart[0] & 0x7E) >> 1;
+ switch(nalType) {
+ case kHevcNalUnitTypeCodedSliceIdr:
+ case kHevcNalUnitTypeCodedSliceIdrNoLP:
+ case kHevcNalUnitTypeCodedSliceCra:
+ foundIDR = true;
+ break;
+ }
+ }
+
+ return foundIDR;
+}
} // namespace android
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index d99596e..0af97df 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -542,6 +542,7 @@
mNumGrids = 0;
mNextItemId = kItemIdBase;
mHasRefs = false;
+ mResetStatus = OK;
mPreAllocFirstTime = true;
mPrevAllTracksTotalMetaDataSizeEstimate = 0;
@@ -1027,6 +1028,11 @@
return OK;
}
+status_t MPEG4Writer::stop() {
+ // If reset was in progress, wait for it to complete.
+ return reset(true, true);
+}
+
status_t MPEG4Writer::pause() {
ALOGW("MPEG4Writer: pause is not supported");
return ERROR_UNSUPPORTED;
@@ -1159,8 +1165,12 @@
return err;
}
-void MPEG4Writer::finishCurrentSession() {
- reset(false /* stopSource */);
+status_t MPEG4Writer::finishCurrentSession() {
+ ALOGV("finishCurrentSession");
+ /* Don't wait if reset is in progress already, that avoids deadlock
+ * as finishCurrentSession() is called from control looper thread.
+ */
+ return reset(false, false);
}
status_t MPEG4Writer::switchFd() {
@@ -1182,11 +1192,32 @@
return err;
}
-status_t MPEG4Writer::reset(bool stopSource) {
+status_t MPEG4Writer::reset(bool stopSource, bool waitForAnyPreviousCallToComplete) {
ALOGD("reset()");
- std::lock_guard<std::mutex> l(mResetMutex);
+ std::unique_lock<std::mutex> lk(mResetMutex, std::defer_lock);
+ if (waitForAnyPreviousCallToComplete) {
+ /* stop=>reset from client needs the return value of reset call, hence wait here
+ * if a reset was in process already.
+ */
+ lk.lock();
+ } else if (!lk.try_lock()) {
+ /* Internal reset from control looper thread shouldn't wait for any reset in
+ * process already.
+ */
+ return INVALID_OPERATION;
+ }
+
+ if (mResetStatus != OK) {
+ /* Don't have to proceed if reset has finished with an error before.
+ * If there was no error before, proceeding reset would be harmless, as the
+ * the call would return from the mInitCheck condition below.
+ */
+ return mResetStatus;
+ }
+
if (mInitCheck != OK) {
- return OK;
+ mResetStatus = OK;
+ return mResetStatus;
} else {
if (!mWriterThreadStarted ||
!mStarted) {
@@ -1198,7 +1229,8 @@
if (writerErr != OK) {
retErr = writerErr;
}
- return retErr;
+ mResetStatus = retErr;
+ return mResetStatus;
}
}
@@ -1245,7 +1277,8 @@
if (err != OK && err != ERROR_MALFORMED) {
// Ignoring release() return value as there was an "err" already.
release();
- return err;
+ mResetStatus = err;
+ return mResetStatus;
}
// Fix up the size of the 'mdat' chunk.
@@ -1303,7 +1336,8 @@
if (err == OK) {
err = errRelease;
}
- return err;
+ mResetStatus = err;
+ return mResetStatus;
}
/*
@@ -2454,31 +2488,27 @@
int fd = mNextFd;
mNextFd = -1;
mLock.unlock();
- finishCurrentSession();
- initInternal(fd, false /*isFirstSession*/);
- start(mStartMeta.get());
- mSwitchPending = false;
- notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_NEXT_OUTPUT_FILE_STARTED, 0);
+ if (finishCurrentSession() == OK) {
+ initInternal(fd, false /*isFirstSession*/);
+ status_t status = start(mStartMeta.get());
+ mSwitchPending = false;
+ if (status == OK) {
+ notify(MEDIA_RECORDER_EVENT_INFO,
+ MEDIA_RECORDER_INFO_NEXT_OUTPUT_FILE_STARTED, 0);
+ }
+ }
break;
}
- // ::write() or lseek64() wasn't a success, file could be malformed
+ /* ::write() or lseek64() wasn't a success, file could be malformed.
+ * Or fallocate() failed. reset() and notify client on both the cases.
+ */
+ case kWhatFallocateError: // fallthrough
case kWhatIOError: {
- ALOGE("kWhatIOError");
int32_t err;
CHECK(msg->findInt32("err", &err));
- // Stop tracks' threads and main writer thread.
- stop();
- notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_RECORDER_ERROR_UNKNOWN, err);
- break;
- }
- // fallocate() failed, hence stop() and notify app.
- case kWhatFallocateError: {
- ALOGE("kWhatFallocateError");
- int32_t err;
- CHECK(msg->findInt32("err", &err));
- // Stop tracks' threads and main writer thread.
- stop();
- //TODO: introduce a suitable MEDIA_RECORDER_ERROR_* instead MEDIA_RECORDER_ERROR_UNKNOWN?
+ // If reset already in process, don't wait for it complete to avoid deadlock.
+ reset(true, false);
+ //TODO: new MEDIA_RECORDER_ERROR_**** instead MEDIA_RECORDER_ERROR_UNKNOWN ?
notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_RECORDER_ERROR_UNKNOWN, err);
break;
}
@@ -2486,7 +2516,7 @@
* Responding with other options could be added later if required.
*/
case kWhatNoIOErrorSoFar: {
- ALOGD("kWhatNoIOErrorSoFar");
+ ALOGV("kWhatNoIOErrorSoFar");
sp<AMessage> response = new AMessage;
response->setInt32("err", OK);
sp<AReplyToken> replyID;
@@ -4715,10 +4745,18 @@
// This is useful if the pixel is not square
void MPEG4Writer::Track::writePaspBox() {
- mOwner->beginBox("pasp");
- mOwner->writeInt32(1 << 16); // hspacing
- mOwner->writeInt32(1 << 16); // vspacing
- mOwner->endBox(); // pasp
+ // Do not write 'pasp' box unless the track format specifies it.
+ // According to ISO/IEC 14496-12 (ISO base media file format), 'pasp' box
+ // is optional. If present, it overrides the SAR from the video CSD. Only
+ // set it if the track format specifically requests that.
+ int32_t hSpacing, vSpacing;
+ if (mMeta->findInt32(kKeySARWidth, &hSpacing) && (hSpacing > 0)
+ && mMeta->findInt32(kKeySARHeight, &vSpacing) && (vSpacing > 0)) {
+ mOwner->beginBox("pasp");
+ mOwner->writeInt32(hSpacing); // hspacing
+ mOwner->writeInt32(vSpacing); // vspacing
+ mOwner->endBox(); // pasp
+ }
}
int64_t MPEG4Writer::Track::getStartTimeOffsetTimeUs() const {
diff --git a/media/libstagefright/MediaAdapter.cpp b/media/libstagefright/MediaAdapter.cpp
index f1b6e8c..5a2a910 100644
--- a/media/libstagefright/MediaAdapter.cpp
+++ b/media/libstagefright/MediaAdapter.cpp
@@ -114,6 +114,13 @@
return -EINVAL;
}
+ /* As mAdapterLock is unlocked while waiting for signalBufferReturned,
+ * a new buffer for the same track could be pushed from another thread
+ * in the client process, mBufferGatingMutex will help to hold that
+ * until the previous buffer is processed.
+ */
+ std::unique_lock<std::mutex> lk(mBufferGatingMutex);
+
Mutex::Autolock autoLock(mAdapterLock);
if (!mStarted) {
ALOGE("pushBuffer called before start");
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 3e191fe..1a4f3d3 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -59,6 +59,7 @@
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaCodecList.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaFilter.h>
@@ -317,7 +318,7 @@
class MediaCodec::ReleaseSurface {
public:
- ReleaseSurface() {
+ explicit ReleaseSurface(uint64_t usage) {
BufferQueue::createBufferQueue(&mProducer, &mConsumer);
mSurface = new Surface(mProducer, false /* controlledByApp */);
struct ConsumerListener : public BnConsumerListener {
@@ -328,6 +329,7 @@
sp<ConsumerListener> listener{new ConsumerListener};
mConsumer->consumerConnect(listener, false);
mConsumer->setConsumerName(String8{"MediaCodec.release"});
+ mConsumer->setConsumerUsageBits(usage);
}
const sp<Surface> &getSurface() {
@@ -2128,8 +2130,8 @@
CHECK(msg->findInt32("err", &err));
CHECK(msg->findInt32("actionCode", &actionCode));
- ALOGE("Codec reported err %#x, actionCode %d, while in state %d",
- err, actionCode, mState);
+ ALOGE("Codec reported err %#x, actionCode %d, while in state %d/%s",
+ err, actionCode, mState, stateString(mState).c_str());
if (err == DEAD_OBJECT) {
mFlags |= kFlagSawMediaServerDie;
mFlags &= ~kFlagIsComponentAllocated;
@@ -2297,8 +2299,8 @@
if (mState == RELEASING || mState == UNINITIALIZED) {
// In case a kWhatError or kWhatRelease message came in and replied,
// we log a warning and ignore.
- ALOGW("allocate interrupted by error or release, current state %d",
- mState);
+ ALOGW("allocate interrupted by error or release, current state %d/%s",
+ mState, stateString(mState).c_str());
break;
}
CHECK_EQ(mState, INITIALIZING);
@@ -2344,8 +2346,8 @@
if (mState == RELEASING || mState == UNINITIALIZED || mState == INITIALIZED) {
// In case a kWhatError or kWhatRelease message came in and replied,
// we log a warning and ignore.
- ALOGW("configure interrupted by error or release, current state %d",
- mState);
+ ALOGW("configure interrupted by error or release, current state %d/%s",
+ mState, stateString(mState).c_str());
break;
}
CHECK_EQ(mState, CONFIGURING);
@@ -2360,7 +2362,7 @@
if (mSurface != nullptr && !mAllowFrameDroppingBySurface) {
// signal frame dropping mode in the input format as this may also be
// meaningful and confusing for an encoder in a transcoder scenario
- mInputFormat->setInt32("allow-frame-drop", mAllowFrameDroppingBySurface);
+ mInputFormat->setInt32(KEY_ALLOW_FRAME_DROP, mAllowFrameDroppingBySurface);
}
sp<AMessage> interestingFormat =
(mFlags & kFlagIsEncoder) ? mOutputFormat : mInputFormat;
@@ -2492,7 +2494,8 @@
if (mState == RELEASING || mState == UNINITIALIZED) {
// In case a kWhatRelease message came in and replied,
// we log a warning and ignore.
- ALOGW("start interrupted by release, current state %d", mState);
+ ALOGW("start interrupted by release, current state %d/%s",
+ mState, stateString(mState).c_str());
break;
}
@@ -2634,7 +2637,8 @@
case kWhatStopCompleted:
{
if (mState != STOPPING) {
- ALOGW("Received kWhatStopCompleted in state %d", mState);
+ ALOGW("Received kWhatStopCompleted in state %d/%s",
+ mState, stateString(mState).c_str());
break;
}
setState(INITIALIZED);
@@ -2651,7 +2655,8 @@
case kWhatReleaseCompleted:
{
if (mState != RELEASING) {
- ALOGW("Received kWhatReleaseCompleted in state %d", mState);
+ ALOGW("Received kWhatReleaseCompleted in state %d/%s",
+ mState, stateString(mState).c_str());
break;
}
setState(UNINITIALIZED);
@@ -2681,8 +2686,8 @@
case kWhatFlushCompleted:
{
if (mState != FLUSHING) {
- ALOGW("received FlushCompleted message in state %d",
- mState);
+ ALOGW("received FlushCompleted message in state %d/%s",
+ mState, stateString(mState).c_str());
break;
}
@@ -2802,7 +2807,7 @@
}
if (obj != NULL) {
- if (!format->findInt32("allow-frame-drop", &mAllowFrameDroppingBySurface)) {
+ if (!format->findInt32(KEY_ALLOW_FRAME_DROP, &mAllowFrameDroppingBySurface)) {
// allow frame dropping by surface by default
mAllowFrameDroppingBySurface = true;
}
@@ -3029,7 +3034,13 @@
int32_t reclaimed = 0;
msg->findInt32("reclaimed", &reclaimed);
if (reclaimed) {
- mReleasedByResourceManager = true;
+ if (!mReleasedByResourceManager) {
+ // notify the async client
+ if (mFlags & kFlagIsAsync) {
+ onError(DEAD_OBJECT, ACTION_CODE_FATAL);
+ }
+ mReleasedByResourceManager = true;
+ }
int32_t force = 0;
msg->findInt32("force", &force);
@@ -3041,10 +3052,6 @@
response->setInt32("err", WOULD_BLOCK);
response->postReply(replyID);
- // notify the async client
- if (mFlags & kFlagIsAsync) {
- onError(DEAD_OBJECT, ACTION_CODE_FATAL);
- }
break;
}
}
@@ -3111,7 +3118,11 @@
if (asyncNotify != nullptr) {
if (mSurface != NULL) {
if (!mReleaseSurface) {
- mReleaseSurface.reset(new ReleaseSurface);
+ uint64_t usage = 0;
+ if (mSurface->getConsumerUsage(&usage) != OK) {
+ usage = 0;
+ }
+ mReleaseSurface.reset(new ReleaseSurface(usage));
}
if (mSurface != mReleaseSurface->getSurface()) {
status_t err = connectToSurface(mReleaseSurface->getSurface());
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 1395c27..bc656a2 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -435,6 +435,30 @@
buffer->release();
}
+status_t MediaCodecSource::setEncodingBitrate(int32_t bitRate) {
+ ALOGV("setEncodingBitrate (%d)", bitRate);
+
+ if (mEncoder == NULL) {
+ ALOGW("setEncodingBitrate (%d) : mEncoder is null", bitRate);
+ return BAD_VALUE;
+ }
+
+ sp<AMessage> params = new AMessage;
+ params->setInt32("video-bitrate", bitRate);
+
+ return mEncoder->setParameters(params);
+}
+
+status_t MediaCodecSource::requestIDRFrame() {
+ if (mEncoder == NULL) {
+ ALOGW("requestIDRFrame : mEncoder is null");
+ return BAD_VALUE;
+ } else {
+ mEncoder->requestIDRFrame();
+ return OK;
+ }
+}
+
MediaCodecSource::MediaCodecSource(
const sp<ALooper> &looper,
const sp<AMessage> &outputFormat,
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 8d9bc06..c91386d 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -177,16 +177,23 @@
status_t MediaMuxer::writeSampleData(const sp<ABuffer> &buffer, size_t trackIndex,
int64_t timeUs, uint32_t flags) {
- Mutex::Autolock autoLock(mMuxerLock);
-
if (buffer.get() == NULL) {
ALOGE("WriteSampleData() get an NULL buffer.");
return -EINVAL;
}
-
- if (mState != STARTED) {
- ALOGE("WriteSampleData() is called in invalid state %d", mState);
- return INVALID_OPERATION;
+ {
+ /* As MediaMuxer's writeSampleData handles inputs from multiple tracks,
+ * limited the scope of mMuxerLock to this inner block so that the
+ * current track's buffer does not wait until the completion
+ * of processing of previous buffer of the same or another track.
+ * It's the responsibility of individual track - MediaAdapter object
+ * to gate its buffers.
+ */
+ Mutex::Autolock autoLock(mMuxerLock);
+ if (mState != STARTED) {
+ ALOGE("WriteSampleData() is called in invalid state %d", mState);
+ return INVALID_OPERATION;
+ }
}
if (trackIndex >= mTrackList.size()) {
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 050d7c2..6245014 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -312,6 +312,27 @@
(*format)->setBuffer("pssh", buf);
}
+ // Copy over the slow-motion related metadata
+ const void *slomoMarkers;
+ size_t slomoMarkersSize;
+ if (meta->findData(kKeySlowMotionMarkers, &type, &slomoMarkers, &slomoMarkersSize)
+ && slomoMarkersSize > 0) {
+ sp<ABuffer> buf = new ABuffer(slomoMarkersSize);
+ memcpy(buf->data(), slomoMarkers, slomoMarkersSize);
+ (*format)->setBuffer("slow-motion-markers", buf);
+ }
+
+ int32_t temporalLayerCount;
+ if (meta->findInt32(kKeyTemporalLayerCount, &temporalLayerCount)
+ && temporalLayerCount > 0) {
+ (*format)->setInt32("temporal-layer-count", temporalLayerCount);
+ }
+
+ float captureFps;
+ if (meta->findFloat(kKeyCaptureFramerate, &captureFps) && captureFps > 0.0f) {
+ (*format)->setFloat("capture-rate", captureFps);
+ }
+
return OK;
}
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index c284ef7..1f569ef 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -175,6 +175,22 @@
}
}
+status_t setNativeWindowRotation(
+ ANativeWindow *nativeWindow /* nonnull */, int rotation) {
+
+ int transform = 0;
+ if ((rotation % 90) == 0) {
+ switch ((rotation / 90) & 3) {
+ case 1: transform = HAL_TRANSFORM_ROT_90; break;
+ case 2: transform = HAL_TRANSFORM_ROT_180; break;
+ case 3: transform = HAL_TRANSFORM_ROT_270; break;
+ default: transform = 0; break;
+ }
+ }
+
+ return native_window_set_buffers_transform(nativeWindow, transform);
+}
+
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */) {
status_t err = NO_ERROR;
ANativeWindowBuffer* anb = NULL;
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 5e537dd..76fc74f 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -1,4 +1,16 @@
{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ // writerTest fails about 5 out of 66
+ // { "name": "writerTest" },
+
+ { "name": "HEVCUtilsUnitTest" },
+ { "name": "ExtractorFactoryTest" }
+
+ ],
+
"presubmit": [
{
"name": "CtsMediaTestCases",
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index d04367a..3d152bc 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -769,6 +769,7 @@
{ "sei", kKeySEI },
{ "text-format-data", kKeyTextFormatData },
{ "thumbnail-csd-hevc", kKeyThumbnailHVCC },
+ { "slow-motion-markers", kKeySlowMotionMarkers },
}
};
@@ -2142,8 +2143,10 @@
}
info->sample_rate = srate;
- int32_t cmask = 0;
- if (!meta->findInt32(kKeyChannelMask, &cmask) || cmask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
+ int32_t rawChannelMask;
+ audio_channel_mask_t cmask = meta->findInt32(kKeyChannelMask, &rawChannelMask) ?
+ static_cast<audio_channel_mask_t>(rawChannelMask) : CHANNEL_MASK_USE_CHANNEL_ORDER;
+ if (cmask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
ALOGV("track of type '%s' does not publish channel mask", mime);
// Try a channel count instead
diff --git a/media/libstagefright/bqhelper/TEST_MAPPING b/media/libstagefright/bqhelper/TEST_MAPPING
new file mode 100644
index 0000000..c7f2fd8
--- /dev/null
+++ b/media/libstagefright/bqhelper/TEST_MAPPING
@@ -0,0 +1,6 @@
+// mappings for frameworks/av/media/libstagefright/bqhelper
+{
+ "presubmit": [
+ { "name": "FrameDropper_test"}
+ ]
+}
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
index 4e83059..7e2909a 100644
--- a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/FrameDropper.h
@@ -30,6 +30,8 @@
FrameDropper();
// maxFrameRate required to be positive.
+ // maxFrameRate negative causes shouldDrop() to always return false
+ // maxFrameRate == 0 is illegal
status_t setMaxFrameRate(float maxFrameRate);
// Returns false if max frame rate has not been set via setMaxFrameRate.
diff --git a/media/libstagefright/bqhelper/tests/Android.bp b/media/libstagefright/bqhelper/tests/Android.bp
index 2fbc3d6..3897689 100644
--- a/media/libstagefright/bqhelper/tests/Android.bp
+++ b/media/libstagefright/bqhelper/tests/Android.bp
@@ -1,5 +1,6 @@
cc_test {
name: "FrameDropper_test",
+ test_suites: ["device-tests"],
srcs: ["FrameDropper_test.cpp"],
diff --git a/media/libstagefright/bqhelper/tests/FrameDropper_test.cpp b/media/libstagefright/bqhelper/tests/FrameDropper_test.cpp
index 55ae77c..b18067f 100644
--- a/media/libstagefright/bqhelper/tests/FrameDropper_test.cpp
+++ b/media/libstagefright/bqhelper/tests/FrameDropper_test.cpp
@@ -110,7 +110,7 @@
};
TEST_F(FrameDropperTest, TestInvalidMaxFrameRate) {
- EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(-1.0));
+ EXPECT_EQ(OK, mFrameDropper->setMaxFrameRate(-1.0));
EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(0));
}
diff --git a/media/libstagefright/codecs/amrnb/TEST_MAPPING b/media/libstagefright/codecs/amrnb/TEST_MAPPING
new file mode 100644
index 0000000..343d08a
--- /dev/null
+++ b/media/libstagefright/codecs/amrnb/TEST_MAPPING
@@ -0,0 +1,10 @@
+// mappings for frameworks/av/media/libstagefright/codecs/amrnb
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "AmrnbDecoderTest"},
+ { "name": "AmrnbEncoderTest"}
+ ]
+}
diff --git a/media/libstagefright/codecs/amrnb/dec/test/Android.bp b/media/libstagefright/codecs/amrnb/dec/test/Android.bp
index 7a95cfa..91c9f86 100644
--- a/media/libstagefright/codecs/amrnb/dec/test/Android.bp
+++ b/media/libstagefright/codecs/amrnb/dec/test/Android.bp
@@ -17,6 +17,7 @@
cc_test {
name: "AmrnbDecoderTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs: [
"AmrnbDecoderTest.cpp",
diff --git a/media/libstagefright/codecs/amrnb/enc/test/Android.bp b/media/libstagefright/codecs/amrnb/enc/test/Android.bp
index e8982fe..7e1b561 100644
--- a/media/libstagefright/codecs/amrnb/enc/test/Android.bp
+++ b/media/libstagefright/codecs/amrnb/enc/test/Android.bp
@@ -17,6 +17,7 @@
cc_test {
name: "AmrnbEncoderTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs: [
"AmrnbEncoderTest.cpp",
diff --git a/media/libstagefright/codecs/amrwb/TEST_MAPPING b/media/libstagefright/codecs/amrwb/TEST_MAPPING
new file mode 100644
index 0000000..0278d26
--- /dev/null
+++ b/media/libstagefright/codecs/amrwb/TEST_MAPPING
@@ -0,0 +1,10 @@
+// mappings for frameworks/av/media/libstagefright/codecs/amrwb
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "AmrwbDecoderTest"}
+
+ ]
+}
diff --git a/media/libstagefright/codecs/amrwb/test/Android.bp b/media/libstagefright/codecs/amrwb/test/Android.bp
index 968215a..e8a2aa9 100644
--- a/media/libstagefright/codecs/amrwb/test/Android.bp
+++ b/media/libstagefright/codecs/amrwb/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "AmrwbDecoderTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/codecs/amrwbenc/TEST_MAPPING b/media/libstagefright/codecs/amrwbenc/TEST_MAPPING
new file mode 100644
index 0000000..045e8b3
--- /dev/null
+++ b/media/libstagefright/codecs/amrwbenc/TEST_MAPPING
@@ -0,0 +1,10 @@
+// mappings for frameworks/av/media/libstagefright/codecs/amrwbenc
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "AmrwbEncoderTest"}
+
+ ]
+}
diff --git a/media/libstagefright/codecs/amrwbenc/test/Android.bp b/media/libstagefright/codecs/amrwbenc/test/Android.bp
index 7042bc5..0872570 100644
--- a/media/libstagefright/codecs/amrwbenc/test/Android.bp
+++ b/media/libstagefright/codecs/amrwbenc/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "AmrwbEncoderTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/codecs/m4v_h263/TEST_MAPPING b/media/libstagefright/codecs/m4v_h263/TEST_MAPPING
new file mode 100644
index 0000000..ba3ff1c
--- /dev/null
+++ b/media/libstagefright/codecs/m4v_h263/TEST_MAPPING
@@ -0,0 +1,18 @@
+// mappings for frameworks/av/media/libstagefright/codecs/m4v_h263
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+
+ // the decoder reports something bad about an unexpected newline in the *config file
+ // and the config file looks like the AndroidTest.xml file that we put in there.
+ // I don't get this from the Encoder -- and I don't see any substantive difference
+ // between decode and encode AndroidTest.xml files -- except that encode does NOT
+ // finish with a newline.
+ // strange.
+ { "name": "Mpeg4H263DecoderTest"},
+ { "name": "Mpeg4H263EncoderTest"}
+
+ ]
+}
diff --git a/media/libstagefright/codecs/m4v_h263/dec/test/Android.bp b/media/libstagefright/codecs/m4v_h263/dec/test/Android.bp
index 655491a..9c753e6 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/test/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/dec/test/Android.bp
@@ -17,6 +17,7 @@
cc_test {
name: "Mpeg4H263DecoderTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs: [
"Mpeg4H263DecoderTest.cpp",
diff --git a/media/libstagefright/codecs/m4v_h263/enc/test/Android.bp b/media/libstagefright/codecs/m4v_h263/enc/test/Android.bp
index b9a8117..d2982da 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/test/Android.bp
+++ b/media/libstagefright/codecs/m4v_h263/enc/test/Android.bp
@@ -17,6 +17,7 @@
cc_test {
name: "Mpeg4H263EncoderTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs : [ "Mpeg4H263EncoderTest.cpp" ],
diff --git a/media/libstagefright/codecs/mp3dec/TEST_MAPPING b/media/libstagefright/codecs/mp3dec/TEST_MAPPING
new file mode 100644
index 0000000..4ef4317
--- /dev/null
+++ b/media/libstagefright/codecs/mp3dec/TEST_MAPPING
@@ -0,0 +1,9 @@
+// mappings for frameworks/av/media/libstagefright/codecs/mp3dec
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "Mp3DecoderTest"}
+ ]
+}
diff --git a/media/libstagefright/codecs/mp3dec/test/Android.bp b/media/libstagefright/codecs/mp3dec/test/Android.bp
index 0ff8b12..6b92ae9 100644
--- a/media/libstagefright/codecs/mp3dec/test/Android.bp
+++ b/media/libstagefright/codecs/mp3dec/test/Android.bp
@@ -17,6 +17,7 @@
cc_test {
name: "Mp3DecoderTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs: [
"mp3reader.cpp",
diff --git a/media/libstagefright/foundation/TEST_MAPPING b/media/libstagefright/foundation/TEST_MAPPING
index 3301c4b..a70c352 100644
--- a/media/libstagefright/foundation/TEST_MAPPING
+++ b/media/libstagefright/foundation/TEST_MAPPING
@@ -1,5 +1,14 @@
+// mappings for frameworks/av/media/libstagefright/foundation
{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "OpusHeaderTest" }
+ ],
+
"presubmit": [
- { "name": "sf_foundation_test" }
+ { "name": "sf_foundation_test" },
+ { "name": "MetaDataBaseUnitTest"}
]
}
diff --git a/media/libstagefright/foundation/tests/Android.bp b/media/libstagefright/foundation/tests/Android.bp
index 45e81e8..9e67189 100644
--- a/media/libstagefright/foundation/tests/Android.bp
+++ b/media/libstagefright/foundation/tests/Android.bp
@@ -28,6 +28,7 @@
cc_test {
name: "MetaDataBaseUnitTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/foundation/tests/OpusHeader/Android.bp b/media/libstagefright/foundation/tests/OpusHeader/Android.bp
index c1251a8..ed3298c 100644
--- a/media/libstagefright/foundation/tests/OpusHeader/Android.bp
+++ b/media/libstagefright/foundation/tests/OpusHeader/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "OpusHeaderTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/id3/TEST_MAPPING b/media/libstagefright/id3/TEST_MAPPING
new file mode 100644
index 0000000..d070d25
--- /dev/null
+++ b/media/libstagefright/id3/TEST_MAPPING
@@ -0,0 +1,24 @@
+// frameworks/av/media/libstagefright/id3
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "ID3Test" }
+ ],
+
+ "presubmit": [
+ // this doesn't seem to run any tests.
+ // but: cts-tradefed run -m CtsMediaTestCases -t android.media.cts.MediaMetadataRetrieverTest
+ // does run he 32 and 64 bit tests, but not the instant tests
+ // but all I know is that with 'atest', it's not running
+ {
+ "name": "CtsMediaTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.cts.MediaMetadataRetrieverTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/media/libstagefright/id3/test/Android.bp b/media/libstagefright/id3/test/Android.bp
index 9d26eec..acf38e2 100644
--- a/media/libstagefright/id3/test/Android.bp
+++ b/media/libstagefright/id3/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "ID3Test",
+ test_suites: ["device-tests"],
gtest: true,
srcs: ["ID3Test.cpp"],
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index 19ae0e3..bca7f01 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -135,7 +135,8 @@
private:
sp<FrameCaptureLayer> mCaptureLayer;
VideoFrame *mFrame;
- bool mIsAvcOrHevc;
+ bool mIsAvc;
+ bool mIsHevc;
MediaSource::ReadOptions::SeekMode mSeekMode;
int64_t mTargetTimeUs;
List<int64_t> mSampleDurations;
diff --git a/media/libstagefright/include/HevcUtils.h b/media/libstagefright/include/HevcUtils.h
index 0f59631..6a4a168 100644
--- a/media/libstagefright/include/HevcUtils.h
+++ b/media/libstagefright/include/HevcUtils.h
@@ -30,6 +30,10 @@
namespace android {
enum {
+ kHevcNalUnitTypeCodedSliceIdr = 19,
+ kHevcNalUnitTypeCodedSliceIdrNoLP = 20,
+ kHevcNalUnitTypeCodedSliceCra = 21,
+
kHevcNalUnitTypeVps = 32,
kHevcNalUnitTypeSps = 33,
kHevcNalUnitTypePps = 34,
@@ -90,8 +94,11 @@
// Note that this method does not write the start code.
bool write(size_t index, uint8_t* dest, size_t size);
status_t makeHvcc(uint8_t *hvcc, size_t *hvccSize, size_t nalSizeLength);
+ void FindHEVCDimensions(
+ const sp<ABuffer> &SpsBuffer, int32_t *width, int32_t *height);
Info getInfo() const { return mInfo; }
+ static bool IsHevcIDR(const uint8_t *data, size_t size);
private:
status_t parseVps(const uint8_t* data, size_t size);
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 8ef9278..4c97b4d 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -273,6 +273,7 @@
bool mShutdownInProgress;
bool mExplicitShutdown;
bool mIsLegacyVP9Decoder;
+ bool mIsStreamCorruptFree;
bool mIsLowLatency;
// If "mKeepComponentAllocated" we only transition back to Loaded state
diff --git a/media/libstagefright/include/media/stagefright/CameraSource.h b/media/libstagefright/include/media/stagefright/CameraSource.h
index 6f0d3b5..16e7d89 100644
--- a/media/libstagefright/include/media/stagefright/CameraSource.h
+++ b/media/libstagefright/include/media/stagefright/CameraSource.h
@@ -23,7 +23,6 @@
#include <media/stagefright/MediaBuffer.h>
#include <camera/android/hardware/ICamera.h>
#include <camera/ICameraRecordingProxy.h>
-#include <camera/ICameraRecordingProxyListener.h>
#include <camera/CameraParameters.h>
#include <gui/BufferItemConsumer.h>
#include <utils/List.h>
@@ -40,17 +39,6 @@
class CameraSource : public MediaSource, public MediaBufferObserver {
public:
/**
- * Factory method to create a new CameraSource using the current
- * settings (such as video size, frame rate, color format, etc)
- * from the default camera.
- *
- * @param clientName The package/process name of the client application.
- * This is used for permissions checking.
- * @return NULL on error.
- */
- static CameraSource *Create(const String16 &clientName);
-
- /**
* Factory method to create a new CameraSource.
*
* @param camera the video input frame data source. If it is NULL,
@@ -89,8 +77,7 @@
pid_t clientPid,
Size videoSize,
int32_t frameRate,
- const sp<IGraphicBufferProducer>& surface,
- bool storeMetaDataInVideoBuffers = true);
+ const sp<IGraphicBufferProducer>& surface);
virtual ~CameraSource();
@@ -132,26 +119,6 @@
protected:
/**
- * The class for listening to BnCameraRecordingProxyListener. This is used to receive video
- * buffers in VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV and VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA
- * mode. When a frame is available, CameraSource::dataCallbackTimestamp() will be called.
- */
- class ProxyListener: public BnCameraRecordingProxyListener {
- public:
- ProxyListener(const sp<CameraSource>& source);
- virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
- const sp<IMemory> &data);
- virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
- native_handle_t* handle);
- virtual void recordingFrameHandleCallbackTimestampBatch(
- const std::vector<int64_t>& timestampsUs,
- const std::vector<native_handle_t*>& handles);
-
- private:
- sp<CameraSource> mSource;
- };
-
- /**
* The class for listening to BufferQueue's onFrameAvailable. This is used to receive video
* buffers in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode. When a frame is available,
* CameraSource::processBufferQueueFrame() will be called.
@@ -213,32 +180,15 @@
CameraSource(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
Size videoSize, int32_t frameRate,
- const sp<IGraphicBufferProducer>& surface,
- bool storeMetaDataInVideoBuffers);
+ const sp<IGraphicBufferProducer>& surface);
virtual status_t startCameraRecording();
virtual void releaseRecordingFrame(const sp<IMemory>& frame);
- virtual void releaseRecordingFrameHandle(native_handle_t* handle);
- // stagefright recorder not using this for now
- virtual void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles);
// Returns true if need to skip the current frame.
// Called from dataCallbackTimestamp.
virtual bool skipCurrentFrame(int64_t /*timestampUs*/) {return false;}
- // Callback called when still camera raw data is available.
- virtual void dataCallback(int32_t /*msgType*/, const sp<IMemory>& /*data*/) {}
-
- virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
- const sp<IMemory> &data);
-
- virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
- native_handle_t* handle);
-
- virtual void recordingFrameHandleCallbackTimestampBatch(
- const std::vector<int64_t>& timestampsUs,
- const std::vector<native_handle_t*>& handles);
-
// Process a buffer item received in BufferQueueListener.
virtual void processBufferQueueFrame(BufferItem& buffer);
@@ -261,9 +211,6 @@
int64_t mGlitchDurationThresholdUs;
bool mCollectStats;
- // The mode video buffers are received from camera. One of VIDEO_BUFFER_MODE_*.
- int32_t mVideoBufferMode;
-
static const uint32_t kDefaultVideoBufferCount = 32;
/**
@@ -297,12 +244,12 @@
status_t init(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
- Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers);
+ Size videoSize, int32_t frameRate);
status_t initWithCameraAccess(
const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
- Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers);
+ Size videoSize, int32_t frameRate);
// Initialize the buffer queue used in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
status_t initBufferQueue(uint32_t width, uint32_t height, uint32_t format,
diff --git a/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h b/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
index 533e33b..3c311cf 100644
--- a/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/media/libstagefright/include/media/stagefright/CameraSourceTimeLapse.h
@@ -45,8 +45,7 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs,
- bool storeMetaDataInVideoBuffers = true);
+ int64_t timeBetweenTimeLapseFrameCaptureUs);
virtual ~CameraSourceTimeLapse();
@@ -122,8 +121,7 @@
Size videoSize,
int32_t videoFrameRate,
const sp<IGraphicBufferProducer>& surface,
- int64_t timeBetweenTimeLapseFrameCaptureUs,
- bool storeMetaDataInVideoBuffers = true);
+ int64_t timeBetweenTimeLapseFrameCaptureUs);
// Wrapper over CameraSource::signalBufferReturned() to implement quick stop.
// It only handles the case when mLastReadBufferCopy is signalled. Otherwise
@@ -137,33 +135,6 @@
// frame needs to be skipped and this function just returns the value of mSkipCurrentFrame.
virtual bool skipCurrentFrame(int64_t timestampUs);
- // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
- // timestamp and set mSkipCurrentFrame.
- // Then it calls the base CameraSource::dataCallbackTimestamp()
- // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV and
- // VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode.
- virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
- const sp<IMemory> &data);
-
- // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
- // timestamp and set mSkipCurrentFrame.
- // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
- // CameraSource::recordingFrameHandleCallbackTimestampBatch()
- // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
- // the metadata is VideoNativeHandleMetadata.
- virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
- native_handle_t* handle);
-
- // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
- // timestamp and set mSkipCurrentFrame.
- // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
- // CameraSource::recordingFrameHandleCallbackTimestampBatch()
- // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
- // the metadata is VideoNativeHandleMetadata.
- virtual void recordingFrameHandleCallbackTimestampBatch(
- const std::vector<int64_t>& timestampsUs,
- const std::vector<native_handle_t*>& handles);
-
// Process a buffer item received in CameraSource::BufferQueueListener.
// This will be called in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
virtual void processBufferQueueFrame(BufferItem& buffer);
@@ -187,9 +158,6 @@
// Wrapper to enter threadTimeLapseEntry()
static void *ThreadTimeLapseWrapper(void *me);
- // Creates a copy of source_data into a new memory of final type MemoryBase.
- sp<IMemory> createIMemoryCopy(const sp<IMemory> &source_data);
-
CameraSourceTimeLapse(const CameraSourceTimeLapse &);
CameraSourceTimeLapse &operator=(const CameraSourceTimeLapse &);
};
diff --git a/media/libstagefright/include/media/stagefright/MPEG4Writer.h b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
index 501cf2c..2582ed0 100644
--- a/media/libstagefright/include/media/stagefright/MPEG4Writer.h
+++ b/media/libstagefright/include/media/stagefright/MPEG4Writer.h
@@ -46,7 +46,7 @@
// Returns INVALID_OPERATION if there is no source or track.
virtual status_t start(MetaData *param = NULL);
- virtual status_t stop() { return reset(); }
+ virtual status_t stop();
virtual status_t pause();
virtual bool reachedEOS();
virtual status_t dump(int fd, const Vector<String16>& args);
@@ -126,6 +126,7 @@
bool mWriteSeekErr;
bool mFallocateErr;
bool mPreAllocationEnabled;
+ status_t mResetStatus;
// Queue to hold top long write durations
std::priority_queue<std::chrono::microseconds, std::vector<std::chrono::microseconds>,
std::greater<std::chrono::microseconds>> mWriteDurationPQ;
@@ -135,7 +136,9 @@
sp<AHandlerReflector<MPEG4Writer> > mReflector;
Mutex mLock;
+ // Serialize reset calls from client of MPEG4Writer and MP4WtrCtrlHlpLooper.
std::mutex mResetMutex;
+ // Serialize preallocation calls from different track threads.
std::mutex mFallocMutex;
bool mPreAllocFirstTime; // Pre-allocate space for file and track headers only once per file.
uint64_t mPrevAllTracksTotalMetaDataSizeEstimate;
@@ -304,7 +307,7 @@
void writeGeoDataBox();
void writeLatitude(int degreex10000);
void writeLongitude(int degreex10000);
- void finishCurrentSession();
+ status_t finishCurrentSession();
void addDeviceMeta();
void writeHdlr(const char *handlerType);
@@ -337,7 +340,7 @@
void sendSessionSummary();
status_t release();
status_t switchFd();
- status_t reset(bool stopSource = true);
+ status_t reset(bool stopSource = true, bool waitForAnyPreviousCallToComplete = true);
static uint32_t getMpeg4Time();
diff --git a/media/libstagefright/include/media/stagefright/MediaAdapter.h b/media/libstagefright/include/media/stagefright/MediaAdapter.h
index 177a9e9..c7d7765 100644
--- a/media/libstagefright/include/media/stagefright/MediaAdapter.h
+++ b/media/libstagefright/include/media/stagefright/MediaAdapter.h
@@ -58,6 +58,7 @@
private:
Mutex mAdapterLock;
+ std::mutex mBufferGatingMutex;
// Make sure the read() wait for the incoming buffer.
Condition mBufferReadCond;
// Make sure the pushBuffer() wait for the current buffer consumed.
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 7614ba5..c246b36 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -81,6 +81,13 @@
BUFFER_FLAG_MUXER_DATA = 16,
};
+ enum CVODegree {
+ CVO_DEGREE_0 = 0,
+ CVO_DEGREE_90 = 90,
+ CVO_DEGREE_180 = 180,
+ CVO_DEGREE_270 = 270,
+ };
+
enum {
CB_INPUT_AVAILABLE = 1,
CB_OUTPUT_AVAILABLE = 2,
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 178d334..9c67338 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -744,6 +744,7 @@
constexpr char KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT[] = "aac-max-output-channel_count";
constexpr char KEY_AAC_PROFILE[] = "aac-profile";
constexpr char KEY_AAC_SBR_MODE[] = "aac-sbr-mode";
+constexpr char KEY_ALLOW_FRAME_DROP[] = "allow-frame-drop";
constexpr char KEY_AUDIO_SESSION_ID[] = "audio-session-id";
constexpr char KEY_BIT_RATE[] = "bitrate";
constexpr char KEY_BITRATE_MODE[] = "bitrate-mode";
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecSource.h b/media/libstagefright/include/media/stagefright/MediaCodecSource.h
index 2f98af1..0f7b535 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecSource.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecSource.h
@@ -64,12 +64,15 @@
// MediaBufferObserver
virtual void signalBufferReturned(MediaBufferBase *buffer);
+ virtual status_t setEncodingBitrate(int32_t bitRate /* bps */);
// for AHandlerReflector
void onMessageReceived(const sp<AMessage> &msg);
+ status_t requestIDRFrame();
+
protected:
virtual ~MediaCodecSource();
diff --git a/media/libstagefright/include/media/stagefright/MediaWriter.h b/media/libstagefright/include/media/stagefright/MediaWriter.h
index 1f4fbcb..17b1abf 100644
--- a/media/libstagefright/include/media/stagefright/MediaWriter.h
+++ b/media/libstagefright/include/media/stagefright/MediaWriter.h
@@ -54,6 +54,10 @@
virtual void setStartTimeOffsetMs(int /*ms*/) {}
virtual int32_t getStartTimeOffsetMs() const { return 0; }
virtual status_t setNextFd(int /*fd*/) { return INVALID_OPERATION; }
+ virtual void updateCVODegrees(int32_t /*cvoDegrees*/) {}
+ virtual void updatePayloadType(int32_t /*payloadType*/) {}
+ virtual void updateSocketNetwork(int64_t /*socketNetwork*/) {}
+ virtual uint32_t getSequenceNum() { return 0; }
protected:
virtual ~MediaWriter() {}
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 64eb8b4..6b0d28f 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -247,6 +247,20 @@
// Treat empty track as malformed for MediaRecorder.
kKeyEmptyTrackMalFormed = 'nemt', // bool (int32_t)
+
+ kKeyVps = 'sVps', // int32_t, indicates that a buffer has vps.
+ kKeySps = 'sSps', // int32_t, indicates that a buffer has sps.
+ kKeyPps = 'sPps', // int32_t, indicates that a buffer has pps.
+ kKeySelfID = 'sfid', // int32_t, source ID to identify itself on RTP protocol.
+ kKeyPayloadType = 'pTyp', // int32_t, SDP negotiated payload type.
+ kKeyRtpExtMap = 'extm', // int32_t, rtp extension ID for cvo on RTP protocol.
+ kKeyRtpCvoDegrees = 'cvod', // int32_t, rtp cvo degrees as per 3GPP 26.114.
+ kKeyRtpDscp = 'dscp', // int32_t, DSCP(Differentiated services codepoint) of RFC 2474.
+ kKeySocketNetwork = 'sNet', // int64_t, socket will be bound to network handle.
+
+ // Slow-motion markers
+ kKeySlowMotionMarkers = 'slmo', // raw data, byte array following spec for
+ // MediaFormat#KEY_SLOW_MOTION_MARKERS
};
enum {
diff --git a/media/libstagefright/include/media/stagefright/ProcessInfo.h b/media/libstagefright/include/media/stagefright/ProcessInfo.h
index 0be1a52..b8a3c10 100644
--- a/media/libstagefright/include/media/stagefright/ProcessInfo.h
+++ b/media/libstagefright/include/media/stagefright/ProcessInfo.h
@@ -20,6 +20,9 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/ProcessInfoInterface.h>
+#include <map>
+#include <mutex>
+#include <utils/Condition.h>
namespace android {
@@ -28,11 +31,20 @@
virtual bool getPriority(int pid, int* priority);
virtual bool isValidPid(int pid);
+ virtual bool overrideProcessInfo(int pid, int procState, int oomScore);
+ virtual void removeProcessInfoOverride(int pid);
protected:
virtual ~ProcessInfo();
private:
+ struct ProcessInfoOverride {
+ int procState;
+ int oomScore;
+ };
+ std::mutex mOverrideLock;
+ std::map<int, ProcessInfoOverride> mOverrideMap GUARDED_BY(mOverrideLock);
+
DISALLOW_EVIL_CONSTRUCTORS(ProcessInfo);
};
diff --git a/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h b/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h
index b39112a..9260181 100644
--- a/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h
+++ b/media/libstagefright/include/media/stagefright/ProcessInfoInterface.h
@@ -24,6 +24,8 @@
struct ProcessInfoInterface : public RefBase {
virtual bool getPriority(int pid, int* priority) = 0;
virtual bool isValidPid(int pid) = 0;
+ virtual bool overrideProcessInfo(int pid, int procState, int oomScore);
+ virtual void removeProcessInfoOverride(int pid);
protected:
virtual ~ProcessInfoInterface() {}
diff --git a/media/libstagefright/include/media/stagefright/RemoteDataSource.h b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
index d82be8a..d605cda 100644
--- a/media/libstagefright/include/media/stagefright/RemoteDataSource.h
+++ b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
@@ -41,6 +41,11 @@
close();
}
virtual sp<IMemory> getIMemory() {
+ Mutex::Autolock lock(mLock);
+ if (mMemory.get() == nullptr) {
+ ALOGE("getIMemory() failed, mMemory is nullptr");
+ return nullptr;
+ }
return mMemory;
}
virtual ssize_t readAt(off64_t offset, size_t size) {
@@ -48,19 +53,35 @@
if (size > kBufferSize) {
size = kBufferSize;
}
+
+ Mutex::Autolock lock(mLock);
+ if (mSource.get() == nullptr) {
+ ALOGE("readAt() failed, mSource is nullptr");
+ return 0;
+ }
return mSource->readAt(offset, mMemory->unsecurePointer(), size);
}
virtual status_t getSize(off64_t *size) {
+ Mutex::Autolock lock(mLock);
+ if (mSource.get() == nullptr) {
+ ALOGE("getSize() failed, mSource is nullptr");
+ return INVALID_OPERATION;
+ }
return mSource->getSize(size);
}
virtual void close() {
// Protect strong pointer assignments. This also can be called from the binder
// clean-up procedure which is running on a separate thread.
- Mutex::Autolock lock(mCloseLock);
+ Mutex::Autolock lock(mLock);
mSource = nullptr;
mMemory = nullptr;
}
virtual uint32_t getFlags() {
+ Mutex::Autolock lock(mLock);
+ if (mSource.get() == nullptr) {
+ ALOGE("getSize() failed, mSource is nullptr");
+ return 0;
+ }
return mSource->flags();
}
virtual String8 toString() {
@@ -75,9 +96,10 @@
sp<IMemory> mMemory;
sp<DataSource> mSource;
String8 mName;
- Mutex mCloseLock;
+ Mutex mLock;
explicit RemoteDataSource(const sp<DataSource> &source) {
+ Mutex::Autolock lock(mLock);
mSource = source;
sp<MemoryDealer> memoryDealer = new MemoryDealer(kBufferSize, "RemoteDataSource");
mMemory = memoryDealer->allocate(kBufferSize);
diff --git a/media/libstagefright/include/media/stagefright/SurfaceUtils.h b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
index ae55c65..35b3fa2 100644
--- a/media/libstagefright/include/media/stagefright/SurfaceUtils.h
+++ b/media/libstagefright/include/media/stagefright/SurfaceUtils.h
@@ -38,6 +38,8 @@
int width, int height, int format, int rotation, int usage, bool reconnect);
void setNativeWindowHdrMetadata(
ANativeWindow *nativeWindow /* nonnull */, HDRStaticInfo *info /* nonnull */);
+status_t setNativeWindowRotation(
+ ANativeWindow *nativeWindow /* nonnull */, int rotation);
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
status_t nativeWindowConnect(ANativeWindow *surface, const char *reason);
status_t nativeWindowDisconnect(ANativeWindow *surface, const char *reason);
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 801dba1..192ba77 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -1430,7 +1430,13 @@
if (mSampleDecryptor != NULL && (nalType == 1 || nalType == 5)) {
uint8_t *nalData = mBuffer->data() + pos.nalOffset;
size_t newSize = mSampleDecryptor->processNal(nalData, pos.nalSize);
- // Note: the data can shrink due to unescaping
+ // Note: the data can shrink due to unescaping, but it can never grow
+ if (newSize > pos.nalSize) {
+ // don't log unless verbose, since this can get called a lot if
+ // the caller is trying to resynchronize
+ ALOGV("expected sample size < %u, got %zu", pos.nalSize, newSize);
+ return NULL;
+ }
memcpy(accessUnit->data() + dstOffset + 4,
nalData,
newSize);
diff --git a/media/libstagefright/mpeg2ts/TEST_MAPPING b/media/libstagefright/mpeg2ts/TEST_MAPPING
new file mode 100644
index 0000000..9f4bbdf
--- /dev/null
+++ b/media/libstagefright/mpeg2ts/TEST_MAPPING
@@ -0,0 +1,9 @@
+// frameworks/av/media/libstagefright/mpeg2ts
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "Mpeg2tsUnitTest" }
+ ]
+}
diff --git a/media/libstagefright/mpeg2ts/test/Android.bp b/media/libstagefright/mpeg2ts/test/Android.bp
index 4e4832a..d8b0304 100644
--- a/media/libstagefright/mpeg2ts/test/Android.bp
+++ b/media/libstagefright/mpeg2ts/test/Android.bp
@@ -17,6 +17,7 @@
cc_test{
name: "Mpeg2tsUnitTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs: [
"Mpeg2tsUnitTest.cpp"
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
index 7d217eb..f7bf3ba 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
@@ -67,7 +67,7 @@
int32_t dataSpace, int32_t aspects, int32_t pixelFormat) override {
Message tMsg;
tMsg.type = Message::Type::EVENT;
- tMsg.fence = native_handle_create(0, 0);
+ tMsg.fence.setTo(native_handle_create(0, 0), /* shouldOwn = */ true);
tMsg.data.eventData.event = uint32_t(OMX_EventDataSpaceChanged);
tMsg.data.eventData.data1 = dataSpace;
tMsg.data.eventData.data2 = aspects;
diff --git a/media/libstagefright/renderfright/Android.bp b/media/libstagefright/renderfright/Android.bp
new file mode 100644
index 0000000..c17f84e
--- /dev/null
+++ b/media/libstagefright/renderfright/Android.bp
@@ -0,0 +1,111 @@
+cc_defaults {
+ name: "renderfright_defaults",
+ cflags: [
+ "-DLOG_TAG=\"renderfright\"",
+ "-Wall",
+ "-Werror",
+ "-Wthread-safety",
+ "-Wunused",
+ "-Wunreachable-code",
+ ],
+}
+
+cc_defaults {
+ name: "librenderfright_defaults",
+ defaults: ["renderfright_defaults"],
+ cflags: [
+ "-DGL_GLEXT_PROTOTYPES",
+ "-DEGL_EGLEXT_PROTOTYPES",
+ ],
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libEGL",
+ "libGLESv1_CM",
+ "libGLESv2",
+ "libgui",
+ "liblog",
+ "libnativewindow",
+ "libprocessgroup",
+ "libsync",
+ "libui",
+ "libutils",
+ ],
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
+
+filegroup {
+ name: "librenderfright_sources",
+ srcs: [
+ "Description.cpp",
+ "Mesh.cpp",
+ "RenderEngine.cpp",
+ "Texture.cpp",
+ ],
+}
+
+filegroup {
+ name: "librenderfright_gl_sources",
+ srcs: [
+ "gl/GLESRenderEngine.cpp",
+ "gl/GLExtensions.cpp",
+ "gl/GLFramebuffer.cpp",
+ "gl/GLImage.cpp",
+ "gl/GLShadowTexture.cpp",
+ "gl/GLShadowVertexGenerator.cpp",
+ "gl/GLSkiaShadowPort.cpp",
+ "gl/GLVertexBuffer.cpp",
+ "gl/ImageManager.cpp",
+ "gl/Program.cpp",
+ "gl/ProgramCache.cpp",
+ "gl/filters/BlurFilter.cpp",
+ "gl/filters/GenericProgram.cpp",
+ ],
+}
+
+filegroup {
+ name: "librenderfright_threaded_sources",
+ srcs: [
+ "threaded/RenderEngineThreaded.cpp",
+ ],
+}
+
+cc_library_static {
+ name: "librenderfright",
+ defaults: ["librenderfright_defaults"],
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
+ double_loadable: true,
+ clang: true,
+ cflags: [
+ "-fvisibility=hidden",
+ "-Werror=format",
+ ],
+ srcs: [
+ ":librenderfright_sources",
+ ":librenderfright_gl_sources",
+ ":librenderfright_threaded_sources",
+ ],
+ lto: {
+ thin: true,
+ },
+}
+
+cc_library_static {
+ name: "librenderfright_mocks",
+ defaults: ["librenderfright_defaults"],
+ srcs: [
+ "mock/Framebuffer.cpp",
+ "mock/Image.cpp",
+ "mock/RenderEngine.cpp",
+ ],
+ static_libs: [
+ "libgtest",
+ "libgmock",
+ ],
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
diff --git a/media/libstagefright/renderfright/Description.cpp b/media/libstagefright/renderfright/Description.cpp
new file mode 100644
index 0000000..b9cea10
--- /dev/null
+++ b/media/libstagefright/renderfright/Description.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/private/Description.h>
+
+#include <stdint.h>
+
+#include <utils/TypeHelpers.h>
+
+namespace android {
+namespace renderengine {
+
+Description::TransferFunction Description::dataSpaceToTransferFunction(ui::Dataspace dataSpace) {
+ ui::Dataspace transfer = static_cast<ui::Dataspace>(dataSpace & ui::Dataspace::TRANSFER_MASK);
+ switch (transfer) {
+ case ui::Dataspace::TRANSFER_ST2084:
+ return Description::TransferFunction::ST2084;
+ case ui::Dataspace::TRANSFER_HLG:
+ return Description::TransferFunction::HLG;
+ case ui::Dataspace::TRANSFER_LINEAR:
+ return Description::TransferFunction::LINEAR;
+ default:
+ return Description::TransferFunction::SRGB;
+ }
+}
+
+bool Description::hasInputTransformMatrix() const {
+ const mat4 identity;
+ return inputTransformMatrix != identity;
+}
+
+bool Description::hasOutputTransformMatrix() const {
+ const mat4 identity;
+ return outputTransformMatrix != identity;
+}
+
+bool Description::hasColorMatrix() const {
+ const mat4 identity;
+ return colorMatrix != identity;
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/Mesh.cpp b/media/libstagefright/renderfright/Mesh.cpp
new file mode 100644
index 0000000..ed2f45f
--- /dev/null
+++ b/media/libstagefright/renderfright/Mesh.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/Mesh.h>
+
+#include <utils/Log.h>
+
+namespace android {
+namespace renderengine {
+
+Mesh::Mesh(Primitive primitive, size_t vertexCount, size_t vertexSize, size_t texCoordSize,
+ size_t cropCoordsSize, size_t shadowColorSize, size_t shadowParamsSize,
+ size_t indexCount)
+ : mVertexCount(vertexCount),
+ mVertexSize(vertexSize),
+ mTexCoordsSize(texCoordSize),
+ mCropCoordsSize(cropCoordsSize),
+ mShadowColorSize(shadowColorSize),
+ mShadowParamsSize(shadowParamsSize),
+ mPrimitive(primitive),
+ mIndexCount(indexCount) {
+ if (vertexCount == 0) {
+ mVertices.resize(1);
+ mVertices[0] = 0.0f;
+ mStride = 0;
+ return;
+ }
+ size_t stride = vertexSize + texCoordSize + cropCoordsSize + shadowColorSize + shadowParamsSize;
+ size_t remainder = (stride * vertexCount) / vertexCount;
+ // Since all of the input parameters are unsigned, if stride is less than
+ // either vertexSize or texCoordSize, it must have overflowed. remainder
+ // will be equal to stride as long as stride * vertexCount doesn't overflow.
+ if ((stride < vertexSize) || (remainder != stride)) {
+ ALOGE("Overflow in Mesh(..., %zu, %zu, %zu, %zu, %zu, %zu)", vertexCount, vertexSize,
+ texCoordSize, cropCoordsSize, shadowColorSize, shadowParamsSize);
+ mVertices.resize(1);
+ mVertices[0] = 0.0f;
+ mVertexCount = 0;
+ mVertexSize = 0;
+ mTexCoordsSize = 0;
+ mCropCoordsSize = 0;
+ mShadowColorSize = 0;
+ mShadowParamsSize = 0;
+ mStride = 0;
+ return;
+ }
+
+ mVertices.resize(stride * vertexCount);
+ mStride = stride;
+ mIndices.resize(indexCount);
+}
+
+Mesh::Primitive Mesh::getPrimitive() const {
+ return mPrimitive;
+}
+
+float const* Mesh::getPositions() const {
+ return mVertices.data();
+}
+float* Mesh::getPositions() {
+ return mVertices.data();
+}
+
+float const* Mesh::getTexCoords() const {
+ return mVertices.data() + mVertexSize;
+}
+float* Mesh::getTexCoords() {
+ return mVertices.data() + mVertexSize;
+}
+
+float const* Mesh::getCropCoords() const {
+ return mVertices.data() + mVertexSize + mTexCoordsSize;
+}
+float* Mesh::getCropCoords() {
+ return mVertices.data() + mVertexSize + mTexCoordsSize;
+}
+
+float const* Mesh::getShadowColor() const {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize;
+}
+float* Mesh::getShadowColor() {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize;
+}
+
+float const* Mesh::getShadowParams() const {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize + mShadowColorSize;
+}
+float* Mesh::getShadowParams() {
+ return mVertices.data() + mVertexSize + mTexCoordsSize + mCropCoordsSize + mShadowColorSize;
+}
+
+uint16_t const* Mesh::getIndices() const {
+ return mIndices.data();
+}
+
+uint16_t* Mesh::getIndices() {
+ return mIndices.data();
+}
+
+size_t Mesh::getVertexCount() const {
+ return mVertexCount;
+}
+
+size_t Mesh::getVertexSize() const {
+ return mVertexSize;
+}
+
+size_t Mesh::getTexCoordsSize() const {
+ return mTexCoordsSize;
+}
+
+size_t Mesh::getShadowColorSize() const {
+ return mShadowColorSize;
+}
+
+size_t Mesh::getShadowParamsSize() const {
+ return mShadowParamsSize;
+}
+
+size_t Mesh::getByteStride() const {
+ return mStride * sizeof(float);
+}
+
+size_t Mesh::getStride() const {
+ return mStride;
+}
+
+size_t Mesh::getIndexCount() const {
+ return mIndexCount;
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/RenderEngine.cpp b/media/libstagefright/renderfright/RenderEngine.cpp
new file mode 100644
index 0000000..c3fbb60
--- /dev/null
+++ b/media/libstagefright/renderfright/RenderEngine.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/RenderEngine.h>
+
+#include <cutils/properties.h>
+#include <log/log.h>
+#include <private/gui/SyncFeatures.h>
+#include "gl/GLESRenderEngine.h"
+#include "threaded/RenderEngineThreaded.h"
+
+namespace android {
+namespace renderengine {
+
+std::unique_ptr<RenderEngine> RenderEngine::create(const RenderEngineCreationArgs& args) {
+ RenderEngineType renderEngineType = args.renderEngineType;
+
+ // Keep the ability to override by PROPERTIES:
+ char prop[PROPERTY_VALUE_MAX];
+ property_get(PROPERTY_DEBUG_RENDERENGINE_BACKEND, prop, "");
+ if (strcmp(prop, "gles") == 0) {
+ renderEngineType = RenderEngineType::GLES;
+ }
+ if (strcmp(prop, "threaded") == 0) {
+ renderEngineType = RenderEngineType::THREADED;
+ }
+
+ switch (renderEngineType) {
+ case RenderEngineType::THREADED:
+ ALOGD("Threaded RenderEngine with GLES Backend");
+ return renderengine::threaded::RenderEngineThreaded::create(
+ [args]() { return android::renderengine::gl::GLESRenderEngine::create(args); });
+ case RenderEngineType::GLES:
+ default:
+ ALOGD("RenderEngine with GLES Backend");
+ return renderengine::gl::GLESRenderEngine::create(args);
+ }
+}
+
+RenderEngine::~RenderEngine() = default;
+
+namespace impl {
+
+RenderEngine::RenderEngine(const RenderEngineCreationArgs& args) : mArgs(args) {}
+
+RenderEngine::~RenderEngine() = default;
+
+bool RenderEngine::useNativeFenceSync() const {
+ return SyncFeatures::getInstance().useNativeFenceSync();
+}
+
+bool RenderEngine::useWaitSync() const {
+ return SyncFeatures::getInstance().useWaitSync();
+}
+
+} // namespace impl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/Texture.cpp b/media/libstagefright/renderfright/Texture.cpp
new file mode 100644
index 0000000..154cde8
--- /dev/null
+++ b/media/libstagefright/renderfright/Texture.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/Texture.h>
+
+namespace android {
+namespace renderengine {
+
+Texture::Texture()
+ : mTextureName(0), mTextureTarget(TEXTURE_2D), mWidth(0), mHeight(0), mFiltering(false) {}
+
+Texture::Texture(Target textureTarget, uint32_t textureName)
+ : mTextureName(textureName),
+ mTextureTarget(textureTarget),
+ mWidth(0),
+ mHeight(0),
+ mFiltering(false) {}
+
+void Texture::init(Target textureTarget, uint32_t textureName) {
+ mTextureName = textureName;
+ mTextureTarget = textureTarget;
+}
+
+Texture::~Texture() {}
+
+void Texture::setMatrix(float const* matrix) {
+ mTextureMatrix = mat4(matrix);
+}
+
+void Texture::setFiltering(bool enabled) {
+ mFiltering = enabled;
+}
+
+void Texture::setDimensions(size_t width, size_t height) {
+ mWidth = width;
+ mHeight = height;
+}
+
+uint32_t Texture::getTextureName() const {
+ return mTextureName;
+}
+
+uint32_t Texture::getTextureTarget() const {
+ return mTextureTarget;
+}
+
+const mat4& Texture::getMatrix() const {
+ return mTextureMatrix;
+}
+
+bool Texture::getFiltering() const {
+ return mFiltering;
+}
+
+size_t Texture::getWidth() const {
+ return mWidth;
+}
+
+size_t Texture::getHeight() const {
+ return mHeight;
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLESRenderEngine.cpp b/media/libstagefright/renderfright/gl/GLESRenderEngine.cpp
new file mode 100644
index 0000000..824bdd9
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLESRenderEngine.cpp
@@ -0,0 +1,1772 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#undef LOG_TAG
+#define LOG_TAG "RenderEngine"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <sched.h>
+#include <cmath>
+#include <fstream>
+#include <sstream>
+#include <unordered_set>
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <android-base/stringprintf.h>
+#include <cutils/compiler.h>
+#include <cutils/properties.h>
+#include <gui/DebugEGLImageTracker.h>
+#include <renderengine/Mesh.h>
+#include <renderengine/Texture.h>
+#include <renderengine/private/Description.h>
+#include <sync/sync.h>
+#include <ui/ColorSpace.h>
+#include <ui/DebugUtils.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/Rect.h>
+#include <ui/Region.h>
+#include <utils/KeyedVector.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+#include "GLExtensions.h"
+#include "GLFramebuffer.h"
+#include "GLImage.h"
+#include "GLShadowVertexGenerator.h"
+#include "Program.h"
+#include "ProgramCache.h"
+#include "filters/BlurFilter.h"
+
+// Drain the GL error queue and return true if any error was pending.
+// NOTE(review): the first glGetError() result is consumed before the log
+// statement, so ALOGV prints the *next* code in the queue each iteration and
+// the final iteration logs GL_NO_ERROR (0); the first real error value is
+// never printed. Confirm whether this is intentional.
+bool checkGlError(const char* op, int lineNumber) {
+    bool errorFound = false;
+    GLint error = glGetError();
+    while (error != GL_NO_ERROR) {
+        errorFound = true;
+        error = glGetError();
+        ALOGV("after %s() (line # %d) glError (0x%x)\n", op, lineNumber, error);
+    }
+    return errorFound;
+}
+
+// Compile-time switch for dumping intermediate render targets as PPM files
+// via writePPM() below.
+static constexpr bool outputDebugPPMs = false;
+
+// Read back the current framebuffer (width x height, RGBA8) with glReadPixels
+// and write it to "<basename>.ppm" as a binary (P6) PPM. Rows are emitted
+// bottom-up so the saved image is upright; the alpha channel is dropped.
+// Returns silently (after logging) on readback error or if the file cannot
+// be opened.
+void writePPM(const char* basename, GLuint width, GLuint height) {
+    ALOGV("writePPM #%s: %d x %d", basename, width, height);
+
+    // 4 bytes/pixel readback buffer, 3 bytes/pixel output buffer (RGB only).
+    std::vector<GLubyte> pixels(width * height * 4);
+    std::vector<GLubyte> outBuffer(width * height * 3);
+
+    // TODO(courtneygo): We can now have float formats, need
+    // to remove this code or update to support.
+    // Make returned pixels fit in uint32_t, one byte per component
+    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels.data());
+    if (checkGlError(__FUNCTION__, __LINE__)) {
+        return;
+    }
+
+    std::string filename(basename);
+    filename.append(".ppm");
+    std::ofstream file(filename.c_str(), std::ios::binary);
+    if (!file.is_open()) {
+        ALOGE("Unable to open file: %s", filename.c_str());
+        ALOGE("You may need to do: \"adb shell setenforce 0\" to enable "
+              "surfaceflinger to write debug images");
+        return;
+    }
+
+    // PPM "P6" header: magic, width, height, max component value.
+    file << "P6\n";
+    file << width << "\n";
+    file << height << "\n";
+    file << 255 << "\n";
+
+    // glReadPixels returns rows bottom-up; iterate y in reverse to flip.
+    auto ptr = reinterpret_cast<char*>(pixels.data());
+    auto outPtr = reinterpret_cast<char*>(outBuffer.data());
+    for (int y = height - 1; y >= 0; y--) {
+        char* data = ptr + y * width * sizeof(uint32_t);
+
+        for (GLuint x = 0; x < width; x++) {
+            // Only copy R, G and B components
+            outPtr[0] = data[0];
+            outPtr[1] = data[1];
+            outPtr[2] = data[2];
+            data += sizeof(uint32_t);
+            outPtr += 3;
+        }
+    }
+    file.write(reinterpret_cast<char*>(outBuffer.data()), outBuffer.size());
+}
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+using base::StringAppendF;
+using ui::Dataspace;
+
+// Enumerate the EGL configs matching |attrs| via eglChooseConfig, then pick:
+//  - if |attribute| != EGL_NONE: the first config whose |attribute| equals
+//    |wanted|;
+//  - otherwise: simply the first matching config.
+// On success writes the config to |outConfig| and returns NO_ERROR; returns
+// NAME_NOT_FOUND if nothing matched.
+static status_t selectConfigForAttribute(EGLDisplay dpy, EGLint const* attrs, EGLint attribute,
+                                         EGLint wanted, EGLConfig* outConfig) {
+    EGLint numConfigs = -1, n = 0;
+    eglGetConfigs(dpy, nullptr, 0, &numConfigs);
+    std::vector<EGLConfig> configs(numConfigs, EGL_NO_CONFIG_KHR);
+    eglChooseConfig(dpy, attrs, configs.data(), configs.size(), &n);
+    configs.resize(n);
+
+    if (!configs.empty()) {
+        if (attribute != EGL_NONE) {
+            for (EGLConfig config : configs) {
+                EGLint value = 0;
+                eglGetConfigAttrib(dpy, config, attribute, &value);
+                if (wanted == value) {
+                    *outConfig = config;
+                    return NO_ERROR;
+                }
+            }
+        } else {
+            // just pick the first one
+            *outConfig = configs[0];
+            return NO_ERROR;
+        }
+    }
+
+    return NAME_NOT_FOUND;
+}
+
+// Select an EGLConfig for |format|. When |renderableType| is non-zero, builds
+// a full attribute list (recordable, window|pbuffer surface, FB target, and
+// per-channel sizes -- 10/10/10/2 for RGBA_1010102, else 8888) and takes the
+// first match. When zero, falls back to matching only EGL_NATIVE_VISUAL_ID
+// against |format|. Warns if the chosen config is flagged EGL_SLOW_CONFIG.
+static status_t selectEGLConfig(EGLDisplay display, EGLint format, EGLint renderableType,
+                                EGLConfig* config) {
+    // select our EGLConfig. It must support EGL_RECORDABLE_ANDROID if
+    // it is to be used with WIFI displays
+    status_t err;
+    EGLint wantedAttribute;
+    EGLint wantedAttributeValue;
+
+    std::vector<EGLint> attribs;
+    if (renderableType) {
+        const ui::PixelFormat pixelFormat = static_cast<ui::PixelFormat>(format);
+        const bool is1010102 = pixelFormat == ui::PixelFormat::RGBA_1010102;
+
+        // Default to 8 bits per channel.
+        const EGLint tmpAttribs[] = {
+                EGL_RENDERABLE_TYPE,
+                renderableType,
+                EGL_RECORDABLE_ANDROID,
+                EGL_TRUE,
+                EGL_SURFACE_TYPE,
+                EGL_WINDOW_BIT | EGL_PBUFFER_BIT,
+                EGL_FRAMEBUFFER_TARGET_ANDROID,
+                EGL_TRUE,
+                EGL_RED_SIZE,
+                is1010102 ? 10 : 8,
+                EGL_GREEN_SIZE,
+                is1010102 ? 10 : 8,
+                EGL_BLUE_SIZE,
+                is1010102 ? 10 : 8,
+                EGL_ALPHA_SIZE,
+                is1010102 ? 2 : 8,
+                EGL_NONE,
+        };
+        std::copy(tmpAttribs, tmpAttribs + (sizeof(tmpAttribs) / sizeof(EGLint)),
+                  std::back_inserter(attribs));
+        // EGL_NONE here means "accept the first config from the list".
+        wantedAttribute = EGL_NONE;
+        wantedAttributeValue = EGL_NONE;
+    } else {
+        // if no renderable type specified, fallback to a simplified query
+        wantedAttribute = EGL_NATIVE_VISUAL_ID;
+        wantedAttributeValue = format;
+    }
+
+    err = selectConfigForAttribute(display, attribs.data(), wantedAttribute, wantedAttributeValue,
+                                   config);
+    if (err == NO_ERROR) {
+        EGLint caveat;
+        if (eglGetConfigAttrib(display, *config, EGL_CONFIG_CAVEAT, &caveat))
+            ALOGW_IF(caveat == EGL_SLOW_CONFIG, "EGL_SLOW_CONFIG selected!");
+    }
+
+    return err;
+}
+
+// Factory: initialize EGL on the default display and build a GLESRenderEngine.
+// Steps: init EGL (fatal on failure); query version/extension strings (fatal
+// on failure); choose an EGLConfig unless no-config-context is supported;
+// optionally create a protected context/stub surface when requested and
+// supported; create the main context (fatal on failure) plus a stub pbuffer
+// surface if surfaceless contexts are unsupported; make current; then parse
+// the GLES version and construct the engine (ES 2.0 minimum; background blur
+// additionally requires ES 3.0 -- both enforced fatally).
+std::unique_ptr<GLESRenderEngine> GLESRenderEngine::create(const RenderEngineCreationArgs& args) {
+    // initialize EGL for the default display
+    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
+    if (!eglInitialize(display, nullptr, nullptr)) {
+        LOG_ALWAYS_FATAL("failed to initialize EGL");
+    }
+
+    const auto eglVersion = eglQueryString(display, EGL_VERSION);
+    if (!eglVersion) {
+        checkGlError(__FUNCTION__, __LINE__);
+        LOG_ALWAYS_FATAL("eglQueryString(EGL_VERSION) failed");
+    }
+
+    const auto eglExtensions = eglQueryString(display, EGL_EXTENSIONS);
+    if (!eglExtensions) {
+        checkGlError(__FUNCTION__, __LINE__);
+        LOG_ALWAYS_FATAL("eglQueryString(EGL_EXTENSIONS) failed");
+    }
+
+    GLExtensions& extensions = GLExtensions::getInstance();
+    extensions.initWithEGLStrings(eglVersion, eglExtensions);
+
+    // The code assumes that ES2 or later is available if this extension is
+    // supported.
+    EGLConfig config = EGL_NO_CONFIG;
+    if (!extensions.hasNoConfigContext()) {
+        config = chooseEglConfig(display, args.pixelFormat, /*logConfig*/ true);
+    }
+
+    // High context priority is only honored when the extension is present.
+    bool useContextPriority =
+            extensions.hasContextPriority() && args.contextPriority == ContextPriority::HIGH;
+    EGLContext protectedContext = EGL_NO_CONTEXT;
+    if (args.enableProtectedContext && extensions.hasProtectedContent()) {
+        protectedContext = createEglContext(display, config, nullptr, useContextPriority,
+                                            Protection::PROTECTED);
+        // Non-fatal: the engine can run without protected content support.
+        ALOGE_IF(protectedContext == EGL_NO_CONTEXT, "Can't create protected context");
+    }
+
+    EGLContext ctxt = createEglContext(display, config, protectedContext, useContextPriority,
+                                       Protection::UNPROTECTED);
+
+    // if can't create a GL context, we can only abort.
+    LOG_ALWAYS_FATAL_IF(ctxt == EGL_NO_CONTEXT, "EGLContext creation failed");
+
+    // Without surfaceless-context support, a 1x1-style stub pbuffer is needed
+    // just to make the context current.
+    EGLSurface stub = EGL_NO_SURFACE;
+    if (!extensions.hasSurfacelessContext()) {
+        stub = createStubEglPbufferSurface(display, config, args.pixelFormat,
+                                           Protection::UNPROTECTED);
+        LOG_ALWAYS_FATAL_IF(stub == EGL_NO_SURFACE, "can't create stub pbuffer");
+    }
+    EGLBoolean success = eglMakeCurrent(display, stub, stub, ctxt);
+    LOG_ALWAYS_FATAL_IF(!success, "can't make stub pbuffer current");
+    extensions.initWithGLStrings(glGetString(GL_VENDOR), glGetString(GL_RENDERER),
+                                 glGetString(GL_VERSION), glGetString(GL_EXTENSIONS));
+
+    EGLSurface protectedStub = EGL_NO_SURFACE;
+    if (protectedContext != EGL_NO_CONTEXT && !extensions.hasSurfacelessContext()) {
+        protectedStub = createStubEglPbufferSurface(display, config, args.pixelFormat,
+                                                    Protection::PROTECTED);
+        ALOGE_IF(protectedStub == EGL_NO_SURFACE, "can't create protected stub pbuffer");
+    }
+
+    // now figure out what version of GL did we actually get
+    GlesVersion version = parseGlesVersion(extensions.getVersion());
+
+    LOG_ALWAYS_FATAL_IF(args.supportsBackgroundBlur && version < GLES_VERSION_3_0,
+        "Blurs require OpenGL ES 3.0. Please unset ro.surface_flinger.supports_background_blur");
+
+    // initialize the renderer while GL is current
+    std::unique_ptr<GLESRenderEngine> engine;
+    switch (version) {
+        case GLES_VERSION_1_0:
+        case GLES_VERSION_1_1:
+            LOG_ALWAYS_FATAL("SurfaceFlinger requires OpenGL ES 2.0 minimum to run.");
+            break;
+        case GLES_VERSION_2_0:
+        case GLES_VERSION_3_0:
+            engine = std::make_unique<GLESRenderEngine>(args, display, config, ctxt, stub,
+                                                        protectedContext, protectedStub);
+            break;
+    }
+
+    ALOGI("OpenGL ES informations:");
+    ALOGI("vendor    : %s", extensions.getVendor());
+    ALOGI("renderer  : %s", extensions.getRenderer());
+    ALOGI("version   : %s", extensions.getVersion());
+    ALOGI("extensions: %s", extensions.getExtensions());
+    ALOGI("GL_MAX_TEXTURE_SIZE = %zu", engine->getMaxTextureSize());
+    ALOGI("GL_MAX_VIEWPORT_DIMS = %zu", engine->getMaxViewportDims());
+
+    return engine;
+}
+
+// Pick an EGLConfig for |format|, preferring ES3, then ES2, then an
+// unconstrained query (emulator fallback). Fatal if nothing at all matches.
+// Optionally logs the chosen config's channel sizes and EGL strings.
+EGLConfig GLESRenderEngine::chooseEglConfig(EGLDisplay display, int format, bool logConfig) {
+    status_t err;
+    EGLConfig config;
+
+    // First try to get an ES3 config
+    err = selectEGLConfig(display, format, EGL_OPENGL_ES3_BIT, &config);
+    if (err != NO_ERROR) {
+        // If ES3 fails, try to get an ES2 config
+        err = selectEGLConfig(display, format, EGL_OPENGL_ES2_BIT, &config);
+        if (err != NO_ERROR) {
+            // If ES2 still doesn't work, probably because we're on the emulator.
+            // try a simplified query
+            ALOGW("no suitable EGLConfig found, trying a simpler query");
+            err = selectEGLConfig(display, format, 0, &config);
+            if (err != NO_ERROR) {
+                // this EGL is too lame for android
+                LOG_ALWAYS_FATAL("no suitable EGLConfig found, giving up");
+            }
+        }
+    }
+
+    if (logConfig) {
+        // print some debugging info
+        EGLint r, g, b, a;
+        eglGetConfigAttrib(display, config, EGL_RED_SIZE, &r);
+        eglGetConfigAttrib(display, config, EGL_GREEN_SIZE, &g);
+        eglGetConfigAttrib(display, config, EGL_BLUE_SIZE, &b);
+        eglGetConfigAttrib(display, config, EGL_ALPHA_SIZE, &a);
+        ALOGI("EGL information:");
+        ALOGI("vendor    : %s", eglQueryString(display, EGL_VENDOR));
+        ALOGI("version   : %s", eglQueryString(display, EGL_VERSION));
+        ALOGI("extensions: %s", eglQueryString(display, EGL_EXTENSIONS));
+        ALOGI("Client API: %s", eglQueryString(display, EGL_CLIENT_APIS) ?: "Not Supported");
+        ALOGI("EGLSurface: %d-%d-%d-%d, config=%p", r, g, b, a, config);
+    }
+
+    return config;
+}
+
+// Constructor. Expects the (unprotected) context created by create() to be
+// current. Caches GL limits, configures pixel-store alignment for both
+// contexts, precomputes color-management matrices, optionally sets up GPU
+// completion tracing and the background blur filter, starts the image
+// manager thread, and creates a 1x1 placeholder buffer/EGLImage used later
+// to release texture backing memory (see cleanupPostRender()).
+GLESRenderEngine::GLESRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display,
+                                   EGLConfig config, EGLContext ctxt, EGLSurface stub,
+                                   EGLContext protectedContext, EGLSurface protectedStub)
+      : renderengine::impl::RenderEngine(args),
+        mEGLDisplay(display),
+        mEGLConfig(config),
+        mEGLContext(ctxt),
+        mStubSurface(stub),
+        mProtectedEGLContext(protectedContext),
+        mProtectedStubSurface(protectedStub),
+        mVpWidth(0),
+        mVpHeight(0),
+        mFramebufferImageCacheSize(args.imageCacheSize),
+        mUseColorManagement(args.useColorManagement) {
+    glGetIntegerv(GL_MAX_TEXTURE_SIZE, &mMaxTextureSize);
+    glGetIntegerv(GL_MAX_VIEWPORT_DIMS, mMaxViewportDims);
+
+    glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
+    glPixelStorei(GL_PACK_ALIGNMENT, 4);
+
+    // Initialize protected EGL Context.
+    if (mProtectedEGLContext != EGL_NO_CONTEXT) {
+        // Temporarily switch to the protected context to set its pixel-store
+        // state, then switch back to the default context.
+        EGLBoolean success = eglMakeCurrent(display, mProtectedStubSurface, mProtectedStubSurface,
+                                            mProtectedEGLContext);
+        ALOGE_IF(!success, "can't make protected context current");
+        glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
+        glPixelStorei(GL_PACK_ALIGNMENT, 4);
+        success = eglMakeCurrent(display, mStubSurface, mStubSurface, mEGLContext);
+        LOG_ALWAYS_FATAL_IF(!success, "can't make default context current");
+    }
+
+    // mColorBlindnessCorrection = M;
+
+    if (mUseColorManagement) {
+        const ColorSpace srgb(ColorSpace::sRGB());
+        const ColorSpace displayP3(ColorSpace::DisplayP3());
+        const ColorSpace bt2020(ColorSpace::BT2020());
+
+        // no chromatic adaptation needed since all color spaces use D65 for their white points.
+        mSrgbToXyz = mat4(srgb.getRGBtoXYZ());
+        mDisplayP3ToXyz = mat4(displayP3.getRGBtoXYZ());
+        mBt2020ToXyz = mat4(bt2020.getRGBtoXYZ());
+        mXyzToSrgb = mat4(srgb.getXYZtoRGB());
+        mXyzToDisplayP3 = mat4(displayP3.getXYZtoRGB());
+        mXyzToBt2020 = mat4(bt2020.getXYZtoRGB());
+
+        // Compute sRGB to Display P3 and BT2020 transform matrix.
+        // NOTE: For now, we are limiting output wide color space support to
+        // Display-P3 and BT2020 only.
+        mSrgbToDisplayP3 = mXyzToDisplayP3 * mSrgbToXyz;
+        mSrgbToBt2020 = mXyzToBt2020 * mSrgbToXyz;
+
+        // Compute Display P3 to sRGB and BT2020 transform matrix.
+        mDisplayP3ToSrgb = mXyzToSrgb * mDisplayP3ToXyz;
+        mDisplayP3ToBt2020 = mXyzToBt2020 * mDisplayP3ToXyz;
+
+        // Compute BT2020 to sRGB and Display P3 transform matrix
+        mBt2020ToSrgb = mXyzToSrgb * mBt2020ToXyz;
+        mBt2020ToDisplayP3 = mXyzToDisplayP3 * mBt2020ToXyz;
+    }
+
+    // Opt into GPU-completion tracing via the debug.egl.traceGpuCompletion
+    // system property.
+    char value[PROPERTY_VALUE_MAX];
+    property_get("debug.egl.traceGpuCompletion", value, "0");
+    if (atoi(value)) {
+        mTraceGpuCompletion = true;
+        mFlushTracer = std::make_unique<FlushTracer>(this);
+    }
+
+    if (args.supportsBackgroundBlur) {
+        mBlurFilter = new BlurFilter(*this);
+        checkErrors("BlurFilter creation");
+    }
+
+    mImageManager = std::make_unique<ImageManager>(this);
+    mImageManager->initThread();
+    mDrawingBuffer = createFramebuffer();
+    sp<GraphicBuffer> buf =
+            new GraphicBuffer(1, 1, PIXEL_FORMAT_RGBA_8888, 1,
+                              GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE, "placeholder");
+
+    const status_t err = buf->initCheck();
+    if (err != OK) {
+        // Non-fatal: the engine is still usable without the placeholder image.
+        ALOGE("Error allocating placeholder buffer: %d", err);
+        return;
+    }
+    mPlaceholderBuffer = buf.get();
+    EGLint attributes[] = {
+            EGL_NONE,
+    };
+    mPlaceholderImage = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+                                          mPlaceholderBuffer, attributes);
+    ALOGE_IF(mPlaceholderImage == EGL_NO_IMAGE_KHR, "Failed to create placeholder image: %#x",
+             eglGetError());
+}
+
+// Destructor: tear down in dependency order -- image manager first (so no
+// worker can touch the caches below), then the drawing framebuffer, the
+// framebuffer EGLImage cache, the placeholder image, and the external image
+// cache, before releasing the current context and terminating EGL.
+GLESRenderEngine::~GLESRenderEngine() {
+    // Destroy the image manager first.
+    mImageManager = nullptr;
+    std::lock_guard<std::mutex> lock(mRenderingMutex);
+    unbindFrameBuffer(mDrawingBuffer.get());
+    mDrawingBuffer = nullptr;
+    while (!mFramebufferImageCache.empty()) {
+        EGLImageKHR expired = mFramebufferImageCache.front().second;
+        mFramebufferImageCache.pop_front();
+        eglDestroyImageKHR(mEGLDisplay, expired);
+        DEBUG_EGL_IMAGE_TRACKER_DESTROY();
+    }
+    eglDestroyImageKHR(mEGLDisplay, mPlaceholderImage);
+    mImageCache.clear();
+    eglMakeCurrent(mEGLDisplay, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
+    eglTerminate(mEGLDisplay);
+}
+
+// Factory for a GL-backed offscreen framebuffer.
+std::unique_ptr<Framebuffer> GLESRenderEngine::createFramebuffer() {
+    return std::make_unique<GLFramebuffer>(*this);
+}
+
+// Factory for a GL-backed EGLImage wrapper.
+std::unique_ptr<Image> GLESRenderEngine::createImage() {
+    return std::make_unique<GLImage>(*this);
+}
+
+// Return the engine-owned framebuffer allocated in the constructor.
+Framebuffer* GLESRenderEngine::getFramebufferForDrawing() {
+    return mDrawingBuffer.get();
+}
+
+// Pre-compile shader programs for the currently active context (protected or
+// not), honoring the color-management / tone-mapper-only creation args.
+void GLESRenderEngine::primeCache() const {
+    ProgramCache::getInstance().primeCache(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
+                                           mArgs.useColorManagement,
+                                           mArgs.precacheToneMapperShaderOnly);
+}
+
+// Insert a native fence into the GL command stream and return its fd.
+// Returns an invalid fd when native fence sync is unsupported or sync
+// creation/dup fails; callers are expected to fall back to finish().
+base::unique_fd GLESRenderEngine::flush() {
+    ATRACE_CALL();
+    if (!GLExtensions::getInstance().hasNativeFenceSync()) {
+        return base::unique_fd();
+    }
+
+    EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr);
+    if (sync == EGL_NO_SYNC_KHR) {
+        ALOGW("failed to create EGL native fence sync: %#x", eglGetError());
+        return base::unique_fd();
+    }
+
+    // native fence fd will not be populated until flush() is done.
+    glFlush();
+
+    // get the fence fd
+    base::unique_fd fenceFd(eglDupNativeFenceFDANDROID(mEGLDisplay, sync));
+    eglDestroySyncKHR(mEGLDisplay, sync);
+    if (fenceFd == EGL_NO_NATIVE_FENCE_FD_ANDROID) {
+        ALOGW("failed to dup EGL native fence sync: %#x", eglGetError());
+    }
+
+    // Only trace if we have a valid fence, as current usage falls back to
+    // calling finish() if the fence fd is invalid.
+    if (CC_UNLIKELY(mTraceGpuCompletion && mFlushTracer) && fenceFd.get() >= 0) {
+        mFlushTracer->queueSync(eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr));
+    }
+
+    return fenceFd;
+}
+
+// Block the CPU until all queued GL work completes, using an EGL fence sync.
+// Returns false if fence sync is unsupported or the wait fails.
+bool GLESRenderEngine::finish() {
+    ATRACE_CALL();
+    if (!GLExtensions::getInstance().hasFenceSync()) {
+        ALOGW("no synchronization support");
+        return false;
+    }
+
+    EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr);
+    if (sync == EGL_NO_SYNC_KHR) {
+        ALOGW("failed to create EGL fence sync: %#x", eglGetError());
+        return false;
+    }
+
+    // Queue a second sync for the GPU-completion tracer when enabled.
+    if (CC_UNLIKELY(mTraceGpuCompletion && mFlushTracer)) {
+        mFlushTracer->queueSync(eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_FENCE_KHR, nullptr));
+    }
+
+    return waitSync(sync, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR);
+}
+
+// Client-side wait on |sync| with a 2-second timeout; the sync object is
+// always destroyed before returning. Returns true only when the sync
+// signaled within the timeout.
+bool GLESRenderEngine::waitSync(EGLSyncKHR sync, EGLint flags) {
+    EGLint result = eglClientWaitSyncKHR(mEGLDisplay, sync, flags, 2000000000 /*2 sec*/);
+    EGLint error = eglGetError();
+    eglDestroySyncKHR(mEGLDisplay, sync);
+    if (result != EGL_CONDITION_SATISFIED_KHR) {
+        if (result == EGL_TIMEOUT_EXPIRED_KHR) {
+            ALOGW("fence wait timed out");
+        } else {
+            ALOGW("error waiting on EGL fence: %#x", error);
+        }
+        return false;
+    }
+
+    return true;
+}
+
+// Make the GPU wait on a native fence fd (server-side wait; does not block
+// the CPU). Ownership of |fenceFd| is transferred to the EGLSync object.
+// Returns false if the required extensions are missing or EGL reports an
+// error.
+bool GLESRenderEngine::waitFence(base::unique_fd fenceFd) {
+    if (!GLExtensions::getInstance().hasNativeFenceSync() ||
+        !GLExtensions::getInstance().hasWaitSync()) {
+        return false;
+    }
+
+    // release the fd and transfer the ownership to EGLSync
+    EGLint attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID, fenceFd.release(), EGL_NONE};
+    EGLSyncKHR sync = eglCreateSyncKHR(mEGLDisplay, EGL_SYNC_NATIVE_FENCE_ANDROID, attribs);
+    if (sync == EGL_NO_SYNC_KHR) {
+        ALOGE("failed to create EGL native fence sync: %#x", eglGetError());
+        return false;
+    }
+
+    // XXX: The spec draft is inconsistent as to whether this should return an
+    // EGLint or void. Ignore the return value for now, as it's not strictly
+    // needed.
+    eglWaitSyncKHR(mEGLDisplay, sync, 0);
+    EGLint error = eglGetError();
+    eglDestroySyncKHR(mEGLDisplay, sync);
+    if (error != EGL_SUCCESS) {
+        ALOGE("failed to wait for EGL native fence sync: %#x", error);
+        return false;
+    }
+
+    return true;
+}
+
+// Clear the bound framebuffer's color attachment to the given RGBA value,
+// with blending disabled.
+void GLESRenderEngine::clearWithColor(float red, float green, float blue, float alpha) {
+    ATRACE_CALL();
+    glDisable(GL_BLEND);
+    glClearColor(red, green, blue, alpha);
+    glClear(GL_COLOR_BUFFER_BIT);
+}
+
+// Fill every rect of |region| with a solid RGBA color by building a triangle
+// mesh (two triangles = six vertices per rect) and drawing it once.
+void GLESRenderEngine::fillRegionWithColor(const Region& region, float red, float green, float blue,
+                                           float alpha) {
+    size_t c;
+    Rect const* r = region.getArray(&c);
+    Mesh mesh = Mesh::Builder()
+                        .setPrimitive(Mesh::TRIANGLES)
+                        .setVertices(c * 6 /* count */, 2 /* size */)
+                        .build();
+    Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
+    for (size_t i = 0; i < c; i++, r++) {
+        // Triangle 1: top-left, bottom-left, bottom-right.
+        position[i * 6 + 0].x = r->left;
+        position[i * 6 + 0].y = r->top;
+        position[i * 6 + 1].x = r->left;
+        position[i * 6 + 1].y = r->bottom;
+        position[i * 6 + 2].x = r->right;
+        position[i * 6 + 2].y = r->bottom;
+        // Triangle 2: top-left, bottom-right, top-right.
+        position[i * 6 + 3].x = r->left;
+        position[i * 6 + 3].y = r->top;
+        position[i * 6 + 4].x = r->right;
+        position[i * 6 + 4].y = r->bottom;
+        position[i * 6 + 5].x = r->right;
+        position[i * 6 + 5].y = r->top;
+    }
+    setupFillWithColor(red, green, blue, alpha);
+    drawMesh(mesh);
+}
+
+// Enable scissor testing restricted to |region|.
+void GLESRenderEngine::setScissor(const Rect& region) {
+    glScissor(region.left, region.top, region.getWidth(), region.getHeight());
+    glEnable(GL_SCISSOR_TEST);
+}
+
+// Turn scissor testing back off.
+void GLESRenderEngine::disableScissor() {
+    glDisable(GL_SCISSOR_TEST);
+}
+
+// Generate |count| GL texture names into |names|.
+void GLESRenderEngine::genTextures(size_t count, uint32_t* names) {
+    glGenTextures(count, names);
+}
+
+// Delete the given textures and drop their entries from the texture-to-buffer
+// view map.
+// NOTE(review): the loop index is a signed int compared against a size_t
+// |count| -- harmless for realistic counts, but a sign-compare warning.
+void GLESRenderEngine::deleteTextures(size_t count, uint32_t const* names) {
+    for (int i = 0; i < count; ++i) {
+        mTextureView.erase(names[i]);
+    }
+    glDeleteTextures(count, names);
+}
+
+// Bind |texName| as a GL_TEXTURE_EXTERNAL_OES texture and attach the given
+// EGLImage to it (no-op attach if the image is EGL_NO_IMAGE_KHR).
+void GLESRenderEngine::bindExternalTextureImage(uint32_t texName, const Image& image) {
+    ATRACE_CALL();
+    const GLImage& glImage = static_cast<const GLImage&>(image);
+    const GLenum target = GL_TEXTURE_EXTERNAL_OES;
+
+    glBindTexture(target, texName);
+    if (glImage.getEGLImage() != EGL_NO_IMAGE_KHR) {
+        glEGLImageTargetTexture2DOES(target, static_cast<GLeglImageOES>(glImage.getEGLImage()));
+    }
+}
+
+// Bind |buffer|'s EGLImage to external texture |texName|, creating and
+// caching the image on a cache miss, then wait for |bufferFence| (GPU-side
+// when EGL_KHR_wait_sync is available, otherwise a blocking CPU wait).
+// Returns BAD_VALUE for a null buffer, NO_INIT if an image could not be
+// created, a fence-related error code on wait failure, else NO_ERROR.
+status_t GLESRenderEngine::bindExternalTextureBuffer(uint32_t texName,
+                                                     const sp<GraphicBuffer>& buffer,
+                                                     const sp<Fence>& bufferFence) {
+    if (buffer == nullptr) {
+        return BAD_VALUE;
+    }
+
+    ATRACE_CALL();
+
+    bool found = false;
+    {
+        std::lock_guard<std::mutex> lock(mRenderingMutex);
+        auto cachedImage = mImageCache.find(buffer->getId());
+        found = (cachedImage != mImageCache.end());
+    }
+
+    // If we couldn't find the image in the cache at this time, then either
+    // SurfaceFlinger messed up registering the buffer ahead of time or we got
+    // backed up creating other EGLImages.
+    if (!found) {
+        status_t cacheResult = mImageManager->cache(buffer);
+        if (cacheResult != NO_ERROR) {
+            return cacheResult;
+        }
+    }
+
+    // Whether or not we needed to cache, re-check mImageCache to make sure that
+    // there's an EGLImage. The current threading model guarantees that we don't
+    // destroy a cached image until it's really not needed anymore (i.e. this
+    // function should not be called), so the only possibility is that something
+    // terrible went wrong and we should just bind something and move on.
+    {
+        std::lock_guard<std::mutex> lock(mRenderingMutex);
+        auto cachedImage = mImageCache.find(buffer->getId());
+
+        if (cachedImage == mImageCache.end()) {
+            // We failed creating the image if we got here, so bail out.
+            ALOGE("Failed to create an EGLImage when rendering");
+            bindExternalTextureImage(texName, *createImage());
+            return NO_INIT;
+        }
+
+        bindExternalTextureImage(texName, *cachedImage->second);
+        // Remember which buffer backs this texture so cleanupPostRender() can
+        // later re-point it at the placeholder image.
+        mTextureView.insert_or_assign(texName, buffer->getId());
+    }
+
+    // Wait for the new buffer to be ready.
+    if (bufferFence != nullptr && bufferFence->isValid()) {
+        if (GLExtensions::getInstance().hasWaitSync()) {
+            base::unique_fd fenceFd(bufferFence->dup());
+            if (fenceFd == -1) {
+                ALOGE("error dup'ing fence fd: %d", errno);
+                return -errno;
+            }
+            if (!waitFence(std::move(fenceFd))) {
+                ALOGE("failed to wait on fence fd");
+                return UNKNOWN_ERROR;
+            }
+        } else {
+            status_t err = bufferFence->waitForever("RenderEngine::bindExternalTextureBuffer");
+            if (err != NO_ERROR) {
+                ALOGE("error waiting for fence: %d", err);
+                return err;
+            }
+        }
+    }
+
+    return NO_ERROR;
+}
+
+// Fire-and-forget: ask the ImageManager to create and cache an EGLImage for
+// |buffer| asynchronously.
+void GLESRenderEngine::cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) {
+    mImageManager->cacheAsync(buffer, nullptr);
+}
+
+// Test hook: same as above but returns a barrier the caller can wait on for
+// completion of the async cache operation.
+std::shared_ptr<ImageManager::Barrier> GLESRenderEngine::cacheExternalTextureBufferForTesting(
+        const sp<GraphicBuffer>& buffer) {
+    auto barrier = std::make_shared<ImageManager::Barrier>();
+    mImageManager->cacheAsync(buffer, barrier);
+    return barrier;
+}
+
+// Create an EGLImage for |buffer| and insert it into mImageCache keyed by
+// buffer id. Checks the cache before and after the (lock-free) image
+// creation so concurrent callers don't double-insert. Returns BAD_VALUE for
+// a null buffer, NO_INIT on image-creation failure, else NO_ERROR.
+status_t GLESRenderEngine::cacheExternalTextureBufferInternal(const sp<GraphicBuffer>& buffer) {
+    if (buffer == nullptr) {
+        return BAD_VALUE;
+    }
+
+    {
+        std::lock_guard<std::mutex> lock(mRenderingMutex);
+        if (mImageCache.count(buffer->getId()) > 0) {
+            // If there's already an image then fail fast here.
+            return NO_ERROR;
+        }
+    }
+    ATRACE_CALL();
+
+    // Create the image without holding a lock so that we don't block anything.
+    std::unique_ptr<Image> newImage = createImage();
+
+    bool created = newImage->setNativeWindowBuffer(buffer->getNativeBuffer(),
+                                                   buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
+    if (!created) {
+        ALOGE("Failed to create image. size=%ux%u st=%u usage=%#" PRIx64 " fmt=%d",
+              buffer->getWidth(), buffer->getHeight(), buffer->getStride(), buffer->getUsage(),
+              buffer->getPixelFormat());
+        return NO_INIT;
+    }
+
+    {
+        std::lock_guard<std::mutex> lock(mRenderingMutex);
+        if (mImageCache.count(buffer->getId()) > 0) {
+            // In theory it's possible for another thread to recache the image,
+            // so bail out if another thread won.
+            return NO_ERROR;
+        }
+        mImageCache.insert(std::make_pair(buffer->getId(), std::move(newImage)));
+    }
+
+    return NO_ERROR;
+}
+
+// Fire-and-forget: ask the ImageManager to release the cached EGLImage for
+// |bufferId| asynchronously.
+void GLESRenderEngine::unbindExternalTextureBuffer(uint64_t bufferId) {
+    mImageManager->releaseAsync(bufferId, nullptr);
+}
+
+// Test hook: same as above but returns a barrier the caller can wait on for
+// completion of the async release.
+std::shared_ptr<ImageManager::Barrier> GLESRenderEngine::unbindExternalTextureBufferForTesting(
+        uint64_t bufferId) {
+    auto barrier = std::make_shared<ImageManager::Barrier>();
+    mImageManager->releaseAsync(bufferId, barrier);
+    return barrier;
+}
+
+// Remove |bufferId|'s EGLImage from the cache. The image is moved out of the
+// map under the lock and destroyed (via |image| going out of scope) without
+// holding it. Logs (verbose) if no cached image was found.
+void GLESRenderEngine::unbindExternalTextureBufferInternal(uint64_t bufferId) {
+    std::unique_ptr<Image> image;
+    {
+        std::lock_guard<std::mutex> lock(mRenderingMutex);
+        const auto& cachedImage = mImageCache.find(bufferId);
+
+        if (cachedImage != mImageCache.end()) {
+            ALOGV("Destroying image for buffer: %" PRIu64, bufferId);
+            // Move the buffer out of cache first, so that we can destroy
+            // without holding the cache's lock.
+            image = std::move(cachedImage->second);
+            mImageCache.erase(bufferId);
+            return;
+        }
+    }
+    ALOGV("Failed to find image for buffer: %" PRIu64, bufferId);
+}
+
+// Populate |mesh|'s crop coordinates with the layer's bounds expressed
+// relative to its rounded-corners crop rect, configure the corner-radius crop
+// size, and return the translated crop window.
+FloatRect GLESRenderEngine::setupLayerCropping(const LayerSettings& layer, Mesh& mesh) {
+    // Translate win by the rounded corners rect coordinates, to have all values in
+    // layer coordinate space.
+    FloatRect cropWin = layer.geometry.boundaries;
+    const FloatRect& roundedCornersCrop = layer.geometry.roundedCornersCrop;
+    cropWin.left -= roundedCornersCrop.left;
+    cropWin.right -= roundedCornersCrop.left;
+    cropWin.top -= roundedCornersCrop.top;
+    cropWin.bottom -= roundedCornersCrop.top;
+    Mesh::VertexArray<vec2> cropCoords(mesh.getCropCoordArray<vec2>());
+    // Corners in order: top-left, bottom-left, bottom-right, top-right.
+    cropCoords[0] = vec2(cropWin.left, cropWin.top);
+    cropCoords[1] = vec2(cropWin.left, cropWin.top + cropWin.getHeight());
+    cropCoords[2] = vec2(cropWin.right, cropWin.top + cropWin.getHeight());
+    cropCoords[3] = vec2(cropWin.right, cropWin.top);
+
+    setupCornerRadiusCropSize(roundedCornersCrop.getWidth(), roundedCornersCrop.getHeight());
+    return cropWin;
+}
+
+// Draw a layer with rounded corners in three scissored passes: the top and
+// bottom strips (corner-radius tall) with blending on, and -- when they don't
+// overlap -- the middle strip with blending and corner radius off. The
+// scissor rects are computed by transforming the rounded-corners crop into
+// physical display space.
+void GLESRenderEngine::handleRoundedCorners(const DisplaySettings& display,
+                                            const LayerSettings& layer, const Mesh& mesh) {
+    // We separate the layer into 3 parts essentially, such that we only turn on blending for the
+    // top rectangle and the bottom rectangle, and turn off blending for the middle rectangle.
+    FloatRect bounds = layer.geometry.roundedCornersCrop;
+
+    // Explicitly compute the transform from the clip rectangle to the physical
+    // display. Normally, this is done in glViewport but we explicitly compute
+    // it here so that we can get the scissor bounds correct.
+    const Rect& source = display.clip;
+    const Rect& destination = display.physicalDisplay;
+    // Here we compute the following transform:
+    // 1. Translate the top left corner of the source clip to (0, 0)
+    // 2. Rotate the clip rectangle about the origin in accordance with the
+    // orientation flag
+    // 3. Translate the top left corner back to the origin.
+    // 4. Scale the clip rectangle to the destination rectangle dimensions
+    // 5. Translate the top left corner to the destination rectangle's top left
+    // corner.
+    const mat4 translateSource = mat4::translate(vec4(-source.left, -source.top, 0, 1));
+    mat4 rotation;
+    int displacementX = 0;
+    int displacementY = 0;
+    float destinationWidth = static_cast<float>(destination.getWidth());
+    float destinationHeight = static_cast<float>(destination.getHeight());
+    float sourceWidth = static_cast<float>(source.getWidth());
+    float sourceHeight = static_cast<float>(source.getHeight());
+    const float rot90InRadians = 2.0f * static_cast<float>(M_PI) / 4.0f;
+    switch (display.orientation) {
+        case ui::Transform::ROT_90:
+            rotation = mat4::rotate(rot90InRadians, vec3(0, 0, 1));
+            displacementX = source.getHeight();
+            std::swap(sourceHeight, sourceWidth);
+            break;
+        case ui::Transform::ROT_180:
+            rotation = mat4::rotate(rot90InRadians * 2.0f, vec3(0, 0, 1));
+            displacementY = source.getHeight();
+            displacementX = source.getWidth();
+            break;
+        case ui::Transform::ROT_270:
+            rotation = mat4::rotate(rot90InRadians * 3.0f, vec3(0, 0, 1));
+            displacementY = source.getWidth();
+            std::swap(sourceHeight, sourceWidth);
+            break;
+        default:
+            break;
+    }
+
+    const mat4 intermediateTranslation = mat4::translate(vec4(displacementX, displacementY, 0, 1));
+    const mat4 scale = mat4::scale(
+            vec4(destinationWidth / sourceWidth, destinationHeight / sourceHeight, 1, 1));
+    const mat4 translateDestination =
+            mat4::translate(vec4(destination.left, destination.top, 0, 1));
+    const mat4 globalTransform =
+            translateDestination * scale * intermediateTranslation * rotation * translateSource;
+
+    // Map the crop corners through the full layer+display transform, then
+    // re-normalize to an axis-aligned rect with min/max.
+    const mat4 transformMatrix = globalTransform * layer.geometry.positionTransform;
+    const vec4 leftTopCoordinate(bounds.left, bounds.top, 1.0, 1.0);
+    const vec4 rightBottomCoordinate(bounds.right, bounds.bottom, 1.0, 1.0);
+    const vec4 leftTopCoordinateInBuffer = transformMatrix * leftTopCoordinate;
+    const vec4 rightBottomCoordinateInBuffer = transformMatrix * rightBottomCoordinate;
+    bounds = FloatRect(std::min(leftTopCoordinateInBuffer[0], rightBottomCoordinateInBuffer[0]),
+                       std::min(leftTopCoordinateInBuffer[1], rightBottomCoordinateInBuffer[1]),
+                       std::max(leftTopCoordinateInBuffer[0], rightBottomCoordinateInBuffer[0]),
+                       std::max(leftTopCoordinateInBuffer[1], rightBottomCoordinateInBuffer[1]));
+
+    // Finally, we cut the layer into 3 parts, with top and bottom parts having rounded corners
+    // and the middle part without rounded corners.
+    const int32_t radius = ceil(layer.geometry.roundedCornersRadius);
+    const Rect topRect(bounds.left, bounds.top, bounds.right, bounds.top + radius);
+    setScissor(topRect);
+    drawMesh(mesh);
+    const Rect bottomRect(bounds.left, bounds.bottom - radius, bounds.right, bounds.bottom);
+    setScissor(bottomRect);
+    drawMesh(mesh);
+
+    // The middle part of the layer can turn off blending.
+    if (topRect.bottom < bottomRect.top) {
+        const Rect middleRect(bounds.left, bounds.top + radius, bounds.right,
+                              bounds.bottom - radius);
+        setScissor(middleRect);
+        mState.cornerRadius = 0.0;
+        disableBlending();
+        drawMesh(mesh);
+    }
+    disableScissor();
+}
+
+// Attach |framebuffer|'s EGLImage to its GL texture, bind its FBO, and attach
+// the texture as the color attachment. Returns NO_ERROR only when the
+// framebuffer is reported complete, else BAD_VALUE.
+status_t GLESRenderEngine::bindFrameBuffer(Framebuffer* framebuffer) {
+    ATRACE_CALL();
+    GLFramebuffer* glFramebuffer = static_cast<GLFramebuffer*>(framebuffer);
+    EGLImageKHR eglImage = glFramebuffer->getEGLImage();
+    uint32_t textureName = glFramebuffer->getTextureName();
+    uint32_t framebufferName = glFramebuffer->getFramebufferName();
+
+    // Bind the texture and turn our EGLImage into a texture
+    glBindTexture(GL_TEXTURE_2D, textureName);
+    glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, (GLeglImageOES)eglImage);
+
+    // Bind the Framebuffer to render into
+    glBindFramebuffer(GL_FRAMEBUFFER, framebufferName);
+    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureName, 0);
+
+    uint32_t glStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
+    ALOGE_IF(glStatus != GL_FRAMEBUFFER_COMPLETE_OES, "glCheckFramebufferStatusOES error %d",
+             glStatus);
+
+    return glStatus == GL_FRAMEBUFFER_COMPLETE_OES ? NO_ERROR : BAD_VALUE;
+}
+
+// Rebind the default (window) framebuffer; the argument is unused because
+// GL state is global per-context.
+void GLESRenderEngine::unbindFrameBuffer(Framebuffer* /*framebuffer*/) {
+    ATRACE_CALL();
+
+    // back to main framebuffer
+    glBindFramebuffer(GL_FRAMEBUFFER, 0);
+}
+
+// Release GPU resources held from the previous frame once its draw fence has
+// signaled. Returns false (no-op) when cleanup already ran or the last draw
+// is still in flight. In CLEAN_ALL mode, additionally re-points every bound
+// external texture at the 1x1 placeholder image and clears the image cache so
+// buffer backing memory can be freed. Always rebinds the drawing framebuffer
+// to a 1x1 placeholder and drops the cached last-draw fence.
+bool GLESRenderEngine::cleanupPostRender(CleanupMode mode) {
+    ATRACE_CALL();
+
+    if (mPriorResourcesCleaned ||
+        (mLastDrawFence != nullptr && mLastDrawFence->getStatus() != Fence::Status::Signaled)) {
+        // If we don't have a prior frame needing cleanup, then don't do anything.
+        return false;
+    }
+
+    // This is a bit of a band-aid fix for FrameCaptureProcessor, as we should
+    // not need to keep memory around if we don't need to do so.
+    if (mode == CleanupMode::CLEAN_ALL) {
+        // TODO: SurfaceFlinger memory utilization may benefit from resetting
+        // texture bindings as well. Assess if it does and there's no performance regression
+        // when rebinding the same image data to the same texture, and if so then its mode
+        // behavior can be tweaked.
+        if (mPlaceholderImage != EGL_NO_IMAGE_KHR) {
+            for (auto [textureName, bufferId] : mTextureView) {
+                if (bufferId && mPlaceholderImage != EGL_NO_IMAGE_KHR) {
+                    glBindTexture(GL_TEXTURE_EXTERNAL_OES, textureName);
+                    glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES,
+                                                 static_cast<GLeglImageOES>(mPlaceholderImage));
+                    mTextureView[textureName] = std::nullopt;
+                    checkErrors();
+                }
+            }
+        }
+        {
+            std::lock_guard<std::mutex> lock(mRenderingMutex);
+            mImageCache.clear();
+        }
+    }
+
+    // Bind the texture to placeholder so that backing image data can be freed.
+    GLFramebuffer* glFramebuffer = static_cast<GLFramebuffer*>(getFramebufferForDrawing());
+    glFramebuffer->allocateBuffers(1, 1, mPlaceholderDrawBuffer);
+    // Release the cached fence here, so that we don't churn reallocations when
+    // we could no-op repeated calls of this method instead.
+    mLastDrawFence = nullptr;
+    mPriorResourcesCleaned = true;
+    return true;
+}
+
+void GLESRenderEngine::checkErrors() const {
+ checkErrors(nullptr);
+}
+
+void GLESRenderEngine::checkErrors(const char* tag) const {
+ do {
+ // there could be more than one error flag
+ GLenum error = glGetError();
+ if (error == GL_NO_ERROR) break;
+ if (tag == nullptr) {
+ ALOGE("GL error 0x%04x", int(error));
+ } else {
+ ALOGE("GL error: %s -> 0x%04x", tag, int(error));
+ }
+ } while (true);
+}
+
+bool GLESRenderEngine::supportsProtectedContent() const {
+ return mProtectedEGLContext != EGL_NO_CONTEXT;
+}
+
+bool GLESRenderEngine::useProtectedContext(bool useProtectedContext) {
+ if (useProtectedContext == mInProtectedContext) {
+ return true;
+ }
+ if (useProtectedContext && mProtectedEGLContext == EGL_NO_CONTEXT) {
+ return false;
+ }
+ const EGLSurface surface = useProtectedContext ? mProtectedStubSurface : mStubSurface;
+ const EGLContext context = useProtectedContext ? mProtectedEGLContext : mEGLContext;
+ const bool success = eglMakeCurrent(mEGLDisplay, surface, surface, context) == EGL_TRUE;
+ if (success) {
+ mInProtectedContext = useProtectedContext;
+ }
+ return success;
+}
+EGLImageKHR GLESRenderEngine::createFramebufferImageIfNeeded(ANativeWindowBuffer* nativeBuffer,
+ bool isProtected,
+ bool useFramebufferCache) {
+ sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(nativeBuffer);
+ if (useFramebufferCache) {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ for (const auto& image : mFramebufferImageCache) {
+ if (image.first == graphicBuffer->getId()) {
+ return image.second;
+ }
+ }
+ }
+ EGLint attributes[] = {
+ isProtected ? EGL_PROTECTED_CONTENT_EXT : EGL_NONE,
+ isProtected ? EGL_TRUE : EGL_NONE,
+ EGL_NONE,
+ };
+ EGLImageKHR image = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
+ nativeBuffer, attributes);
+ if (useFramebufferCache) {
+ if (image != EGL_NO_IMAGE_KHR) {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ if (mFramebufferImageCache.size() >= mFramebufferImageCacheSize) {
+ EGLImageKHR expired = mFramebufferImageCache.front().second;
+ mFramebufferImageCache.pop_front();
+ eglDestroyImageKHR(mEGLDisplay, expired);
+ DEBUG_EGL_IMAGE_TRACKER_DESTROY();
+ }
+ mFramebufferImageCache.push_back({graphicBuffer->getId(), image});
+ }
+ }
+
+ if (image != EGL_NO_IMAGE_KHR) {
+ DEBUG_EGL_IMAGE_TRACKER_CREATE();
+ }
+ return image;
+}
+
+status_t GLESRenderEngine::drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer,
+ const bool useFramebufferCache, base::unique_fd&& bufferFence,
+ base::unique_fd* drawFence) {
+ ATRACE_CALL();
+ if (layers.empty()) {
+ ALOGV("Drawing empty layer stack");
+ return NO_ERROR;
+ }
+
+ if (bufferFence.get() >= 0) {
+ // Duplicate the fence for passing to waitFence.
+ base::unique_fd bufferFenceDup(dup(bufferFence.get()));
+ if (bufferFenceDup < 0 || !waitFence(std::move(bufferFenceDup))) {
+ ATRACE_NAME("Waiting before draw");
+ sync_wait(bufferFence.get(), -1);
+ }
+ }
+
+ if (buffer == nullptr) {
+ ALOGE("No output buffer provided. Aborting GPU composition.");
+ return BAD_VALUE;
+ }
+
+ std::unique_ptr<BindNativeBufferAsFramebuffer> fbo;
+ // Gathering layers that requested blur, we'll need them to decide when to render to an
+ // offscreen buffer, and when to render to the native buffer.
+ std::deque<const LayerSettings*> blurLayers;
+ if (CC_LIKELY(mBlurFilter != nullptr)) {
+ for (auto layer : layers) {
+ if (layer->backgroundBlurRadius > 0) {
+ blurLayers.push_back(layer);
+ }
+ }
+ }
+ const auto blurLayersSize = blurLayers.size();
+
+ if (blurLayersSize == 0) {
+ fbo = std::make_unique<BindNativeBufferAsFramebuffer>(*this,
+ buffer.get()->getNativeBuffer(),
+ useFramebufferCache);
+ if (fbo->getStatus() != NO_ERROR) {
+ ALOGE("Failed to bind framebuffer! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors();
+ return fbo->getStatus();
+ }
+ setViewportAndProjection(display.physicalDisplay, display.clip);
+ } else {
+ setViewportAndProjection(display.physicalDisplay, display.clip);
+ auto status =
+ mBlurFilter->setAsDrawTarget(display, blurLayers.front()->backgroundBlurRadius);
+ if (status != NO_ERROR) {
+ ALOGE("Failed to prepare blur filter! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors();
+ return status;
+ }
+ }
+
+ // clear the entire buffer, sometimes when we reuse buffers we'd persist
+ // ghost images otherwise.
+ // we also require a full transparent framebuffer for overlays. This is
+ // probably not quite efficient on all GPUs, since we could filter out
+ // opaque layers.
+ clearWithColor(0.0, 0.0, 0.0, 0.0);
+
+ setOutputDataSpace(display.outputDataspace);
+ setDisplayMaxLuminance(display.maxLuminance);
+
+ const mat4 projectionMatrix =
+ ui::Transform(display.orientation).asMatrix4() * mState.projectionMatrix;
+ if (!display.clearRegion.isEmpty()) {
+ glDisable(GL_BLEND);
+ fillRegionWithColor(display.clearRegion, 0.0, 0.0, 0.0, 1.0);
+ }
+
+ Mesh mesh = Mesh::Builder()
+ .setPrimitive(Mesh::TRIANGLE_FAN)
+ .setVertices(4 /* count */, 2 /* size */)
+ .setTexCoords(2 /* size */)
+ .setCropCoords(2 /* size */)
+ .build();
+ for (auto const layer : layers) {
+ if (blurLayers.size() > 0 && blurLayers.front() == layer) {
+ blurLayers.pop_front();
+
+ auto status = mBlurFilter->prepare();
+ if (status != NO_ERROR) {
+ ALOGE("Failed to render blur effect! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors("Can't render first blur pass");
+ return status;
+ }
+
+ if (blurLayers.size() == 0) {
+ // Done blurring, time to bind the native FBO and render our blur onto it.
+ fbo = std::make_unique<BindNativeBufferAsFramebuffer>(*this,
+ buffer.get()
+ ->getNativeBuffer(),
+ useFramebufferCache);
+ status = fbo->getStatus();
+ setViewportAndProjection(display.physicalDisplay, display.clip);
+ } else {
+ // There's still something else to blur, so let's keep rendering to our FBO
+ // instead of to the display.
+ status = mBlurFilter->setAsDrawTarget(display,
+ blurLayers.front()->backgroundBlurRadius);
+ }
+ if (status != NO_ERROR) {
+ ALOGE("Failed to bind framebuffer! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors("Can't bind native framebuffer");
+ return status;
+ }
+
+ status = mBlurFilter->render(blurLayersSize > 1);
+ if (status != NO_ERROR) {
+ ALOGE("Failed to render blur effect! Aborting GPU composition for buffer (%p).",
+ buffer->handle);
+ checkErrors("Can't render blur filter");
+ return status;
+ }
+ }
+
+ mState.maxMasteringLuminance = layer->source.buffer.maxMasteringLuminance;
+ mState.maxContentLuminance = layer->source.buffer.maxContentLuminance;
+ mState.projectionMatrix = projectionMatrix * layer->geometry.positionTransform;
+
+ const FloatRect bounds = layer->geometry.boundaries;
+ Mesh::VertexArray<vec2> position(mesh.getPositionArray<vec2>());
+ position[0] = vec2(bounds.left, bounds.top);
+ position[1] = vec2(bounds.left, bounds.bottom);
+ position[2] = vec2(bounds.right, bounds.bottom);
+ position[3] = vec2(bounds.right, bounds.top);
+
+ setupLayerCropping(*layer, mesh);
+ setColorTransform(display.colorTransform * layer->colorTransform);
+
+ bool usePremultipliedAlpha = true;
+ bool disableTexture = true;
+ bool isOpaque = false;
+ if (layer->source.buffer.buffer != nullptr) {
+ disableTexture = false;
+ isOpaque = layer->source.buffer.isOpaque;
+
+ sp<GraphicBuffer> gBuf = layer->source.buffer.buffer;
+ bindExternalTextureBuffer(layer->source.buffer.textureName, gBuf,
+ layer->source.buffer.fence);
+
+ usePremultipliedAlpha = layer->source.buffer.usePremultipliedAlpha;
+ Texture texture(Texture::TEXTURE_EXTERNAL, layer->source.buffer.textureName);
+ mat4 texMatrix = layer->source.buffer.textureTransform;
+
+ texture.setMatrix(texMatrix.asArray());
+ texture.setFiltering(layer->source.buffer.useTextureFiltering);
+
+ texture.setDimensions(gBuf->getWidth(), gBuf->getHeight());
+ setSourceY410BT2020(layer->source.buffer.isY410BT2020);
+
+ renderengine::Mesh::VertexArray<vec2> texCoords(mesh.getTexCoordArray<vec2>());
+ texCoords[0] = vec2(0.0, 0.0);
+ texCoords[1] = vec2(0.0, 1.0);
+ texCoords[2] = vec2(1.0, 1.0);
+ texCoords[3] = vec2(1.0, 0.0);
+ setupLayerTexturing(texture);
+ }
+
+ const half3 solidColor = layer->source.solidColor;
+ const half4 color = half4(solidColor.r, solidColor.g, solidColor.b, layer->alpha);
+ // Buffer sources will have a black solid color ignored in the shader,
+ // so in that scenario the solid color passed here is arbitrary.
+ setupLayerBlending(usePremultipliedAlpha, isOpaque, disableTexture, color,
+ layer->geometry.roundedCornersRadius);
+ if (layer->disableBlending) {
+ glDisable(GL_BLEND);
+ }
+ setSourceDataSpace(layer->sourceDataspace);
+
+ if (layer->shadow.length > 0.0f) {
+ handleShadow(layer->geometry.boundaries, layer->geometry.roundedCornersRadius,
+ layer->shadow);
+ }
+ // We only want to do a special handling for rounded corners when having rounded corners
+ // is the only reason it needs to turn on blending, otherwise, we handle it like the
+ // usual way since it needs to turn on blending anyway.
+ else if (layer->geometry.roundedCornersRadius > 0.0 && color.a >= 1.0f && isOpaque) {
+ handleRoundedCorners(display, *layer, mesh);
+ } else {
+ drawMesh(mesh);
+ }
+
+ // Cleanup if there's a buffer source
+ if (layer->source.buffer.buffer != nullptr) {
+ disableBlending();
+ setSourceY410BT2020(false);
+ disableTexturing();
+ }
+ }
+
+ if (drawFence != nullptr) {
+ *drawFence = flush();
+ }
+ // If flush failed or we don't support native fences, we need to force the
+ // gl command stream to be executed.
+ if (drawFence == nullptr || drawFence->get() < 0) {
+ bool success = finish();
+ if (!success) {
+ ALOGE("Failed to flush RenderEngine commands");
+ checkErrors();
+ // Chances are, something illegal happened (either the caller passed
+ // us bad parameters, or we messed up our shader generation).
+ return INVALID_OPERATION;
+ }
+ mLastDrawFence = nullptr;
+ } else {
+ // The caller takes ownership of drawFence, so we need to duplicate the
+ // fd here.
+ mLastDrawFence = new Fence(dup(drawFence->get()));
+ }
+ mPriorResourcesCleaned = false;
+
+ checkErrors();
+ return NO_ERROR;
+}
+
+void GLESRenderEngine::setViewportAndProjection(Rect viewport, Rect clip) {
+ ATRACE_CALL();
+ mVpWidth = viewport.getWidth();
+ mVpHeight = viewport.getHeight();
+
+ // We pass the top left corner instead of the bottom left corner,
+ // because since we're rendering off-screen first.
+ glViewport(viewport.left, viewport.top, mVpWidth, mVpHeight);
+
+ mState.projectionMatrix = mat4::ortho(clip.left, clip.right, clip.top, clip.bottom, 0, 1);
+}
+
+void GLESRenderEngine::setupLayerBlending(bool premultipliedAlpha, bool opaque, bool disableTexture,
+ const half4& color, float cornerRadius) {
+ mState.isPremultipliedAlpha = premultipliedAlpha;
+ mState.isOpaque = opaque;
+ mState.color = color;
+ mState.cornerRadius = cornerRadius;
+
+ if (disableTexture) {
+ mState.textureEnabled = false;
+ }
+
+ if (color.a < 1.0f || !opaque || cornerRadius > 0.0f) {
+ glEnable(GL_BLEND);
+ glBlendFunc(premultipliedAlpha ? GL_ONE : GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+ } else {
+ glDisable(GL_BLEND);
+ }
+}
+
+void GLESRenderEngine::setSourceY410BT2020(bool enable) {
+ mState.isY410BT2020 = enable;
+}
+
+void GLESRenderEngine::setSourceDataSpace(Dataspace source) {
+ mDataSpace = source;
+}
+
+void GLESRenderEngine::setOutputDataSpace(Dataspace dataspace) {
+ mOutputDataSpace = dataspace;
+}
+
+void GLESRenderEngine::setDisplayMaxLuminance(const float maxLuminance) {
+ mState.displayMaxLuminance = maxLuminance;
+}
+
+void GLESRenderEngine::setupLayerTexturing(const Texture& texture) {
+ GLuint target = texture.getTextureTarget();
+ glBindTexture(target, texture.getTextureName());
+ GLenum filter = GL_NEAREST;
+ if (texture.getFiltering()) {
+ filter = GL_LINEAR;
+ }
+ glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_MAG_FILTER, filter);
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, filter);
+
+ mState.texture = texture;
+ mState.textureEnabled = true;
+}
+
+void GLESRenderEngine::setColorTransform(const mat4& colorTransform) {
+ mState.colorMatrix = colorTransform;
+}
+
+void GLESRenderEngine::disableTexturing() {
+ mState.textureEnabled = false;
+}
+
+void GLESRenderEngine::disableBlending() {
+ glDisable(GL_BLEND);
+}
+
+void GLESRenderEngine::setupFillWithColor(float r, float g, float b, float a) {
+ mState.isPremultipliedAlpha = true;
+ mState.isOpaque = false;
+ mState.color = half4(r, g, b, a);
+ mState.textureEnabled = false;
+ glDisable(GL_BLEND);
+}
+
+void GLESRenderEngine::setupCornerRadiusCropSize(float width, float height) {
+ mState.cropSize = half2(width, height);
+}
+
+void GLESRenderEngine::drawMesh(const Mesh& mesh) {
+ ATRACE_CALL();
+ if (mesh.getTexCoordsSize()) {
+ glEnableVertexAttribArray(Program::texCoords);
+ glVertexAttribPointer(Program::texCoords, mesh.getTexCoordsSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getTexCoords());
+ }
+
+ glVertexAttribPointer(Program::position, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getPositions());
+
+ if (mState.cornerRadius > 0.0f) {
+ glEnableVertexAttribArray(Program::cropCoords);
+ glVertexAttribPointer(Program::cropCoords, mesh.getVertexSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getCropCoords());
+ }
+
+ if (mState.drawShadows) {
+ glEnableVertexAttribArray(Program::shadowColor);
+ glVertexAttribPointer(Program::shadowColor, mesh.getShadowColorSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getShadowColor());
+
+ glEnableVertexAttribArray(Program::shadowParams);
+ glVertexAttribPointer(Program::shadowParams, mesh.getShadowParamsSize(), GL_FLOAT, GL_FALSE,
+ mesh.getByteStride(), mesh.getShadowParams());
+ }
+
+ Description managedState = mState;
+ // By default, DISPLAY_P3 is the only supported wide color output. However,
+ // when HDR content is present, hardware composer may be able to handle
+ // BT2020 data space, in that case, the output data space is set to be
+ // BT2020_HLG or BT2020_PQ respectively. In GPU fall back we need
+ // to respect this and convert non-HDR content to HDR format.
+ if (mUseColorManagement) {
+ Dataspace inputStandard = static_cast<Dataspace>(mDataSpace & Dataspace::STANDARD_MASK);
+ Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+ Dataspace outputStandard =
+ static_cast<Dataspace>(mOutputDataSpace & Dataspace::STANDARD_MASK);
+ Dataspace outputTransfer =
+ static_cast<Dataspace>(mOutputDataSpace & Dataspace::TRANSFER_MASK);
+ bool needsXYZConversion = needsXYZTransformMatrix();
+
+ // NOTE: if the input standard of the input dataspace is not STANDARD_DCI_P3 or
+ // STANDARD_BT2020, it will be treated as STANDARD_BT709
+ if (inputStandard != Dataspace::STANDARD_DCI_P3 &&
+ inputStandard != Dataspace::STANDARD_BT2020) {
+ inputStandard = Dataspace::STANDARD_BT709;
+ }
+
+ if (needsXYZConversion) {
+ // The supported input color spaces are standard RGB, Display P3 and BT2020.
+ switch (inputStandard) {
+ case Dataspace::STANDARD_DCI_P3:
+ managedState.inputTransformMatrix = mDisplayP3ToXyz;
+ break;
+ case Dataspace::STANDARD_BT2020:
+ managedState.inputTransformMatrix = mBt2020ToXyz;
+ break;
+ default:
+ managedState.inputTransformMatrix = mSrgbToXyz;
+ break;
+ }
+
+ // The supported output color spaces are BT2020, Display P3 and standard RGB.
+ switch (outputStandard) {
+ case Dataspace::STANDARD_BT2020:
+ managedState.outputTransformMatrix = mXyzToBt2020;
+ break;
+ case Dataspace::STANDARD_DCI_P3:
+ managedState.outputTransformMatrix = mXyzToDisplayP3;
+ break;
+ default:
+ managedState.outputTransformMatrix = mXyzToSrgb;
+ break;
+ }
+ } else if (inputStandard != outputStandard) {
+ // At this point, the input data space and output data space could be both
+ // HDR data spaces, but they match each other, we do nothing in this case.
+ // In addition to the case above, the input data space could be
+ // - scRGB linear
+ // - scRGB non-linear
+ // - sRGB
+ // - Display P3
+ // - BT2020
+ // The output data spaces could be
+ // - sRGB
+ // - Display P3
+ // - BT2020
+ switch (outputStandard) {
+ case Dataspace::STANDARD_BT2020:
+ if (inputStandard == Dataspace::STANDARD_BT709) {
+ managedState.outputTransformMatrix = mSrgbToBt2020;
+ } else if (inputStandard == Dataspace::STANDARD_DCI_P3) {
+ managedState.outputTransformMatrix = mDisplayP3ToBt2020;
+ }
+ break;
+ case Dataspace::STANDARD_DCI_P3:
+ if (inputStandard == Dataspace::STANDARD_BT709) {
+ managedState.outputTransformMatrix = mSrgbToDisplayP3;
+ } else if (inputStandard == Dataspace::STANDARD_BT2020) {
+ managedState.outputTransformMatrix = mBt2020ToDisplayP3;
+ }
+ break;
+ default:
+ if (inputStandard == Dataspace::STANDARD_DCI_P3) {
+ managedState.outputTransformMatrix = mDisplayP3ToSrgb;
+ } else if (inputStandard == Dataspace::STANDARD_BT2020) {
+ managedState.outputTransformMatrix = mBt2020ToSrgb;
+ }
+ break;
+ }
+ }
+
+ // we need to convert the RGB value to linear space and convert it back when:
+ // - there is a color matrix that is not an identity matrix, or
+ // - there is an output transform matrix that is not an identity matrix, or
+ // - the input transfer function doesn't match the output transfer function.
+ if (managedState.hasColorMatrix() || managedState.hasOutputTransformMatrix() ||
+ inputTransfer != outputTransfer) {
+ managedState.inputTransferFunction =
+ Description::dataSpaceToTransferFunction(inputTransfer);
+ managedState.outputTransferFunction =
+ Description::dataSpaceToTransferFunction(outputTransfer);
+ }
+ }
+
+ ProgramCache::getInstance().useProgram(mInProtectedContext ? mProtectedEGLContext : mEGLContext,
+ managedState);
+
+ if (mState.drawShadows) {
+ glDrawElements(mesh.getPrimitive(), mesh.getIndexCount(), GL_UNSIGNED_SHORT,
+ mesh.getIndices());
+ } else {
+ glDrawArrays(mesh.getPrimitive(), 0, mesh.getVertexCount());
+ }
+
+ if (mUseColorManagement && outputDebugPPMs) {
+ static uint64_t managedColorFrameCount = 0;
+ std::ostringstream out;
+ out << "/data/texture_out" << managedColorFrameCount++;
+ writePPM(out.str().c_str(), mVpWidth, mVpHeight);
+ }
+
+ if (mesh.getTexCoordsSize()) {
+ glDisableVertexAttribArray(Program::texCoords);
+ }
+
+ if (mState.cornerRadius > 0.0f) {
+ glDisableVertexAttribArray(Program::cropCoords);
+ }
+
+ if (mState.drawShadows) {
+ glDisableVertexAttribArray(Program::shadowColor);
+ glDisableVertexAttribArray(Program::shadowParams);
+ }
+}
+
+size_t GLESRenderEngine::getMaxTextureSize() const {
+ return mMaxTextureSize;
+}
+
+size_t GLESRenderEngine::getMaxViewportDims() const {
+ return mMaxViewportDims[0] < mMaxViewportDims[1] ? mMaxViewportDims[0] : mMaxViewportDims[1];
+}
+
+void GLESRenderEngine::dump(std::string& result) {
+ const GLExtensions& extensions = GLExtensions::getInstance();
+ ProgramCache& cache = ProgramCache::getInstance();
+
+ StringAppendF(&result, "EGL implementation : %s\n", extensions.getEGLVersion());
+ StringAppendF(&result, "%s\n", extensions.getEGLExtensions());
+ StringAppendF(&result, "GLES: %s, %s, %s\n", extensions.getVendor(), extensions.getRenderer(),
+ extensions.getVersion());
+ StringAppendF(&result, "%s\n", extensions.getExtensions());
+ StringAppendF(&result, "RenderEngine supports protected context: %d\n",
+ supportsProtectedContent());
+ StringAppendF(&result, "RenderEngine is in protected context: %d\n", mInProtectedContext);
+ StringAppendF(&result, "RenderEngine program cache size for unprotected context: %zu\n",
+ cache.getSize(mEGLContext));
+ StringAppendF(&result, "RenderEngine program cache size for protected context: %zu\n",
+ cache.getSize(mProtectedEGLContext));
+ StringAppendF(&result, "RenderEngine last dataspace conversion: (%s) to (%s)\n",
+ dataspaceDetails(static_cast<android_dataspace>(mDataSpace)).c_str(),
+ dataspaceDetails(static_cast<android_dataspace>(mOutputDataSpace)).c_str());
+ {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ StringAppendF(&result, "RenderEngine image cache size: %zu\n", mImageCache.size());
+ StringAppendF(&result, "Dumping buffer ids...\n");
+ for (const auto& [id, unused] : mImageCache) {
+ StringAppendF(&result, "0x%" PRIx64 "\n", id);
+ }
+ }
+ {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ StringAppendF(&result, "RenderEngine framebuffer image cache size: %zu\n",
+ mFramebufferImageCache.size());
+ StringAppendF(&result, "Dumping buffer ids...\n");
+ for (const auto& [id, unused] : mFramebufferImageCache) {
+ StringAppendF(&result, "0x%" PRIx64 "\n", id);
+ }
+ }
+}
+
+GLESRenderEngine::GlesVersion GLESRenderEngine::parseGlesVersion(const char* str) {
+ int major, minor;
+ if (sscanf(str, "OpenGL ES-CM %d.%d", &major, &minor) != 2) {
+ if (sscanf(str, "OpenGL ES %d.%d", &major, &minor) != 2) {
+ ALOGW("Unable to parse GL_VERSION string: \"%s\"", str);
+ return GLES_VERSION_1_0;
+ }
+ }
+
+ if (major == 1 && minor == 0) return GLES_VERSION_1_0;
+ if (major == 1 && minor >= 1) return GLES_VERSION_1_1;
+ if (major == 2 && minor >= 0) return GLES_VERSION_2_0;
+ if (major == 3 && minor >= 0) return GLES_VERSION_3_0;
+
+ ALOGW("Unrecognized OpenGL ES version: %d.%d", major, minor);
+ return GLES_VERSION_1_0;
+}
+
+EGLContext GLESRenderEngine::createEglContext(EGLDisplay display, EGLConfig config,
+ EGLContext shareContext, bool useContextPriority,
+ Protection protection) {
+ EGLint renderableType = 0;
+ if (config == EGL_NO_CONFIG) {
+ renderableType = EGL_OPENGL_ES3_BIT;
+ } else if (!eglGetConfigAttrib(display, config, EGL_RENDERABLE_TYPE, &renderableType)) {
+ LOG_ALWAYS_FATAL("can't query EGLConfig RENDERABLE_TYPE");
+ }
+ EGLint contextClientVersion = 0;
+ if (renderableType & EGL_OPENGL_ES3_BIT) {
+ contextClientVersion = 3;
+ } else if (renderableType & EGL_OPENGL_ES2_BIT) {
+ contextClientVersion = 2;
+ } else if (renderableType & EGL_OPENGL_ES_BIT) {
+ contextClientVersion = 1;
+ } else {
+ LOG_ALWAYS_FATAL("no supported EGL_RENDERABLE_TYPEs");
+ }
+
+ std::vector<EGLint> contextAttributes;
+ contextAttributes.reserve(7);
+ contextAttributes.push_back(EGL_CONTEXT_CLIENT_VERSION);
+ contextAttributes.push_back(contextClientVersion);
+ if (useContextPriority) {
+ contextAttributes.push_back(EGL_CONTEXT_PRIORITY_LEVEL_IMG);
+ contextAttributes.push_back(EGL_CONTEXT_PRIORITY_HIGH_IMG);
+ }
+ if (protection == Protection::PROTECTED) {
+ contextAttributes.push_back(EGL_PROTECTED_CONTENT_EXT);
+ contextAttributes.push_back(EGL_TRUE);
+ }
+ contextAttributes.push_back(EGL_NONE);
+
+ EGLContext context = eglCreateContext(display, config, shareContext, contextAttributes.data());
+
+ if (contextClientVersion == 3 && context == EGL_NO_CONTEXT) {
+ // eglGetConfigAttrib indicated we can create GLES 3 context, but we failed, thus
+ // EGL_NO_CONTEXT so that we can abort.
+ if (config != EGL_NO_CONFIG) {
+ return context;
+ }
+ // If |config| is EGL_NO_CONFIG, we speculatively try to create GLES 3 context, so we should
+ // try to fall back to GLES 2.
+ contextAttributes[1] = 2;
+ context = eglCreateContext(display, config, shareContext, contextAttributes.data());
+ }
+
+ return context;
+}
+
+EGLSurface GLESRenderEngine::createStubEglPbufferSurface(EGLDisplay display, EGLConfig config,
+ int hwcFormat, Protection protection) {
+ EGLConfig stubConfig = config;
+ if (stubConfig == EGL_NO_CONFIG) {
+ stubConfig = chooseEglConfig(display, hwcFormat, /*logConfig*/ true);
+ }
+ std::vector<EGLint> attributes;
+ attributes.reserve(7);
+ attributes.push_back(EGL_WIDTH);
+ attributes.push_back(1);
+ attributes.push_back(EGL_HEIGHT);
+ attributes.push_back(1);
+ if (protection == Protection::PROTECTED) {
+ attributes.push_back(EGL_PROTECTED_CONTENT_EXT);
+ attributes.push_back(EGL_TRUE);
+ }
+ attributes.push_back(EGL_NONE);
+
+ return eglCreatePbufferSurface(display, stubConfig, attributes.data());
+}
+
+bool GLESRenderEngine::isHdrDataSpace(const Dataspace dataSpace) const {
+ const Dataspace standard = static_cast<Dataspace>(dataSpace & Dataspace::STANDARD_MASK);
+ const Dataspace transfer = static_cast<Dataspace>(dataSpace & Dataspace::TRANSFER_MASK);
+ return standard == Dataspace::STANDARD_BT2020 &&
+ (transfer == Dataspace::TRANSFER_ST2084 || transfer == Dataspace::TRANSFER_HLG);
+}
+
+// For convenience, we want to convert the input color space to XYZ color space first,
+// and then convert from XYZ color space to output color space when
+// - SDR and HDR contents are mixed, either SDR content will be converted to HDR or
+// HDR content will be tone-mapped to SDR; Or,
+// - there are HDR PQ and HLG contents presented at the same time, where we want to convert
+// HLG content to PQ content.
+// In either case above, we need to operate the Y value in XYZ color space. Thus, when either
+// input data space or output data space is HDR data space, and the input transfer function
+// doesn't match the output transfer function, we would enable an intermediate transfrom to
+// XYZ color space.
+bool GLESRenderEngine::needsXYZTransformMatrix() const {
+ const bool isInputHdrDataSpace = isHdrDataSpace(mDataSpace);
+ const bool isOutputHdrDataSpace = isHdrDataSpace(mOutputDataSpace);
+ const Dataspace inputTransfer = static_cast<Dataspace>(mDataSpace & Dataspace::TRANSFER_MASK);
+ const Dataspace outputTransfer =
+ static_cast<Dataspace>(mOutputDataSpace & Dataspace::TRANSFER_MASK);
+
+ return (isInputHdrDataSpace || isOutputHdrDataSpace) && inputTransfer != outputTransfer;
+}
+
+bool GLESRenderEngine::isImageCachedForTesting(uint64_t bufferId) {
+ std::lock_guard<std::mutex> lock(mRenderingMutex);
+ const auto& cachedImage = mImageCache.find(bufferId);
+ return cachedImage != mImageCache.end();
+}
+
+bool GLESRenderEngine::isTextureNameKnownForTesting(uint32_t texName) {
+ const auto& entry = mTextureView.find(texName);
+ return entry != mTextureView.end();
+}
+
+std::optional<uint64_t> GLESRenderEngine::getBufferIdForTextureNameForTesting(uint32_t texName) {
+ const auto& entry = mTextureView.find(texName);
+ return entry != mTextureView.end() ? entry->second : std::nullopt;
+}
+
+bool GLESRenderEngine::isFramebufferImageCachedForTesting(uint64_t bufferId) {
+ std::lock_guard<std::mutex> lock(mFramebufferImageCacheMutex);
+ return std::any_of(mFramebufferImageCache.cbegin(), mFramebufferImageCache.cend(),
+ [=](std::pair<uint64_t, EGLImageKHR> image) {
+ return image.first == bufferId;
+ });
+}
+
+// FlushTracer implementation
+GLESRenderEngine::FlushTracer::FlushTracer(GLESRenderEngine* engine) : mEngine(engine) {
+ mThread = std::thread(&GLESRenderEngine::FlushTracer::loop, this);
+}
+
+GLESRenderEngine::FlushTracer::~FlushTracer() {
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mRunning = false;
+ }
+ mCondition.notify_all();
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+void GLESRenderEngine::FlushTracer::queueSync(EGLSyncKHR sync) {
+ std::lock_guard<std::mutex> lock(mMutex);
+ char name[64];
+ const uint64_t frameNum = mFramesQueued++;
+ snprintf(name, sizeof(name), "Queueing sync for frame: %lu",
+ static_cast<unsigned long>(frameNum));
+ ATRACE_NAME(name);
+ mQueue.push({sync, frameNum});
+ ATRACE_INT("GPU Frames Outstanding", mQueue.size());
+ mCondition.notify_one();
+}
+
+void GLESRenderEngine::FlushTracer::loop() {
+ while (mRunning) {
+ QueueEntry entry;
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+
+ mCondition.wait(mMutex,
+ [&]() REQUIRES(mMutex) { return !mQueue.empty() || !mRunning; });
+
+ if (!mRunning) {
+ // if mRunning is false, then FlushTracer is being destroyed, so
+ // bail out now.
+ break;
+ }
+ entry = mQueue.front();
+ mQueue.pop();
+ }
+ {
+ char name[64];
+ snprintf(name, sizeof(name), "waiting for frame %lu",
+ static_cast<unsigned long>(entry.mFrameNum));
+ ATRACE_NAME(name);
+ mEngine->waitSync(entry.mSync, 0);
+ }
+ }
+}
+
+void GLESRenderEngine::handleShadow(const FloatRect& casterRect, float casterCornerRadius,
+ const ShadowSettings& settings) {
+ ATRACE_CALL();
+ const float casterZ = settings.length / 2.0f;
+ const GLShadowVertexGenerator shadows(casterRect, casterCornerRadius, casterZ,
+ settings.casterIsTranslucent, settings.ambientColor,
+ settings.spotColor, settings.lightPos,
+ settings.lightRadius);
+
+ // setup mesh for both shadows
+ Mesh mesh = Mesh::Builder()
+ .setPrimitive(Mesh::TRIANGLES)
+ .setVertices(shadows.getVertexCount(), 2 /* size */)
+ .setShadowAttrs()
+ .setIndices(shadows.getIndexCount())
+ .build();
+
+ Mesh::VertexArray<vec2> position = mesh.getPositionArray<vec2>();
+ Mesh::VertexArray<vec4> shadowColor = mesh.getShadowColorArray<vec4>();
+ Mesh::VertexArray<vec3> shadowParams = mesh.getShadowParamsArray<vec3>();
+ shadows.fillVertices(position, shadowColor, shadowParams);
+ shadows.fillIndices(mesh.getIndicesArray());
+
+ mState.cornerRadius = 0.0f;
+ mState.drawShadows = true;
+ setupLayerTexturing(mShadowTexture.getTexture());
+ drawMesh(mesh);
+ mState.drawShadows = false;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLESRenderEngine.h b/media/libstagefright/renderfright/gl/GLESRenderEngine.h
new file mode 100644
index 0000000..2c6eae2
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLESRenderEngine.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_GLESRENDERENGINE_H_
+#define SF_GLESRENDERENGINE_H_
+
+#include <condition_variable>
+#include <deque>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <unordered_map>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+#include <android-base/thread_annotations.h>
+#include <renderengine/RenderEngine.h>
+#include <renderengine/private/Description.h>
+#include <sys/types.h>
+#include "GLShadowTexture.h"
+#include "ImageManager.h"
+
+#define EGL_NO_CONFIG ((EGLConfig)0)
+
+namespace android {
+
+namespace renderengine {
+
+class Mesh;
+class Texture;
+
+namespace gl {
+
+class GLImage;
+class BlurFilter;
+
// OpenGL ES implementation of renderengine::RenderEngine. Owns the EGL
// display/context state (including an optional protected context and stub
// pbuffer surfaces), per-GraphicBuffer image caches, color-management
// matrices, and the state used to composite layers into an output buffer.
class GLESRenderEngine : public impl::RenderEngine {
public:
    static std::unique_ptr<GLESRenderEngine> create(const RenderEngineCreationArgs& args);

    GLESRenderEngine(const RenderEngineCreationArgs& args, EGLDisplay display, EGLConfig config,
                     EGLContext ctxt, EGLSurface stub, EGLContext protectedContext,
                     EGLSurface protectedStub);
    ~GLESRenderEngine() override EXCLUDES(mRenderingMutex);

    void primeCache() const override;
    void genTextures(size_t count, uint32_t* names) override;
    void deleteTextures(size_t count, uint32_t const* names) override;
    void bindExternalTextureImage(uint32_t texName, const Image& image) override;
    status_t bindExternalTextureBuffer(uint32_t texName, const sp<GraphicBuffer>& buffer,
                                       const sp<Fence>& fence) EXCLUDES(mRenderingMutex);
    void cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) EXCLUDES(mRenderingMutex);
    void unbindExternalTextureBuffer(uint64_t bufferId) EXCLUDES(mRenderingMutex);
    status_t bindFrameBuffer(Framebuffer* framebuffer) override;
    void unbindFrameBuffer(Framebuffer* framebuffer) override;

    bool isProtected() const override { return mInProtectedContext; }
    bool supportsProtectedContent() const override;
    bool useProtectedContext(bool useProtectedContext) override;
    status_t drawLayers(const DisplaySettings& display,
                        const std::vector<const LayerSettings*>& layers,
                        const sp<GraphicBuffer>& buffer, const bool useFramebufferCache,
                        base::unique_fd&& bufferFence, base::unique_fd* drawFence) override;
    bool cleanupPostRender(CleanupMode mode) override;

    EGLDisplay getEGLDisplay() const { return mEGLDisplay; }
    // Creates an output image for rendering to
    EGLImageKHR createFramebufferImageIfNeeded(ANativeWindowBuffer* nativeBuffer, bool isProtected,
                                               bool useFramebufferCache)
            EXCLUDES(mFramebufferImageCacheMutex);

    // Test-only methods
    // Returns true iff mImageCache contains an image keyed by bufferId
    bool isImageCachedForTesting(uint64_t bufferId) EXCLUDES(mRenderingMutex);
    // Returns true iff texName was previously generated by RenderEngine and was
    // not destroyed.
    bool isTextureNameKnownForTesting(uint32_t texName);
    // Returns the buffer ID of the content bound to texName, or nullopt if no
    // such mapping exists.
    std::optional<uint64_t> getBufferIdForTextureNameForTesting(uint32_t texName);
    // Returns true iff mFramebufferImageCache contains an image keyed by bufferId
    bool isFramebufferImageCachedForTesting(uint64_t bufferId)
            EXCLUDES(mFramebufferImageCacheMutex);
    // These are wrappers around public methods above, but exposing Barrier
    // objects so that tests can block.
    std::shared_ptr<ImageManager::Barrier> cacheExternalTextureBufferForTesting(
            const sp<GraphicBuffer>& buffer);
    std::shared_ptr<ImageManager::Barrier> unbindExternalTextureBufferForTesting(uint64_t bufferId);

protected:
    Framebuffer* getFramebufferForDrawing() override;
    void dump(std::string& result) override EXCLUDES(mRenderingMutex)
            EXCLUDES(mFramebufferImageCacheMutex);
    size_t getMaxTextureSize() const override;
    size_t getMaxViewportDims() const override;

private:
    // Packed GLES version: the constants encode major/minor as
    // (major << 16) | minor, parsed from the GL version string.
    enum GlesVersion {
        GLES_VERSION_1_0 = 0x10000,
        GLES_VERSION_1_1 = 0x10001,
        GLES_VERSION_2_0 = 0x20000,
        GLES_VERSION_3_0 = 0x30000,
    };

    // EGL bootstrap helpers used by create().
    static EGLConfig chooseEglConfig(EGLDisplay display, int format, bool logConfig);
    static GlesVersion parseGlesVersion(const char* str);
    static EGLContext createEglContext(EGLDisplay display, EGLConfig config,
                                       EGLContext shareContext, bool useContextPriority,
                                       Protection protection);
    static EGLSurface createStubEglPbufferSurface(EGLDisplay display, EGLConfig config,
                                                  int hwcFormat, Protection protection);
    std::unique_ptr<Framebuffer> createFramebuffer();
    std::unique_ptr<Image> createImage();
    void checkErrors() const;
    void checkErrors(const char* tag) const;
    void setScissor(const Rect& region);
    void disableScissor();
    bool waitSync(EGLSyncKHR sync, EGLint flags);
    status_t cacheExternalTextureBufferInternal(const sp<GraphicBuffer>& buffer)
            EXCLUDES(mRenderingMutex);
    void unbindExternalTextureBufferInternal(uint64_t bufferId) EXCLUDES(mRenderingMutex);

    // A data space is considered HDR data space if it has BT2020 color space
    // with PQ or HLG transfer function.
    bool isHdrDataSpace(const ui::Dataspace dataSpace) const;
    bool needsXYZTransformMatrix() const;
    // Defines the viewport, and sets the projection matrix to the projection
    // defined by the clip.
    void setViewportAndProjection(Rect viewport, Rect clip);
    // Evicts stale images from the buffer cache.
    void evictImages(const std::vector<LayerSettings>& layers);
    // Computes the cropping window for the layer and sets up cropping
    // coordinates for the mesh.
    FloatRect setupLayerCropping(const LayerSettings& layer, Mesh& mesh);

    // We do a special handling for rounded corners when it's possible to turn off blending
    // for the majority of the layer. The rounded corners needs to turn on blending such that
    // we can set the alpha value correctly, however, only the corners need this, and since
    // blending is an expensive operation, we want to turn off blending when it's not necessary.
    void handleRoundedCorners(const DisplaySettings& display, const LayerSettings& layer,
                              const Mesh& mesh);
    base::unique_fd flush();
    bool finish();
    bool waitFence(base::unique_fd fenceFd);
    void clearWithColor(float red, float green, float blue, float alpha);
    void fillRegionWithColor(const Region& region, float red, float green, float blue, float alpha);
    void handleShadow(const FloatRect& casterRect, float casterCornerRadius,
                      const ShadowSettings& shadowSettings);
    void setupLayerBlending(bool premultipliedAlpha, bool opaque, bool disableTexture,
                            const half4& color, float cornerRadius);
    void setupLayerTexturing(const Texture& texture);
    void setupFillWithColor(float r, float g, float b, float a);
    void setColorTransform(const mat4& colorTransform);
    void disableTexturing();
    void disableBlending();
    void setupCornerRadiusCropSize(float width, float height);

    // HDR and color management related functions and state
    void setSourceY410BT2020(bool enable);
    void setSourceDataSpace(ui::Dataspace source);
    void setOutputDataSpace(ui::Dataspace dataspace);
    void setDisplayMaxLuminance(const float maxLuminance);

    // drawing
    void drawMesh(const Mesh& mesh);

    // EGL objects created at construction; the protected variants are used
    // when rendering protected content.
    EGLDisplay mEGLDisplay;
    EGLConfig mEGLConfig;
    EGLContext mEGLContext;
    EGLSurface mStubSurface;
    EGLContext mProtectedEGLContext;
    EGLSurface mProtectedStubSurface;
    GLint mMaxViewportDims[2];
    GLint mMaxTextureSize;
    GLuint mVpWidth;
    GLuint mVpHeight;
    Description mState;
    GLShadowTexture mShadowTexture;

    // Color-space conversion matrices used by the color-management pipeline.
    mat4 mSrgbToXyz;
    mat4 mDisplayP3ToXyz;
    mat4 mBt2020ToXyz;
    mat4 mXyzToSrgb;
    mat4 mXyzToDisplayP3;
    mat4 mXyzToBt2020;
    mat4 mSrgbToDisplayP3;
    mat4 mSrgbToBt2020;
    mat4 mDisplayP3ToSrgb;
    mat4 mDisplayP3ToBt2020;
    mat4 mBt2020ToSrgb;
    mat4 mBt2020ToDisplayP3;

    bool mInProtectedContext = false;
    // If set to true, then enables tracing flush() and finish() to systrace.
    bool mTraceGpuCompletion = false;
    // Maximum size of mFramebufferImageCache. If more images would be cached, then (approximately)
    // the last recently used buffer should be kicked out.
    uint32_t mFramebufferImageCacheSize = 0;

    // Cache of output images, keyed by corresponding GraphicBuffer ID.
    std::deque<std::pair<uint64_t, EGLImageKHR>> mFramebufferImageCache
            GUARDED_BY(mFramebufferImageCacheMutex);
    // The only reason why we have this mutex is so that we don't segfault when
    // dumping info.
    std::mutex mFramebufferImageCacheMutex;

    // Current dataspace of layer being rendered
    ui::Dataspace mDataSpace = ui::Dataspace::UNKNOWN;

    // Current output dataspace of the render engine
    ui::Dataspace mOutputDataSpace = ui::Dataspace::UNKNOWN;

    // Whether device supports color management, currently color management
    // supports sRGB, DisplayP3 color spaces.
    const bool mUseColorManagement = false;

    // Cache of GL images that we'll store per GraphicBuffer ID
    std::unordered_map<uint64_t, std::unique_ptr<Image>> mImageCache GUARDED_BY(mRenderingMutex);
    std::unordered_map<uint32_t, std::optional<uint64_t>> mTextureView;

    // Mutex guarding rendering operations, so that:
    // 1. GL operations aren't interleaved, and
    // 2. Internal state related to rendering that is potentially modified by
    // multiple threads is guaranteed thread-safe.
    std::mutex mRenderingMutex;

    std::unique_ptr<Framebuffer> mDrawingBuffer;
    // this is a 1x1 RGB buffer, but over-allocate in case a driver wants more
    // memory or if it needs to satisfy alignment requirements. In this case:
    // assume that each channel requires 4 bytes, and add 3 additional bytes to
    // ensure that we align on a word. Allocating 16 bytes will provide a
    // guarantee that we don't clobber memory.
    uint32_t mPlaceholderDrawBuffer[4];
    // Placeholder buffer and image, similar to mPlaceholderDrawBuffer, but
    // instead these are intended for cleaning up texture memory with the
    // GL_TEXTURE_EXTERNAL_OES target.
    ANativeWindowBuffer* mPlaceholderBuffer = nullptr;
    EGLImage mPlaceholderImage = EGL_NO_IMAGE_KHR;
    sp<Fence> mLastDrawFence;
    // Store a separate boolean checking if prior resources were cleaned up, as
    // devices that don't support native sync fences can't rely on a last draw
    // fence that doesn't exist.
    bool mPriorResourcesCleaned = true;

    // Blur effect processor, only instantiated when a layer requests it.
    BlurFilter* mBlurFilter = nullptr;

    // Background worker that waits on queued EGL sync objects so that
    // per-frame GPU completion appears in systrace (used when
    // mTraceGpuCompletion is enabled).
    class FlushTracer {
    public:
        FlushTracer(GLESRenderEngine* engine);
        ~FlushTracer();
        void queueSync(EGLSyncKHR sync) EXCLUDES(mMutex);

        // One queued unit of work: a sync object tagged with its frame number.
        struct QueueEntry {
            EGLSyncKHR mSync = nullptr;
            uint64_t mFrameNum = 0;
        };

    private:
        void loop();
        GLESRenderEngine* const mEngine;
        std::thread mThread;
        std::condition_variable_any mCondition;
        std::mutex mMutex;
        std::queue<QueueEntry> mQueue GUARDED_BY(mMutex);
        uint64_t mFramesQueued GUARDED_BY(mMutex) = 0;
        bool mRunning = true;
    };
    friend class FlushTracer;
    friend class ImageManager;
    friend class GLFramebuffer;
    friend class BlurFilter;
    friend class GenericProgram;
    std::unique_ptr<FlushTracer> mFlushTracer;
    std::unique_ptr<ImageManager> mImageManager = std::make_unique<ImageManager>(this);
};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_GLESRENDERENGINE_H_ */
diff --git a/media/libstagefright/renderfright/gl/GLExtensions.cpp b/media/libstagefright/renderfright/gl/GLExtensions.cpp
new file mode 100644
index 0000000..2924b0e
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLExtensions.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GLExtensions.h"
+
+#include <string>
+#include <unordered_set>
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+ANDROID_SINGLETON_STATIC_INSTANCE(android::renderengine::gl::GLExtensions)
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+namespace {
+
// Splits a space-separated extension string into a hash set for O(1)
// membership queries.
class ExtensionSet {
public:
    ExtensionSet(const char* extensions) {
        const std::string all(extensions);
        std::string::size_type tokenStart = 0;
        while (tokenStart <= all.size()) {
            const std::string::size_type space = all.find(' ', tokenStart);
            const std::string::size_type tokenEnd =
                    (space == std::string::npos) ? all.size() : space;
            if (tokenEnd > tokenStart) {
                // string(str, pos, count) constructor copies one token.
                mExtensions.emplace(all, tokenStart, tokenEnd - tokenStart);
            }
            if (space == std::string::npos) {
                break;
            }
            tokenStart = space + 1;
        }
    }

    // True iff the exact token appeared in the source string.
    bool hasExtension(const char* extension) const { return mExtensions.count(extension) > 0; }

private:
    std::unordered_set<std::string> mExtensions;
};
+
+} // anonymous namespace
+
// Records the GL identification strings (vendor/renderer/version/extensions,
// as produced by glGetString) and derives the GL-level capability flags from
// the extension list.
void GLExtensions::initWithGLStrings(GLubyte const* vendor, GLubyte const* renderer,
                                     GLubyte const* version, GLubyte const* extensions) {
    mVendor = (char const*)vendor;
    mRenderer = (char const*)renderer;
    mVersion = (char const*)version;
    mExtensions = (char const*)extensions;

    // Currently the only GL-level flag derived here is protected-texture
    // support.
    ExtensionSet extensionSet(mExtensions.c_str());
    if (extensionSet.hasExtension("GL_EXT_protected_textures")) {
        mHasProtectedTexture = true;
    }
}
+
// Accessors for the raw GL identification strings captured by
// initWithGLStrings(). The returned pointers are owned by the singleton's
// String8 members and stay valid until those strings are re-initialized.
char const* GLExtensions::getVendor() const {
    return mVendor.string();
}

char const* GLExtensions::getRenderer() const {
    return mRenderer.string();
}

char const* GLExtensions::getVersion() const {
    return mVersion.string();
}

char const* GLExtensions::getExtensions() const {
    return mExtensions.string();
}
+
+void GLExtensions::initWithEGLStrings(char const* eglVersion, char const* eglExtensions) {
+ mEGLVersion = eglVersion;
+ mEGLExtensions = eglExtensions;
+
+ ExtensionSet extensionSet(eglExtensions);
+
+ // EGL_ANDROIDX_no_config_context is an experimental extension with no
+ // written specification. It will be replaced by something more formal.
+ // SurfaceFlinger is using it to allow a single EGLContext to render to
+ // both a 16-bit primary display framebuffer and a 32-bit virtual display
+ // framebuffer.
+ //
+ // EGL_KHR_no_config_context is official extension to allow creating a
+ // context that works with any surface of a display.
+ if (extensionSet.hasExtension("EGL_ANDROIDX_no_config_context") ||
+ extensionSet.hasExtension("EGL_KHR_no_config_context")) {
+ mHasNoConfigContext = true;
+ }
+
+ if (extensionSet.hasExtension("EGL_ANDROID_native_fence_sync")) {
+ mHasNativeFenceSync = true;
+ }
+ if (extensionSet.hasExtension("EGL_KHR_fence_sync")) {
+ mHasFenceSync = true;
+ }
+ if (extensionSet.hasExtension("EGL_KHR_wait_sync")) {
+ mHasWaitSync = true;
+ }
+ if (extensionSet.hasExtension("EGL_EXT_protected_content")) {
+ mHasProtectedContent = true;
+ }
+ if (extensionSet.hasExtension("EGL_IMG_context_priority")) {
+ mHasContextPriority = true;
+ }
+ if (extensionSet.hasExtension("EGL_KHR_surfaceless_context")) {
+ mHasSurfacelessContext = true;
+ }
+}
+
// Accessors for the raw EGL strings captured by initWithEGLStrings();
// returned pointers are owned by the singleton's String8 members.
char const* GLExtensions::getEGLVersion() const {
    return mEGLVersion.string();
}

char const* GLExtensions::getEGLExtensions() const {
    return mEGLExtensions.string();
}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLExtensions.h b/media/libstagefright/renderfright/gl/GLExtensions.h
new file mode 100644
index 0000000..ef00009
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLExtensions.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SF_GLEXTENSION_H
+#define ANDROID_SF_GLEXTENSION_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <utils/Singleton.h>
+#include <utils/String8.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLExtensions : public Singleton<GLExtensions> {
+public:
+ bool hasNoConfigContext() const { return mHasNoConfigContext; }
+ bool hasNativeFenceSync() const { return mHasNativeFenceSync; }
+ bool hasFenceSync() const { return mHasFenceSync; }
+ bool hasWaitSync() const { return mHasWaitSync; }
+ bool hasProtectedContent() const { return mHasProtectedContent; }
+ bool hasContextPriority() const { return mHasContextPriority; }
+ bool hasSurfacelessContext() const { return mHasSurfacelessContext; }
+ bool hasProtectedTexture() const { return mHasProtectedTexture; }
+
+ void initWithGLStrings(GLubyte const* vendor, GLubyte const* renderer, GLubyte const* version,
+ GLubyte const* extensions);
+ char const* getVendor() const;
+ char const* getRenderer() const;
+ char const* getVersion() const;
+ char const* getExtensions() const;
+
+ void initWithEGLStrings(char const* eglVersion, char const* eglExtensions);
+ char const* getEGLVersion() const;
+ char const* getEGLExtensions() const;
+
+protected:
+ GLExtensions() = default;
+
+private:
+ friend class Singleton<GLExtensions>;
+
+ bool mHasNoConfigContext = false;
+ bool mHasNativeFenceSync = false;
+ bool mHasFenceSync = false;
+ bool mHasWaitSync = false;
+ bool mHasProtectedContent = false;
+ bool mHasContextPriority = false;
+ bool mHasSurfacelessContext = false;
+ bool mHasProtectedTexture = false;
+
+ String8 mVendor;
+ String8 mRenderer;
+ String8 mVersion;
+ String8 mExtensions;
+ String8 mEGLVersion;
+ String8 mEGLExtensions;
+
+ GLExtensions(const GLExtensions&);
+ GLExtensions& operator=(const GLExtensions&);
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
+
+#endif // ANDROID_SF_GLEXTENSION_H
diff --git a/media/libstagefright/renderfright/gl/GLFramebuffer.cpp b/media/libstagefright/renderfright/gl/GLFramebuffer.cpp
new file mode 100644
index 0000000..383486b
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLFramebuffer.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "GLFramebuffer.h"
+
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2ext.h>
+#include <GLES3/gl3.h>
+#include <gui/DebugEGLImageTracker.h>
+#include <nativebase/nativebase.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
// Creates the GL texture and framebuffer object names up front; actual
// storage is attached later via setNativeWindowBuffer()/allocateBuffers().
GLFramebuffer::GLFramebuffer(GLESRenderEngine& engine)
      : mEngine(engine), mEGLDisplay(engine.getEGLDisplay()), mEGLImage(EGL_NO_IMAGE_KHR) {
    glGenTextures(1, &mTextureName);
    glGenFramebuffers(1, &mFramebufferName);
}
+
// Releases the GL objects created in the constructor.
// NOTE(review): mEGLImage is not destroyed here; confirm that its owner (the
// engine's framebuffer image cache, per setNativeWindowBuffer) releases it.
GLFramebuffer::~GLFramebuffer() {
    glDeleteFramebuffers(1, &mFramebufferName);
    glDeleteTextures(1, &mTextureName);
}
+
// Points this framebuffer at `nativeBuffer` (or detaches it when null).
// Any previously held EGLImage is released first — unless it came from the
// engine's framebuffer image cache, in which case the cache owns it and it
// is merely forgotten here. Returns false only if creating the new EGLImage
// fails.
bool GLFramebuffer::setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
                                          const bool useFramebufferCache) {
    ATRACE_CALL();
    if (mEGLImage != EGL_NO_IMAGE_KHR) {
        // Only destroy images we own; cached images belong to the engine.
        if (!usingFramebufferCache) {
            eglDestroyImageKHR(mEGLDisplay, mEGLImage);
            DEBUG_EGL_IMAGE_TRACKER_DESTROY();
        }
        mEGLImage = EGL_NO_IMAGE_KHR;
        mBufferWidth = 0;
        mBufferHeight = 0;
    }

    if (nativeBuffer) {
        mEGLImage = mEngine.createFramebufferImageIfNeeded(nativeBuffer, isProtected,
                                                           useFramebufferCache);
        if (mEGLImage == EGL_NO_IMAGE_KHR) {
            return false;
        }
        // Remember who owns the image so teardown above does the right thing.
        usingFramebufferCache = useFramebufferCache;
        mBufferWidth = nativeBuffer->width;
        mBufferHeight = nativeBuffer->height;
    }
    return true;
}
+
// (Re)allocates the FBO's color attachment as a width x height GL_RGB
// texture (optionally seeded with `data`), attaches it, and records the
// framebuffer completeness result in mStatus for callers to check via
// getStatus().
void GLFramebuffer::allocateBuffers(uint32_t width, uint32_t height, void* data) {
    ATRACE_CALL();

    glBindTexture(GL_TEXTURE_2D, mTextureName);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);

    mBufferHeight = height;
    mBufferWidth = width;
    mEngine.checkErrors("Allocating Fbo texture");

    // Attach the texture and capture the completeness status while bound.
    bind();
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, mTextureName, 0);
    mStatus = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    unbind();
    glBindTexture(GL_TEXTURE_2D, 0);

    if (mStatus != GL_FRAMEBUFFER_COMPLETE) {
        ALOGE("Frame buffer is not complete. Error %d", mStatus);
    }
}
+
// Binds the FBO for both reading and drawing.
void GLFramebuffer::bind() const {
    glBindFramebuffer(GL_FRAMEBUFFER, mFramebufferName);
}

// Binds the FBO as the read target only.
void GLFramebuffer::bindAsReadBuffer() const {
    glBindFramebuffer(GL_READ_FRAMEBUFFER, mFramebufferName);
}

// Binds the FBO as the draw target only.
void GLFramebuffer::bindAsDrawBuffer() const {
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, mFramebufferName);
}

// Restores the default (0) framebuffer binding.
void GLFramebuffer::unbind() const {
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLFramebuffer.h b/media/libstagefright/renderfright/gl/GLFramebuffer.h
new file mode 100644
index 0000000..6757695
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLFramebuffer.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+#include <renderengine/Framebuffer.h>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
// GL-backed implementation of renderengine::Framebuffer: wraps one GL
// texture and one framebuffer object, optionally bound to an EGLImage
// created from a client ANativeWindowBuffer.
class GLFramebuffer : public renderengine::Framebuffer {
public:
    explicit GLFramebuffer(GLESRenderEngine& engine);
    // NOTE(review): this (engine, multiTarget) overload is declared but no
    // definition is visible in GLFramebuffer.cpp — confirm it is defined
    // elsewhere or remove the declaration.
    explicit GLFramebuffer(GLESRenderEngine& engine, bool multiTarget);
    ~GLFramebuffer() override;

    // Points the framebuffer at nativeBuffer (null detaches); returns false
    // only if EGLImage creation fails.
    bool setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
                               const bool useFramebufferCache) override;
    // Allocates a width x height RGB color attachment (optionally seeded
    // with data); callers should check getStatus() for completeness.
    void allocateBuffers(uint32_t width, uint32_t height, void* data = nullptr);
    EGLImageKHR getEGLImage() const { return mEGLImage; }
    uint32_t getTextureName() const { return mTextureName; }
    uint32_t getFramebufferName() const { return mFramebufferName; }
    int32_t getBufferHeight() const { return mBufferHeight; }
    int32_t getBufferWidth() const { return mBufferWidth; }
    GLenum getStatus() const { return mStatus; }
    void bind() const;
    void bindAsReadBuffer() const;
    void bindAsDrawBuffer() const;
    void unbind() const;

private:
    GLESRenderEngine& mEngine;
    EGLDisplay mEGLDisplay;
    EGLImageKHR mEGLImage;
    // True when mEGLImage came from the engine's framebuffer image cache and
    // therefore must not be destroyed by this object.
    bool usingFramebufferCache = false;
    GLenum mStatus = GL_FRAMEBUFFER_UNSUPPORTED;
    uint32_t mTextureName, mFramebufferName;

    int32_t mBufferHeight = 0;
    int32_t mBufferWidth = 0;
};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLImage.cpp b/media/libstagefright/renderfright/gl/GLImage.cpp
new file mode 100644
index 0000000..8497721
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLImage.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "GLImage.h"
+
+#include <vector>
+
+#include <gui/DebugEGLImageTracker.h>
+#include <log/log.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+#include "GLExtensions.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+static std::vector<EGLint> buildAttributeList(bool isProtected) {
+ std::vector<EGLint> attrs;
+ attrs.reserve(16);
+
+ attrs.push_back(EGL_IMAGE_PRESERVED_KHR);
+ attrs.push_back(EGL_TRUE);
+
+ if (isProtected && GLExtensions::getInstance().hasProtectedContent()) {
+ attrs.push_back(EGL_PROTECTED_CONTENT_EXT);
+ attrs.push_back(EGL_TRUE);
+ }
+
+ attrs.push_back(EGL_NONE);
+
+ return attrs;
+}
+
// Only caches the engine's EGLDisplay for later image create/destroy calls.
GLImage::GLImage(const GLESRenderEngine& engine) : mEGLDisplay(engine.getEGLDisplay()) {}
+
GLImage::~GLImage() {
    // Passing nullptr tears down any EGLImage still held.
    setNativeWindowBuffer(nullptr, false);
}
+
// Rebinds this image to `buffer`: any existing EGLImage is destroyed first,
// then (if buffer is non-null) a new EGLImage is created from the native
// buffer, with protected-content attributes when requested and supported.
// Returns false only when EGLImage creation fails; a pure-teardown call
// (buffer == nullptr) always returns true.
bool GLImage::setNativeWindowBuffer(ANativeWindowBuffer* buffer, bool isProtected) {
    ATRACE_CALL();
    if (mEGLImage != EGL_NO_IMAGE_KHR) {
        if (!eglDestroyImageKHR(mEGLDisplay, mEGLImage)) {
            ALOGE("failed to destroy image: %#x", eglGetError());
        }
        DEBUG_EGL_IMAGE_TRACKER_DESTROY();
        mEGLImage = EGL_NO_IMAGE_KHR;
    }

    if (buffer) {
        std::vector<EGLint> attrs = buildAttributeList(isProtected);
        mEGLImage = eglCreateImageKHR(mEGLDisplay, EGL_NO_CONTEXT, EGL_NATIVE_BUFFER_ANDROID,
                                      static_cast<EGLClientBuffer>(buffer), attrs.data());
        if (mEGLImage == EGL_NO_IMAGE_KHR) {
            ALOGE("failed to create EGLImage: %#x", eglGetError());
            return false;
        }
        DEBUG_EGL_IMAGE_TRACKER_CREATE();
        mProtected = isProtected;
    }

    return true;
}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLImage.h b/media/libstagefright/renderfright/gl/GLImage.h
new file mode 100644
index 0000000..59d6ce3
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLImage.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <android-base/macros.h>
+#include <renderengine/Image.h>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
// GL-backed renderengine::Image: owns a single EGLImage created from an
// ANativeWindowBuffer, with optional protected-content allocation.
class GLImage : public renderengine::Image {
public:
    explicit GLImage(const GLESRenderEngine& engine);
    ~GLImage() override;

    // Rebinds to `buffer` (nullptr releases the current image); returns
    // false only when EGLImage creation fails.
    bool setNativeWindowBuffer(ANativeWindowBuffer* buffer, bool isProtected) override;

    EGLImageKHR getEGLImage() const { return mEGLImage; }
    bool isProtected() const { return mProtected; }

private:
    EGLDisplay mEGLDisplay;
    EGLImageKHR mEGLImage = EGL_NO_IMAGE_KHR;
    bool mProtected = false;

    DISALLOW_COPY_AND_ASSIGN(GLImage);
};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowTexture.cpp b/media/libstagefright/renderfright/gl/GLShadowTexture.cpp
new file mode 100644
index 0000000..2423a34
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowTexture.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <GLES3/gl3.h>
+
+#include "GLShadowTexture.h"
+#include "GLSkiaShadowPort.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
// Builds the 1-D shadow falloff texture: fills mTextureData via the Skia
// shadow port, uploads it as a 128x1 GL_ALPHA texture, and wraps it in a
// filtered Texture descriptor for the render engine.
GLShadowTexture::GLShadowTexture() {
    fillShadowTextureData(mTextureData, SHADOW_TEXTURE_WIDTH);

    glGenTextures(1, &mName);
    glBindTexture(GL_TEXTURE_2D, mName);
    glTexImage2D(GL_TEXTURE_2D, 0 /* base image level */, GL_ALPHA, SHADOW_TEXTURE_WIDTH,
                 SHADOW_TEXTURE_HEIGHT, 0 /* border */, GL_ALPHA, GL_UNSIGNED_BYTE, mTextureData);
    mTexture.init(Texture::TEXTURE_2D, mName);
    mTexture.setFiltering(true);
    // NOTE(review): height is passed as the literal 1 rather than
    // SHADOW_TEXTURE_HEIGHT (also 1) — equivalent today, but confirm intent.
    mTexture.setDimensions(SHADOW_TEXTURE_WIDTH, 1);
}
+
// Releases the GL texture created in the constructor.
GLShadowTexture::~GLShadowTexture() {
    glDeleteTextures(1, &mName);
}
+
// Accessor for the shadow falloff texture descriptor; the reference stays
// valid for this object's lifetime.
const Texture& GLShadowTexture::getTexture() {
    return mTexture;
}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowTexture.h b/media/libstagefright/renderfright/gl/GLShadowTexture.h
new file mode 100644
index 0000000..250a9d7
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowTexture.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <renderengine/Texture.h>
+#include <cstdint>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
// Owns a small one-row GL_ALPHA texture holding the shadow falloff curve.
// The GL texture is created in the constructor and deleted in the destructor
// (RAII over the GL texture name).
class GLShadowTexture {
public:
    GLShadowTexture();
    ~GLShadowTexture();

    // Returns the renderengine Texture describing the generated GL texture.
    const Texture& getTexture();

private:
    // Lookup-texture dimensions: 128 texels wide, a single row tall.
    static constexpr int SHADOW_TEXTURE_WIDTH = 128;
    static constexpr int SHADOW_TEXTURE_HEIGHT = 1;

    GLuint mName;     // GL texture name (id)
    Texture mTexture; // renderengine-level description of the texture
    // CPU-side texel data uploaded via glTexImage2D (one byte per texel).
    uint8_t mTextureData[SHADOW_TEXTURE_WIDTH];
};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.cpp b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.cpp
new file mode 100644
index 0000000..3181f9b
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/Mesh.h>
+
+#include <math/vec4.h>
+
+#include <ui/Rect.h>
+#include <ui/Transform.h>
+
+#include "GLShadowVertexGenerator.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GLShadowVertexGenerator::GLShadowVertexGenerator(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent, const vec4& ambientColor,
+ const vec4& spotColor, const vec3& lightPosition,
+ float lightRadius) {
+ mDrawAmbientShadow = ambientColor.a > 0.f;
+ mDrawSpotShadow = spotColor.a > 0.f;
+
+ // Generate geometries and find number of vertices to generate
+ if (mDrawAmbientShadow) {
+ mAmbientShadowGeometry = getAmbientShadowGeometry(casterRect, casterCornerRadius, casterZ,
+ casterIsTranslucent, ambientColor);
+ mAmbientShadowVertexCount = getVertexCountForGeometry(*mAmbientShadowGeometry.get());
+ mAmbientShadowIndexCount = getIndexCountForGeometry(*mAmbientShadowGeometry.get());
+ } else {
+ mAmbientShadowVertexCount = 0;
+ mAmbientShadowIndexCount = 0;
+ }
+
+ if (mDrawSpotShadow) {
+ mSpotShadowGeometry =
+ getSpotShadowGeometry(casterRect, casterCornerRadius, casterZ, casterIsTranslucent,
+ spotColor, lightPosition, lightRadius);
+ mSpotShadowVertexCount = getVertexCountForGeometry(*mSpotShadowGeometry.get());
+ mSpotShadowIndexCount = getIndexCountForGeometry(*mSpotShadowGeometry.get());
+ } else {
+ mSpotShadowVertexCount = 0;
+ mSpotShadowIndexCount = 0;
+ }
+}
+
// Total vertices for both shadow layers; a disabled layer contributes 0.
size_t GLShadowVertexGenerator::getVertexCount() const {
    return mAmbientShadowVertexCount + mSpotShadowVertexCount;
}
+
// Total indices for both shadow layers; a disabled layer contributes 0.
size_t GLShadowVertexGenerator::getIndexCount() const {
    return mAmbientShadowIndexCount + mSpotShadowIndexCount;
}
+
// Writes both shadows into one set of vertex arrays: the ambient shadow
// occupies the first mAmbientShadowVertexCount entries and the spot shadow
// follows it, via the offset VertexArray views constructed below.
void GLShadowVertexGenerator::fillVertices(Mesh::VertexArray<vec2>& position,
                                           Mesh::VertexArray<vec4>& color,
                                           Mesh::VertexArray<vec3>& params) const {
    if (mDrawAmbientShadow) {
        fillVerticesForGeometry(*mAmbientShadowGeometry.get(), mAmbientShadowVertexCount, position,
                                color, params);
    }
    if (mDrawSpotShadow) {
        // Offset views so the spot vertices land after the ambient ones.
        fillVerticesForGeometry(*mSpotShadowGeometry.get(), mSpotShadowVertexCount,
                                Mesh::VertexArray<vec2>(position, mAmbientShadowVertexCount),
                                Mesh::VertexArray<vec4>(color, mAmbientShadowVertexCount),
                                Mesh::VertexArray<vec3>(params, mAmbientShadowVertexCount));
    }
}
+
// Writes the index lists for both shadows into |indices|. Spot indices are
// offset by the ambient vertex count so they address the spot vertices
// appended by fillVertices(), and are written after the ambient index range.
void GLShadowVertexGenerator::fillIndices(uint16_t* indices) const {
    if (mDrawAmbientShadow) {
        fillIndicesForGeometry(*mAmbientShadowGeometry.get(), mAmbientShadowIndexCount,
                               0 /* starting vertex offset */, indices);
    }
    if (mDrawSpotShadow) {
        fillIndicesForGeometry(*mSpotShadowGeometry.get(), mSpotShadowIndexCount,
                               mAmbientShadowVertexCount /* starting vertex offset */,
                               &(indices[mAmbientShadowIndexCount]));
    }
}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.h b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.h
new file mode 100644
index 0000000..112f976
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLShadowVertexGenerator.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <math/vec4.h>
+#include <ui/Rect.h>
+
+#include "GLSkiaShadowPort.h"
+
+namespace android {
+namespace renderengine {
+
+class Mesh;
+
+namespace gl {
+
+/**
+ * Generates gl attributes required to draw shadow spot and/or ambient shadows.
+ *
+ * Each shadow can support different colors. This class generates three vertex attributes for
+ * each shadow, its position, color and shadow params(offset and distance). These can be sent
+ * using a single glDrawElements call.
+ */
class GLShadowVertexGenerator {
public:
    // Precomputes ambient and/or spot shadow geometry for the given caster.
    // A shadow layer whose color has alpha == 0 is skipped entirely.
    GLShadowVertexGenerator(const FloatRect& casterRect, float casterCornerRadius, float casterZ,
                            bool casterIsTranslucent, const vec4& ambientColor,
                            const vec4& spotColor, const vec3& lightPosition, float lightRadius);
    ~GLShadowVertexGenerator() = default;

    // Combined counts over the enabled shadow layers.
    size_t getVertexCount() const;
    size_t getIndexCount() const;
    // Fills the ambient shadow first, then the spot shadow, into the same
    // attribute/index buffers (spot data is offset by the ambient counts).
    void fillVertices(Mesh::VertexArray<vec2>& position, Mesh::VertexArray<vec4>& color,
                      Mesh::VertexArray<vec3>& params) const;
    void fillIndices(uint16_t* indices) const;

private:
    bool mDrawAmbientShadow;                           // ambientColor.a > 0
    std::unique_ptr<Geometry> mAmbientShadowGeometry;  // null when disabled
    int mAmbientShadowVertexCount = 0;
    int mAmbientShadowIndexCount = 0;

    bool mDrawSpotShadow;                              // spotColor.a > 0
    std::unique_ptr<Geometry> mSpotShadowGeometry;     // null when disabled
    int mSpotShadowVertexCount = 0;
    int mSpotShadowIndexCount = 0;
};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLSkiaShadowPort.cpp b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.cpp
new file mode 100644
index 0000000..da8b435
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.cpp
@@ -0,0 +1,656 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math/vec4.h>
+
+#include <renderengine/Mesh.h>
+
+#include <ui/Rect.h>
+#include <ui/Transform.h>
+
+#include <utils/Log.h>
+
+#include "GLSkiaShadowPort.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/**
+ * The shadow geometry logic and vertex generation code has been ported from skia shadow
+ * fast path OpenGL implementation to draw shadows around rects and rounded rects including
+ * circles.
+ *
+ * path: skia/src/gpu/GrRenderTargetContext.cpp GrRenderTargetContext::drawFastShadow
+ *
+ * Modifications made:
+ * - Switched to using std lib math functions
+ * - Fall off function is implemented in vertex shader rather than a shadow texture
+ * - Removed transformations applied on the caster rect since the caster will be in local
+ * coordinate space and will be transformed by the vertex shader.
+ */
+
// Divides numer by denom and pins the quotient to [min, max]; a zero
// denominator yields min instead of dividing.
static inline float divide_and_pin(float numer, float denom, float min, float max) {
    return (denom == 0.0f) ? min : std::clamp(numer / denom, min, max);
}
+
// sqrt(2), matching skia's SK_ScalarSqrt2 (used by the rrect corner math).
static constexpr float SK_ScalarSqrt2 = 1.41421356f;
// Ambient-shadow tuning factors ported from skia.
static constexpr float kAmbientHeightFactor = 1.0f / 128.0f;
static constexpr float kAmbientGeomFactor = 64.0f;
// Assuming that we have a light height of 600 for the spot shadow,
// the spot values will reach their maximum at a height of approximately 292.3077.
// We'll round up to 300 to keep it simple.
static constexpr float kMaxAmbientRadius = 300 * kAmbientHeightFactor * kAmbientGeomFactor;

// Ambient blur grows linearly with the caster height and saturates at
// kMaxAmbientRadius.
inline float AmbientBlurRadius(float height) {
    return std::min(height * kAmbientHeightFactor * kAmbientGeomFactor, kMaxAmbientRadius);
}

// Reciprocal-alpha factor for the ambient umbra; grows with caster height
// (used as umbraRecipAlpha in getAmbientShadowGeometry()).
inline float AmbientRecipAlpha(float height) {
    return 1.0f + std::max(height * kAmbientHeightFactor, 0.0f);
}
+
+//////////////////////////////////////////////////////////////////////////////
+// Circle Data
+//
+// We have two possible cases for geometry for a circle:
+
+// In the case of a normal fill, we draw geometry for the circle as an octagon.
// In the case of a normal fill, we draw geometry for the circle as an octagon.
// Vertices 0-7 are the outer octagon ring; vertex 8 is the center of the fan.
static const uint16_t gFillCircleIndices[] = {
        // enter the octagon
        // clang-format off
        0, 1, 8, 1, 2, 8,
        2, 3, 8, 3, 4, 8,
        4, 5, 8, 5, 6, 8,
        6, 7, 8, 7, 0, 8,
        // clang-format on
};

// For stroked circles, we use two nested octagons.
// Vertices 0-7 are the outer ring, 8-15 the inner ring.
static const uint16_t gStrokeCircleIndices[] = {
        // enter the octagon
        // clang-format off
        0, 1, 9, 0, 9, 8,
        1, 2, 10, 1, 10, 9,
        2, 3, 11, 2, 11, 10,
        3, 4, 12, 3, 12, 11,
        4, 5, 13, 4, 13, 12,
        5, 6, 14, 5, 14, 13,
        6, 7, 15, 6, 15, 14,
        7, 0, 8, 7, 8, 15,
        // clang-format on
};

// Number of elements in a C array (ported from skia's SK_ARRAY_COUNT).
#define SK_ARRAY_COUNT(a) (sizeof(a) / sizeof((a)[0]))
static const int kIndicesPerFillCircle = SK_ARRAY_COUNT(gFillCircleIndices);
static const int kIndicesPerStrokeCircle = SK_ARRAY_COUNT(gStrokeCircleIndices);
static const int kVertsPerStrokeCircle = 16;
static const int kVertsPerFillCircle = 9;

// Vertex count for a circle tessellation (fill: octagon + center; stroke:
// two nested octagons).
static int circle_type_to_vert_count(bool stroked) {
    return stroked ? kVertsPerStrokeCircle : kVertsPerFillCircle;
}

// Index count matching circle_type_to_vert_count().
static int circle_type_to_index_count(bool stroked) {
    return stroked ? kIndicesPerStrokeCircle : kIndicesPerFillCircle;
}

// Canonical (unoffset) index list for a circle tessellation.
static const uint16_t* circle_type_to_indices(bool stroked) {
    return stroked ? gStrokeCircleIndices : gFillCircleIndices;
}
+
+///////////////////////////////////////////////////////////////////////////////
+// RoundRect Data
+//
+// The geometry for a shadow roundrect is similar to a 9-patch:
+// ____________
+// |_|________|_|
+// | | | |
+// | | | |
+// | | | |
+// |_|________|_|
+// |_|________|_|
+//
+// However, each corner is rendered as a fan rather than a simple quad, as below. (The diagram
+// shows the upper part of the upper left corner. The bottom triangle would similarly be split
+// into two triangles.)
+// ________
+// |\ \ |
+// | \ \ |
+// | \\ |
+// | \|
+// --------
+//
+// The center of the fan handles the curve of the corner. For roundrects where the stroke width
+// is greater than the corner radius, the outer triangles blend from the curve to the straight
+// sides. Otherwise these triangles will be degenerate.
+//
+// In the case where the stroke width is greater than the corner radius and the
+// blur radius (overstroke), we add additional geometry to mark out the rectangle in the center.
+// This rectangle extends the coverage values of the center edges of the 9-patch.
+// ____________
+// |_|________|_|
+// | |\ ____ /| |
+// | | | | | |
+// | | |____| | |
+// |_|/______\|_|
+// |_|________|_|
+//
+// For filled rrects we reuse the stroke geometry but add an additional quad to the center.
+
// Index list for the rounded-rect 9-patch layout described above. Vertices
// are emitted by fillInRRectVerts(): 6 per corner (0-5, 6-11, 12-17, 18-23),
// plus 4 optional overstroke vertices (24-27).
static const uint16_t gRRectIndices[] = {
        // clang-format off
        // overstroke quads
        // we place this at the beginning so that we can skip these indices when rendering as filled
        0, 6, 25, 0, 25, 24,
        6, 18, 27, 6, 27, 25,
        18, 12, 26, 18, 26, 27,
        12, 0, 24, 12, 24, 26,

        // corners
        0, 1, 2, 0, 2, 3, 0, 3, 4, 0, 4, 5,
        6, 11, 10, 6, 10, 9, 6, 9, 8, 6, 8, 7,
        12, 17, 16, 12, 16, 15, 12, 15, 14, 12, 14, 13,
        18, 19, 20, 18, 20, 21, 18, 21, 22, 18, 22, 23,

        // edges
        0, 5, 11, 0, 11, 6,
        6, 7, 19, 6, 19, 18,
        18, 23, 17, 18, 17, 12,
        12, 13, 1, 12, 1, 0,

        // fill quad
        // we place this at the end so that we can skip these indices when rendering as stroked
        0, 6, 18, 0, 18, 12,
        // clang-format on
};

// overstroke count
static const int kIndicesPerOverstrokeRRect = SK_ARRAY_COUNT(gRRectIndices) - 6;
// simple stroke count skips overstroke indices
static const int kIndicesPerStrokeRRect = kIndicesPerOverstrokeRRect - 6 * 4;
// fill count adds final quad to stroke count
static const int kIndicesPerFillRRect = kIndicesPerStrokeRRect + 6;
static const int kVertsPerStrokeRRect = 24;
static const int kVertsPerOverstrokeRRect = 28;
static const int kVertsPerFillRRect = 24;

// Vertex count for a rounded-rect tessellation; logs and returns -1 on a
// corrupt enum value (all valid enumerators are handled above).
static int rrect_type_to_vert_count(RRectType type) {
    switch (type) {
        case kFill_RRectType:
            return kVertsPerFillRRect;
        case kStroke_RRectType:
            return kVertsPerStrokeRRect;
        case kOverstroke_RRectType:
            return kVertsPerOverstrokeRRect;
    }
    ALOGE("Invalid rect type: %d", type);
    return -1;
}

// Index count matching rrect_type_to_vert_count(); -1 on a corrupt enum.
static int rrect_type_to_index_count(RRectType type) {
    switch (type) {
        case kFill_RRectType:
            return kIndicesPerFillRRect;
        case kStroke_RRectType:
            return kIndicesPerStrokeRRect;
        case kOverstroke_RRectType:
            return kIndicesPerOverstrokeRRect;
    }
    ALOGE("Invalid rect type: %d", type);
    return -1;
}

// Canonical index list start: fill and stroke skip the leading overstroke
// quads; overstroke uses the whole table. nullptr on a corrupt enum.
static const uint16_t* rrect_type_to_indices(RRectType type) {
    switch (type) {
        case kFill_RRectType:
        case kStroke_RRectType:
            return gRRectIndices + 6 * 4;
        case kOverstroke_RRectType:
            return gRRectIndices;
    }
    ALOGE("Invalid rect type: %d", type);
    return nullptr;
}
+
// Emits the vertex attributes for a circular shadow: 8 outer-octagon
// vertices, then either an inner ring of 8 (stroked) or a single center
// vertex (filled), matching gStrokeCircleIndices / gFillCircleIndices.
// shadowParams.xy is the normalized offset fed to the falloff evaluation;
// .z is the distance correction shared by every vertex.
static void fillInCircleVerts(const Geometry& args, bool isStroked,
                              Mesh::VertexArray<vec2>& position,
                              Mesh::VertexArray<vec4>& shadowColor,
                              Mesh::VertexArray<vec3>& shadowParams) {
    vec4 color = args.fColor;
    float outerRadius = args.fOuterRadius;
    float innerRadius = args.fInnerRadius;
    float blurRadius = args.fBlurRadius;
    float distanceCorrection = outerRadius / blurRadius;

    const FloatRect& bounds = args.fDevBounds;

    // The inner radius in the vertex data must be specified in normalized space.
    innerRadius = innerRadius / outerRadius;

    vec2 center = vec2(bounds.getWidth() / 2.0f, bounds.getHeight() / 2.0f);
    float halfWidth = 0.5f * bounds.getWidth();
    float octOffset = 0.41421356237f; // sqrt(2) - 1
    int vertexCount = 0;

    // Outer octagon ring, clockwise from the top-left edge.
    position[vertexCount] = center + vec2(-octOffset * halfWidth, -halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(-octOffset, -1, distanceCorrection);
    vertexCount++;

    position[vertexCount] = center + vec2(octOffset * halfWidth, -halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(octOffset, -1, distanceCorrection);
    vertexCount++;

    position[vertexCount] = center + vec2(halfWidth, -octOffset * halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(1, -octOffset, distanceCorrection);
    vertexCount++;

    position[vertexCount] = center + vec2(halfWidth, octOffset * halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(1, octOffset, distanceCorrection);
    vertexCount++;

    position[vertexCount] = center + vec2(octOffset * halfWidth, halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(octOffset, 1, distanceCorrection);
    vertexCount++;

    position[vertexCount] = center + vec2(-octOffset * halfWidth, halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(-octOffset, 1, distanceCorrection);
    vertexCount++;

    position[vertexCount] = center + vec2(-halfWidth, octOffset * halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(-1, octOffset, distanceCorrection);
    vertexCount++;

    position[vertexCount] = center + vec2(-halfWidth, -octOffset * halfWidth);
    shadowColor[vertexCount] = color;
    shadowParams[vertexCount] = vec3(-1, -octOffset, distanceCorrection);
    vertexCount++;

    if (isStroked) {
        // compute the inner ring

        // cosine and sine of pi/8
        float c = 0.923579533f;
        float s = 0.382683432f;
        // NOTE: positions use the raw inner radius; shadowParams use the
        // normalized innerRadius computed above.
        float r = args.fInnerRadius;

        position[vertexCount] = center + vec2(-s * r, -c * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(-s * innerRadius, -c * innerRadius, distanceCorrection);
        vertexCount++;

        position[vertexCount] = center + vec2(s * r, -c * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(s * innerRadius, -c * innerRadius, distanceCorrection);
        vertexCount++;

        position[vertexCount] = center + vec2(c * r, -s * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(c * innerRadius, -s * innerRadius, distanceCorrection);
        vertexCount++;

        position[vertexCount] = center + vec2(c * r, s * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(c * innerRadius, s * innerRadius, distanceCorrection);
        vertexCount++;

        position[vertexCount] = center + vec2(s * r, c * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(s * innerRadius, c * innerRadius, distanceCorrection);
        vertexCount++;

        position[vertexCount] = center + vec2(-s * r, c * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(-s * innerRadius, c * innerRadius, distanceCorrection);
        vertexCount++;

        position[vertexCount] = center + vec2(-c * r, s * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(-c * innerRadius, s * innerRadius, distanceCorrection);
        vertexCount++;

        position[vertexCount] = center + vec2(-c * r, -s * r);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(-c * innerRadius, -s * innerRadius, distanceCorrection);
        vertexCount++;
    } else {
        // filled
        position[vertexCount] = center;
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
        vertexCount++;
    }
}
+
// Emits the vertex attributes for a rounded-rect shadow in the 9-patch-with-
// corner-fans layout that gRRectIndices addresses: 6 vertices per corner
// (24 total), plus 4 extra inner vertices when args.fType is overstroke.
// See the "RoundRect Data" diagrams above.
static void fillInRRectVerts(const Geometry& args, Mesh::VertexArray<vec2>& position,
                             Mesh::VertexArray<vec4>& shadowColor,
                             Mesh::VertexArray<vec3>& shadowParams) {
    vec4 color = args.fColor;
    float outerRadius = args.fOuterRadius;

    const FloatRect& bounds = args.fDevBounds;

    // Clamp the umbra inset to half the smaller rect dimension.
    float umbraInset = args.fUmbraInset;
    float minDim = 0.5f * std::min(bounds.getWidth(), bounds.getHeight());
    if (umbraInset > minDim) {
        umbraInset = minDim;
    }

    // Per-corner x/y coordinates (TL, TR, BL, BR) for the inner (umbra),
    // mid (corner-radius) and outer edges of the 9-patch.
    float xInner[4] = {bounds.left + umbraInset, bounds.right - umbraInset,
                       bounds.left + umbraInset, bounds.right - umbraInset};
    float xMid[4] = {bounds.left + outerRadius, bounds.right - outerRadius,
                     bounds.left + outerRadius, bounds.right - outerRadius};
    float xOuter[4] = {bounds.left, bounds.right, bounds.left, bounds.right};
    float yInner[4] = {bounds.top + umbraInset, bounds.top + umbraInset, bounds.bottom - umbraInset,
                       bounds.bottom - umbraInset};
    float yMid[4] = {bounds.top + outerRadius, bounds.top + outerRadius,
                     bounds.bottom - outerRadius, bounds.bottom - outerRadius};
    float yOuter[4] = {bounds.top, bounds.top, bounds.bottom, bounds.bottom};

    float blurRadius = args.fBlurRadius;

    // In the case where we have to inset more for the umbra, our two triangles in the
    // corner get skewed to a diamond rather than a square. To correct for that,
    // we also skew the vectors we send to the shader that help define the circle.
    // By doing so, we end up with a quarter circle in the corner rather than the
    // elliptical curve.

    // This is a bit magical, but it gives us the correct results at extrema:
    // a) umbraInset == outerRadius produces an orthogonal vector
    // b) outerRadius == 0 produces a diagonal vector
    // And visually the corner looks correct.
    vec2 outerVec = vec2(outerRadius - umbraInset, -outerRadius - umbraInset);
    outerVec = normalize(outerVec);
    // We want the circle edge to fall fractionally along the diagonal at
    // (sqrt(2)*(umbraInset - outerRadius) + outerRadius)/sqrt(2)*umbraInset
    //
    // Setting the components of the diagonal offset to the following value will give us that.
    float diagVal = umbraInset / (SK_ScalarSqrt2 * (outerRadius - umbraInset) - outerRadius);
    vec2 diagVec = vec2(diagVal, diagVal);
    float distanceCorrection = umbraInset / blurRadius;

    int vertexCount = 0;
    // build corner by corner
    for (int i = 0; i < 4; ++i) {
        // inner point
        position[vertexCount] = vec2(xInner[i], yInner[i]);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
        vertexCount++;

        // outer points
        position[vertexCount] = vec2(xOuter[i], yInner[i]);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, -1, distanceCorrection);
        vertexCount++;

        position[vertexCount] = vec2(xOuter[i], yMid[i]);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(outerVec.x, outerVec.y, distanceCorrection);
        vertexCount++;

        position[vertexCount] = vec2(xOuter[i], yOuter[i]);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(diagVec.x, diagVec.y, distanceCorrection);
        vertexCount++;

        position[vertexCount] = vec2(xMid[i], yOuter[i]);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(outerVec.x, outerVec.y, distanceCorrection);
        vertexCount++;

        position[vertexCount] = vec2(xInner[i], yOuter[i]);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, -1, distanceCorrection);
        vertexCount++;
    }

    // Add the additional vertices for overstroked rrects.
    // Effectively this is an additional stroked rrect, with its
    // parameters equal to those in the center of the 9-patch. This will
    // give constant values across this inner ring.
    if (kOverstroke_RRectType == args.fType) {
        float inset = umbraInset + args.fInnerRadius;

        // TL
        position[vertexCount] = vec2(bounds.left + inset, bounds.top + inset);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
        vertexCount++;

        // TR
        position[vertexCount] = vec2(bounds.right - inset, bounds.top + inset);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
        vertexCount++;

        // BL
        position[vertexCount] = vec2(bounds.left + inset, bounds.bottom - inset);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
        vertexCount++;

        // BR
        position[vertexCount] = vec2(bounds.right - inset, bounds.bottom - inset);
        shadowColor[vertexCount] = color;
        shadowParams[vertexCount] = vec3(0, 0, distanceCorrection);
        vertexCount++;
    }
}
+
+int getVertexCountForGeometry(const Geometry& shadowGeometry) {
+ if (shadowGeometry.fIsCircle) {
+ return circle_type_to_vert_count(shadowGeometry.fType);
+ }
+
+ return rrect_type_to_vert_count(shadowGeometry.fType);
+}
+
+int getIndexCountForGeometry(const Geometry& shadowGeometry) {
+ if (shadowGeometry.fIsCircle) {
+ return circle_type_to_index_count(kStroke_RRectType == shadowGeometry.fType);
+ }
+
+ return rrect_type_to_index_count(shadowGeometry.fType);
+}
+
// Writes the position/color/params vertex attributes for |shadowGeometry|
// into the supplied arrays. The vertexCount parameter is unused: the
// geometry type alone determines how many vertices are written (see
// getVertexCountForGeometry()).
void fillVerticesForGeometry(const Geometry& shadowGeometry, int /* vertexCount */,
                             Mesh::VertexArray<vec2> position, Mesh::VertexArray<vec4> shadowColor,
                             Mesh::VertexArray<vec3> shadowParams) {
    if (shadowGeometry.fIsCircle) {
        fillInCircleVerts(shadowGeometry, shadowGeometry.fIsStroked, position, shadowColor,
                          shadowParams);
    } else {
        fillInRRectVerts(shadowGeometry, position, shadowColor, shadowParams);
    }
}
+
+void fillIndicesForGeometry(const Geometry& shadowGeometry, int indexCount,
+ int startingVertexOffset, uint16_t* indices) {
+ if (shadowGeometry.fIsCircle) {
+ const uint16_t* primIndices = circle_type_to_indices(shadowGeometry.fIsStroked);
+ for (int i = 0; i < indexCount; ++i) {
+ indices[i] = primIndices[i] + startingVertexOffset;
+ }
+ } else {
+ const uint16_t* primIndices = rrect_type_to_indices(shadowGeometry.fType);
+ for (int i = 0; i < indexCount; ++i) {
+ indices[i] = primIndices[i] + startingVertexOffset;
+ }
+ }
+}
+
+inline void GetSpotParams(float occluderZ, float lightX, float lightY, float lightZ,
+ float lightRadius, float& blurRadius, float& scale, vec2& translate) {
+ float zRatio = divide_and_pin(occluderZ, lightZ - occluderZ, 0.0f, 0.95f);
+ blurRadius = lightRadius * zRatio;
+ scale = divide_and_pin(lightZ, lightZ - occluderZ, 1.0f, 1.95f);
+ translate.x = -zRatio * lightX;
+ translate.y = -zRatio * lightY;
+}
+
// Builds a Geometry describing either a circle (when the corner radius
// covers the whole rect) or a rounded rect shadow, classifying it as fill,
// stroke or overstroke based on |insetWidth|.
static std::unique_ptr<Geometry> getShadowGeometry(const vec4& color, const FloatRect& devRect,
                                                   float devRadius, float blurRadius,
                                                   float insetWidth) {
    // If the corner radius is at least as large as both rect dimensions, the
    // rounded rect degenerates into a circle.
    const bool isCircle = ((devRadius >= devRect.getWidth()) && (devRadius >= devRect.getHeight()));

    FloatRect bounds = devRect;
    float innerRadius = 0.0f;
    float outerRadius = devRadius;
    float umbraInset;

    RRectType type = kFill_RRectType;
    if (isCircle) {
        umbraInset = 0;
    } else {
        umbraInset = std::max(outerRadius, blurRadius);
    }

    // An insetWidth > 1/2 rect width or height indicates a simple fill;
    // otherwise compute the stroke parameters.
    if (isCircle) {
        innerRadius = devRadius - insetWidth;
        type = innerRadius > 0 ? kStroke_RRectType : kFill_RRectType;
    } else {
        if (insetWidth <= 0.5f * std::min(devRect.getWidth(), devRect.getHeight())) {
            // We don't worry about a real inner radius, we just need to know if we
            // need to create overstroke vertices.
            innerRadius = std::max(insetWidth - umbraInset, 0.0f);
            type = innerRadius > 0 ? kOverstroke_RRectType : kStroke_RRectType;
        }
    }
    const bool isStroked = (kStroke_RRectType == type);
    return std::make_unique<Geometry>(Geometry{color, outerRadius, umbraInset, innerRadius,
                                               blurRadius, bounds, type, isCircle, isStroked});
}
+
// Builds the ambient shadow geometry for |casterRect|: the caster outline is
// outset by a height-dependent blur radius (AmbientBlurRadius) and
// tessellated as a stroke, or as a fill when the caster is translucent.
std::unique_ptr<Geometry> getAmbientShadowGeometry(const FloatRect& casterRect,
                                                   float casterCornerRadius, float casterZ,
                                                   bool casterIsTranslucent,
                                                   const vec4& ambientColor) {
    float devSpaceInsetWidth = AmbientBlurRadius(casterZ);
    const float umbraRecipAlpha = AmbientRecipAlpha(casterZ);
    const float devSpaceAmbientBlur = devSpaceInsetWidth * umbraRecipAlpha;

    // Outset the shadow rrect to the border of the penumbra
    float ambientPathOutset = devSpaceInsetWidth;
    FloatRect outsetRect(casterRect);
    outsetRect.left -= ambientPathOutset;
    outsetRect.top -= ambientPathOutset;
    outsetRect.right += ambientPathOutset;
    outsetRect.bottom += ambientPathOutset;

    float outsetRad = casterCornerRadius + ambientPathOutset;
    if (casterIsTranslucent) {
        // set a large inset to force a fill
        devSpaceInsetWidth = outsetRect.getWidth();
    }

    return getShadowGeometry(ambientColor, outsetRect, std::abs(outsetRad), devSpaceAmbientBlur,
                             std::abs(devSpaceInsetWidth));
}
+
+std::unique_ptr<Geometry> getSpotShadowGeometry(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent, const vec4& spotColor,
+ const vec3& lightPosition, float lightRadius) {
+ float devSpaceSpotBlur;
+ float spotScale;
+ vec2 spotOffset;
+ GetSpotParams(casterZ, lightPosition.x, lightPosition.y, lightPosition.z, lightRadius,
+ devSpaceSpotBlur, spotScale, spotOffset);
+ // handle scale of radius due to CTM
+ const float srcSpaceSpotBlur = devSpaceSpotBlur;
+
+ // Adjust translate for the effect of the scale.
+ spotOffset.x += spotScale;
+ spotOffset.y += spotScale;
+
+ // Compute the transformed shadow rect
+ ui::Transform shadowTransform;
+ shadowTransform.set(spotOffset.x, spotOffset.y);
+ shadowTransform.set(spotScale, 0, 0, spotScale);
+ FloatRect spotShadowRect = shadowTransform.transform(casterRect);
+ float spotShadowRadius = casterCornerRadius * spotScale;
+
+ // Compute the insetWidth
+ float blurOutset = srcSpaceSpotBlur;
+ float insetWidth = blurOutset;
+ if (casterIsTranslucent) {
+ // If transparent, just do a fill
+ insetWidth += spotShadowRect.getWidth();
+ } else {
+ // For shadows, instead of using a stroke we specify an inset from the penumbra
+ // border. We want to extend this inset area so that it meets up with the caster
+ // geometry. The inset geometry will by default already be inset by the blur width.
+ //
+ // We compare the min and max corners inset by the radius between the original
+ // rrect and the shadow rrect. The distance between the two plus the difference
+ // between the scaled radius and the original radius gives the distance from the
+ // transformed shadow shape to the original shape in that corner. The max
+ // of these gives the maximum distance we need to cover.
+ //
+ // Since we are outsetting by 1/2 the blur distance, we just add the maxOffset to
+ // that to get the full insetWidth.
+ float maxOffset;
+ if (casterCornerRadius <= 0.f) {
+ // Manhattan distance works better for rects
+ maxOffset = std::max(std::max(std::abs(spotShadowRect.left - casterRect.left),
+ std::abs(spotShadowRect.top - casterRect.top)),
+ std::max(std::abs(spotShadowRect.right - casterRect.right),
+ std::abs(spotShadowRect.bottom - casterRect.bottom)));
+ } else {
+ float dr = spotShadowRadius - casterCornerRadius;
+ vec2 upperLeftOffset = vec2(spotShadowRect.left - casterRect.left + dr,
+ spotShadowRect.top - casterRect.top + dr);
+ vec2 lowerRightOffset = vec2(spotShadowRect.right - casterRect.right - dr,
+ spotShadowRect.bottom - casterRect.bottom - dr);
+ maxOffset = sqrt(std::max(dot(upperLeftOffset, lowerRightOffset),
+ dot(lowerRightOffset, lowerRightOffset))) +
+ dr;
+ }
+ insetWidth += std::max(blurOutset, maxOffset);
+ }
+
+ // Outset the shadow rrect to the border of the penumbra
+ spotShadowRadius += blurOutset;
+ spotShadowRect.left -= blurOutset;
+ spotShadowRect.top -= blurOutset;
+ spotShadowRect.right += blurOutset;
+ spotShadowRect.bottom += blurOutset;
+
+ return getShadowGeometry(spotColor, spotShadowRect, std::abs(spotShadowRadius),
+ 2.0f * devSpaceSpotBlur, std::abs(insetWidth));
+}
+
// Fills |data| (length |shadowTextureWidth|) with the shadow falloff curve:
// an exp(-4*d^2) ramp where d runs from 1 at the first texel down to 0 at
// the last, so the texture fades from ~0 up to its brightest value.
//
// NOTE: requires shadowTextureWidth >= 2 — with a width of 1 the divisor
// below is zero (the only caller uses SHADOW_TEXTURE_WIDTH == 128).
void fillShadowTextureData(uint8_t* data, size_t shadowTextureWidth) {
    // size_t loop index avoids the signed/unsigned comparison of the
    // original `int i < size_t` loop condition.
    for (size_t i = 0; i < shadowTextureWidth; i++) {
        const float d = 1 - i / ((shadowTextureWidth * 1.0f) - 1.0f);
        // The 0.018 bias remaps exp(-4) (the d == 1 end) to ~0 so the curve
        // fades fully out at the outer edge.
        data[i] = static_cast<uint8_t>((exp(-4.0f * d * d) - 0.018f) * 255);
    }
}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLSkiaShadowPort.h b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.h
new file mode 100644
index 0000000..912c8bb
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLSkiaShadowPort.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <math/vec4.h>
+#include <renderengine/Mesh.h>
+#include <ui/Rect.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/**
+ * The shadow geometry logic and vertex generation code has been ported from skia shadow
+ * fast path OpenGL implementation to draw shadows around rects and rounded rects including
+ * circles.
+ *
+ * path: skia/src/gpu/GrRenderTargetContext.cpp GrRenderTargetContext::drawFastShadow
+ *
+ * Modifications made:
+ * - Switched to using std lib math functions
+ * - Fall off function is implemented in vertex shader rather than a shadow texture
+ * - Removed transformations applied on the caster rect since the caster will be in local
+ * coordinate space and will be transformed by the vertex shader.
+ */
+
+enum RRectType {
+ kFill_RRectType,
+ kStroke_RRectType,
+ kOverstroke_RRectType,
+};
+
+struct Geometry {
+ vec4 fColor;
+ float fOuterRadius;
+ float fUmbraInset;
+ float fInnerRadius;
+ float fBlurRadius;
+ FloatRect fDevBounds;
+ RRectType fType;
+ bool fIsCircle;
+ bool fIsStroked;
+};
+
+std::unique_ptr<Geometry> getSpotShadowGeometry(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent, const vec4& spotColor,
+ const vec3& lightPosition, float lightRadius);
+
+std::unique_ptr<Geometry> getAmbientShadowGeometry(const FloatRect& casterRect,
+ float casterCornerRadius, float casterZ,
+ bool casterIsTranslucent,
+ const vec4& ambientColor);
+
+int getVertexCountForGeometry(const Geometry& shadowGeometry);
+
+int getIndexCountForGeometry(const Geometry& shadowGeometry);
+
+void fillVerticesForGeometry(const Geometry& shadowGeometry, int vertexCount,
+ Mesh::VertexArray<vec2> position, Mesh::VertexArray<vec4> shadowColor,
+ Mesh::VertexArray<vec3> shadowParams);
+
+void fillIndicesForGeometry(const Geometry& shadowGeometry, int indexCount,
+ int startingVertexOffset, uint16_t* indices);
+
+/**
+ * Maps shadow geometry 'alpha' varying (1 for darkest, 0 for transparent) to
+ * darkness at that spot. Values are determined by an exponential falloff
+ * function provided by UX.
+ *
+ * The texture is used for quick lookup in the shadow shader.
+ *
+ * textureData - filled with shadow texture data that needs to be at least of
+ * size textureWidth
+ *
+ * textureWidth - width of the texture, height is always 1
+ */
+void fillShadowTextureData(uint8_t* textureData, size_t textureWidth);
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLVertexBuffer.cpp b/media/libstagefright/renderfright/gl/GLVertexBuffer.cpp
new file mode 100644
index 0000000..e50c471
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLVertexBuffer.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "GLVertexBuffer.h"
+
+#include <GLES/gl.h>
+#include <GLES2/gl2.h>
+#include <nativebase/nativebase.h>
+#include <utils/Trace.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GLVertexBuffer::GLVertexBuffer() {
+ glGenBuffers(1, &mBufferName);
+}
+
+GLVertexBuffer::~GLVertexBuffer() {
+ glDeleteBuffers(1, &mBufferName);
+}
+
+void GLVertexBuffer::allocateBuffers(const GLfloat data[], const GLuint size) {
+ ATRACE_CALL();
+ bind();
+ glBufferData(GL_ARRAY_BUFFER, size * sizeof(GLfloat), data, GL_STATIC_DRAW);
+ unbind();
+}
+
+void GLVertexBuffer::bind() const {
+ glBindBuffer(GL_ARRAY_BUFFER, mBufferName);
+}
+
+void GLVertexBuffer::unbind() const {
+ glBindBuffer(GL_ARRAY_BUFFER, 0);
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/GLVertexBuffer.h b/media/libstagefright/renderfright/gl/GLVertexBuffer.h
new file mode 100644
index 0000000..c0fd0c1
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/GLVertexBuffer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES2/gl2.h>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
+class GLVertexBuffer {
+public:
+ explicit GLVertexBuffer();
+ ~GLVertexBuffer();
+
+ void allocateBuffers(const GLfloat data[], const GLuint size);
+ uint32_t getBufferName() const { return mBufferName; }
+ void bind() const;
+ void unbind() const;
+
+private:
+ uint32_t mBufferName;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/ImageManager.cpp b/media/libstagefright/renderfright/gl/ImageManager.cpp
new file mode 100644
index 0000000..6256649
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ImageManager.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#undef LOG_TAG
+#define LOG_TAG "RenderEngine"
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <pthread.h>
+
+#include <processgroup/sched_policy.h>
+#include <utils/Trace.h>
+#include "GLESRenderEngine.h"
+#include "ImageManager.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+ImageManager::ImageManager(GLESRenderEngine* engine) : mEngine(engine) {}
+
+void ImageManager::initThread() {
+ mThread = std::thread([this]() { threadMain(); });
+ pthread_setname_np(mThread.native_handle(), "ImageManager");
+ // Use SCHED_FIFO to minimize jitter
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+    if (pthread_setschedparam(mThread.native_handle(), SCHED_FIFO, &param) != 0) {
+ ALOGE("Couldn't set SCHED_FIFO for ImageManager");
+ }
+}
+
+ImageManager::~ImageManager() {
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mRunning = false;
+ }
+ mCondition.notify_all();
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+void ImageManager::cacheAsync(const sp<GraphicBuffer>& buffer,
+ const std::shared_ptr<Barrier>& barrier) {
+ if (buffer == nullptr) {
+ {
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ barrier->isOpen = true;
+ barrier->result = BAD_VALUE;
+ }
+ barrier->condition.notify_one();
+ return;
+ }
+ ATRACE_CALL();
+ QueueEntry entry = {QueueEntry::Operation::Insert, buffer, buffer->getId(), barrier};
+ queueOperation(std::move(entry));
+}
+
+status_t ImageManager::cache(const sp<GraphicBuffer>& buffer) {
+ ATRACE_CALL();
+ auto barrier = std::make_shared<Barrier>();
+ cacheAsync(buffer, barrier);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ barrier->condition.wait(barrier->mutex,
+ [&]() REQUIRES(barrier->mutex) { return barrier->isOpen; });
+ return barrier->result;
+}
+
+void ImageManager::releaseAsync(uint64_t bufferId, const std::shared_ptr<Barrier>& barrier) {
+ ATRACE_CALL();
+ QueueEntry entry = {QueueEntry::Operation::Delete, nullptr, bufferId, barrier};
+ queueOperation(std::move(entry));
+}
+
+void ImageManager::queueOperation(const QueueEntry&& entry) {
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mQueue.emplace(entry);
+ ATRACE_INT("ImageManagerQueueDepth", mQueue.size());
+ }
+ mCondition.notify_one();
+}
+
+void ImageManager::threadMain() {
+ set_sched_policy(0, SP_FOREGROUND);
+ bool run;
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ run = mRunning;
+ }
+ while (run) {
+ QueueEntry entry;
+ {
+ std::lock_guard<std::mutex> lock(mMutex);
+ mCondition.wait(mMutex,
+ [&]() REQUIRES(mMutex) { return !mQueue.empty() || !mRunning; });
+ run = mRunning;
+
+ if (!mRunning) {
+ // if mRunning is false, then ImageManager is being destroyed, so
+ // bail out now.
+ break;
+ }
+
+ entry = mQueue.front();
+ mQueue.pop();
+ ATRACE_INT("ImageManagerQueueDepth", mQueue.size());
+ }
+
+ status_t result = NO_ERROR;
+ switch (entry.op) {
+ case QueueEntry::Operation::Delete:
+ mEngine->unbindExternalTextureBufferInternal(entry.bufferId);
+ break;
+ case QueueEntry::Operation::Insert:
+ result = mEngine->cacheExternalTextureBufferInternal(entry.buffer);
+ break;
+ }
+ if (entry.barrier != nullptr) {
+ {
+ std::lock_guard<std::mutex> entryLock(entry.barrier->mutex);
+ entry.barrier->result = result;
+ entry.barrier->isOpen = true;
+ }
+ entry.barrier->condition.notify_one();
+ }
+ }
+
+ ALOGD("Reached end of threadMain, terminating ImageManager thread!");
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/ImageManager.h b/media/libstagefright/renderfright/gl/ImageManager.h
new file mode 100644
index 0000000..be67de8
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ImageManager.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include <ui/GraphicBuffer.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GLESRenderEngine;
+
+class ImageManager {
+public:
+ struct Barrier {
+ std::mutex mutex;
+ std::condition_variable_any condition;
+ bool isOpen GUARDED_BY(mutex) = false;
+ status_t result GUARDED_BY(mutex) = NO_ERROR;
+ };
+ ImageManager(GLESRenderEngine* engine);
+ ~ImageManager();
+ // Starts the background thread for the ImageManager
+ // We need this to guarantee that the class is fully-constructed before the
+ // thread begins running.
+ void initThread();
+ void cacheAsync(const sp<GraphicBuffer>& buffer, const std::shared_ptr<Barrier>& barrier)
+ EXCLUDES(mMutex);
+ status_t cache(const sp<GraphicBuffer>& buffer);
+ void releaseAsync(uint64_t bufferId, const std::shared_ptr<Barrier>& barrier) EXCLUDES(mMutex);
+
+private:
+ struct QueueEntry {
+ enum class Operation { Delete, Insert };
+
+ Operation op = Operation::Delete;
+ sp<GraphicBuffer> buffer = nullptr;
+ uint64_t bufferId = 0;
+ std::shared_ptr<Barrier> barrier = nullptr;
+ };
+
+ void queueOperation(const QueueEntry&& entry);
+ void threadMain();
+ GLESRenderEngine* const mEngine;
+ std::thread mThread;
+ std::condition_variable_any mCondition;
+ std::mutex mMutex;
+ std::queue<QueueEntry> mQueue GUARDED_BY(mMutex);
+
+ bool mRunning GUARDED_BY(mMutex) = true;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/Program.cpp b/media/libstagefright/renderfright/gl/Program.cpp
new file mode 100644
index 0000000..f4fbf35
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/Program.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Program.h"
+
+#include <stdint.h>
+
+#include <log/log.h>
+#include <math/mat4.h>
+#include <utils/String8.h>
+#include "ProgramCache.h"
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+Program::Program(const ProgramCache::Key& /*needs*/, const char* vertex, const char* fragment)
+ : mInitialized(false) {
+ GLuint vertexId = buildShader(vertex, GL_VERTEX_SHADER);
+ GLuint fragmentId = buildShader(fragment, GL_FRAGMENT_SHADER);
+ GLuint programId = glCreateProgram();
+ glAttachShader(programId, vertexId);
+ glAttachShader(programId, fragmentId);
+ glBindAttribLocation(programId, position, "position");
+ glBindAttribLocation(programId, texCoords, "texCoords");
+ glBindAttribLocation(programId, cropCoords, "cropCoords");
+ glBindAttribLocation(programId, shadowColor, "shadowColor");
+ glBindAttribLocation(programId, shadowParams, "shadowParams");
+ glLinkProgram(programId);
+
+ GLint status;
+ glGetProgramiv(programId, GL_LINK_STATUS, &status);
+ if (status != GL_TRUE) {
+ ALOGE("Error while linking shaders:");
+ GLint infoLen = 0;
+ glGetProgramiv(programId, GL_INFO_LOG_LENGTH, &infoLen);
+ if (infoLen > 1) {
+ GLchar log[infoLen];
+ glGetProgramInfoLog(programId, infoLen, 0, &log[0]);
+ ALOGE("%s", log);
+ }
+ glDetachShader(programId, vertexId);
+ glDetachShader(programId, fragmentId);
+ glDeleteShader(vertexId);
+ glDeleteShader(fragmentId);
+ glDeleteProgram(programId);
+ } else {
+ mProgram = programId;
+ mVertexShader = vertexId;
+ mFragmentShader = fragmentId;
+ mInitialized = true;
+ mProjectionMatrixLoc = glGetUniformLocation(programId, "projection");
+ mTextureMatrixLoc = glGetUniformLocation(programId, "texture");
+ mSamplerLoc = glGetUniformLocation(programId, "sampler");
+ mColorLoc = glGetUniformLocation(programId, "color");
+ mDisplayMaxLuminanceLoc = glGetUniformLocation(programId, "displayMaxLuminance");
+ mMaxMasteringLuminanceLoc = glGetUniformLocation(programId, "maxMasteringLuminance");
+ mMaxContentLuminanceLoc = glGetUniformLocation(programId, "maxContentLuminance");
+ mInputTransformMatrixLoc = glGetUniformLocation(programId, "inputTransformMatrix");
+ mOutputTransformMatrixLoc = glGetUniformLocation(programId, "outputTransformMatrix");
+ mCornerRadiusLoc = glGetUniformLocation(programId, "cornerRadius");
+ mCropCenterLoc = glGetUniformLocation(programId, "cropCenter");
+
+ // set-up the default values for our uniforms
+ glUseProgram(programId);
+ glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, mat4().asArray());
+ glEnableVertexAttribArray(0);
+ }
+}
+
+bool Program::isValid() const {
+ return mInitialized;
+}
+
+void Program::use() {
+ glUseProgram(mProgram);
+}
+
+GLuint Program::getAttrib(const char* name) const {
+ // TODO: maybe use a local cache
+ return glGetAttribLocation(mProgram, name);
+}
+
+GLint Program::getUniform(const char* name) const {
+ // TODO: maybe use a local cache
+ return glGetUniformLocation(mProgram, name);
+}
+
+GLuint Program::buildShader(const char* source, GLenum type) {
+ GLuint shader = glCreateShader(type);
+ glShaderSource(shader, 1, &source, 0);
+ glCompileShader(shader);
+ GLint status;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
+ if (status != GL_TRUE) {
+ // Some drivers return wrong values for GL_INFO_LOG_LENGTH
+ // use a fixed size instead
+ GLchar log[512];
+ glGetShaderInfoLog(shader, sizeof(log), 0, log);
+ ALOGE("Error while compiling shader: \n%s\n%s", source, log);
+ glDeleteShader(shader);
+ return 0;
+ }
+ return shader;
+}
+
+void Program::setUniforms(const Description& desc) {
+ // TODO: we should have a mechanism here to not always reset uniforms that
+ // didn't change for this program.
+
+ if (mSamplerLoc >= 0) {
+ glUniform1i(mSamplerLoc, 0);
+ glUniformMatrix4fv(mTextureMatrixLoc, 1, GL_FALSE, desc.texture.getMatrix().asArray());
+ }
+ if (mColorLoc >= 0) {
+ const float color[4] = {desc.color.r, desc.color.g, desc.color.b, desc.color.a};
+ glUniform4fv(mColorLoc, 1, color);
+ }
+ if (mInputTransformMatrixLoc >= 0) {
+ mat4 inputTransformMatrix = desc.inputTransformMatrix;
+ glUniformMatrix4fv(mInputTransformMatrixLoc, 1, GL_FALSE, inputTransformMatrix.asArray());
+ }
+ if (mOutputTransformMatrixLoc >= 0) {
+ // The output transform matrix and color matrix can be combined as one matrix
+ // that is applied right before applying OETF.
+ mat4 outputTransformMatrix = desc.colorMatrix * desc.outputTransformMatrix;
+ glUniformMatrix4fv(mOutputTransformMatrixLoc, 1, GL_FALSE, outputTransformMatrix.asArray());
+ }
+ if (mDisplayMaxLuminanceLoc >= 0) {
+ glUniform1f(mDisplayMaxLuminanceLoc, desc.displayMaxLuminance);
+ }
+ if (mMaxMasteringLuminanceLoc >= 0) {
+ glUniform1f(mMaxMasteringLuminanceLoc, desc.maxMasteringLuminance);
+ }
+ if (mMaxContentLuminanceLoc >= 0) {
+ glUniform1f(mMaxContentLuminanceLoc, desc.maxContentLuminance);
+ }
+ if (mCornerRadiusLoc >= 0) {
+ glUniform1f(mCornerRadiusLoc, desc.cornerRadius);
+ }
+ if (mCropCenterLoc >= 0) {
+ glUniform2f(mCropCenterLoc, desc.cropSize.x / 2.0f, desc.cropSize.y / 2.0f);
+ }
+ // these uniforms are always present
+ glUniformMatrix4fv(mProjectionMatrixLoc, 1, GL_FALSE, desc.projectionMatrix.asArray());
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/Program.h b/media/libstagefright/renderfright/gl/Program.h
new file mode 100644
index 0000000..fc3755e
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/Program.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_PROGRAM_H
+#define SF_RENDER_ENGINE_PROGRAM_H
+
+#include <stdint.h>
+
+#include <GLES2/gl2.h>
+#include <renderengine/private/Description.h>
+#include "ProgramCache.h"
+
+namespace android {
+
+class String8;
+
+namespace renderengine {
+namespace gl {
+
+/*
+ * Abstracts a GLSL program comprising a vertex and fragment shader
+ */
+class Program {
+public:
+ // known locations for position and texture coordinates
+ enum {
+ /* position of each vertex for vertex shader */
+ position = 0,
+
+ /* UV coordinates for texture mapping */
+ texCoords = 1,
+
+ /* Crop coordinates, in pixels */
+ cropCoords = 2,
+
+ /* Shadow color */
+ shadowColor = 3,
+
+ /* Shadow params */
+ shadowParams = 4,
+ };
+
+ Program(const ProgramCache::Key& needs, const char* vertex, const char* fragment);
+ ~Program() = default;
+
+ /* whether this object is usable */
+ bool isValid() const;
+
+ /* Binds this program to the GLES context */
+ void use();
+
+ /* Returns the location of the specified attribute */
+ GLuint getAttrib(const char* name) const;
+
+ /* Returns the location of the specified uniform */
+ GLint getUniform(const char* name) const;
+
+ /* set-up uniforms from the description */
+ void setUniforms(const Description& desc);
+
+private:
+ GLuint buildShader(const char* source, GLenum type);
+
+ // whether the initialization succeeded
+ bool mInitialized;
+
+ // Name of the OpenGL program and shaders
+ GLuint mProgram;
+ GLuint mVertexShader;
+ GLuint mFragmentShader;
+
+ /* location of the projection matrix uniform */
+ GLint mProjectionMatrixLoc;
+
+ /* location of the texture matrix uniform */
+ GLint mTextureMatrixLoc;
+
+ /* location of the sampler uniform */
+ GLint mSamplerLoc;
+
+ /* location of the color uniform */
+ GLint mColorLoc;
+
+ /* location of display luminance uniform */
+ GLint mDisplayMaxLuminanceLoc;
+ /* location of max mastering luminance uniform */
+ GLint mMaxMasteringLuminanceLoc;
+ /* location of max content luminance uniform */
+ GLint mMaxContentLuminanceLoc;
+
+ /* location of transform matrix */
+ GLint mInputTransformMatrixLoc;
+ GLint mOutputTransformMatrixLoc;
+
+ /* location of corner radius uniform */
+ GLint mCornerRadiusLoc;
+
+ /* location of surface crop origin uniform, for rounded corner clipping */
+ GLint mCropCenterLoc;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_RENDER_ENGINE_PROGRAM_H */
diff --git a/media/libstagefright/renderfright/gl/ProgramCache.cpp b/media/libstagefright/renderfright/gl/ProgramCache.cpp
new file mode 100644
index 0000000..3ae35ec
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ProgramCache.cpp
@@ -0,0 +1,800 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "ProgramCache.h"
+
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+#include <log/log.h>
+#include <renderengine/private/Description.h>
+#include <utils/String8.h>
+#include <utils/Trace.h>
+#include "Program.h"
+
+ANDROID_SINGLETON_STATIC_INSTANCE(android::renderengine::gl::ProgramCache)
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/*
+ * A simple formatter class to automatically add the endl and
+ * manage the indentation.
+ */
+
+class Formatter;
+static Formatter& indent(Formatter& f);
+static Formatter& dedent(Formatter& f);
+
+class Formatter {
+ String8 mString;
+ int mIndent;
+ typedef Formatter& (*FormaterManipFunc)(Formatter&);
+ friend Formatter& indent(Formatter& f);
+ friend Formatter& dedent(Formatter& f);
+
+public:
+ Formatter() : mIndent(0) {}
+
+ String8 getString() const { return mString; }
+
+ friend Formatter& operator<<(Formatter& out, const char* in) {
+ for (int i = 0; i < out.mIndent; i++) {
+ out.mString.append(" ");
+ }
+ out.mString.append(in);
+ out.mString.append("\n");
+ return out;
+ }
+ friend inline Formatter& operator<<(Formatter& out, const String8& in) {
+ return operator<<(out, in.string());
+ }
+ friend inline Formatter& operator<<(Formatter& to, FormaterManipFunc func) {
+ return (*func)(to);
+ }
+};
+Formatter& indent(Formatter& f) {
+ f.mIndent++;
+ return f;
+}
+Formatter& dedent(Formatter& f) {
+ f.mIndent--;
+ return f;
+}
+
+void ProgramCache::primeCache(
+ EGLContext context, bool useColorManagement, bool toneMapperShaderOnly) {
+ auto& cache = mCaches[context];
+ uint32_t shaderCount = 0;
+
+ if (toneMapperShaderOnly) {
+ Key shaderKey;
+ // base settings used by HDR->SDR tonemap only
+ shaderKey.set(Key::BLEND_MASK | Key::INPUT_TRANSFORM_MATRIX_MASK |
+ Key::OUTPUT_TRANSFORM_MATRIX_MASK | Key::OUTPUT_TF_MASK |
+ Key::OPACITY_MASK | Key::ALPHA_MASK |
+ Key::ROUNDED_CORNERS_MASK | Key::TEXTURE_MASK,
+ Key::BLEND_NORMAL | Key::INPUT_TRANSFORM_MATRIX_ON |
+ Key::OUTPUT_TRANSFORM_MATRIX_ON | Key::OUTPUT_TF_SRGB |
+ Key::OPACITY_OPAQUE | Key::ALPHA_EQ_ONE |
+ Key::ROUNDED_CORNERS_OFF | Key::TEXTURE_EXT);
+ for (int i = 0; i < 4; i++) {
+ // Cache input transfer for HLG & ST2084
+ shaderKey.set(Key::INPUT_TF_MASK, (i & 1) ?
+ Key::INPUT_TF_HLG : Key::INPUT_TF_ST2084);
+
+ // Cache Y410 input on or off
+ shaderKey.set(Key::Y410_BT2020_MASK, (i & 2) ?
+ Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
+ if (cache.count(shaderKey) == 0) {
+ cache.emplace(shaderKey, generateProgram(shaderKey));
+ shaderCount++;
+ }
+ }
+ return;
+ }
+
+ uint32_t keyMask = Key::BLEND_MASK | Key::OPACITY_MASK | Key::ALPHA_MASK | Key::TEXTURE_MASK
+ | Key::ROUNDED_CORNERS_MASK;
+ // Prime the cache for all combinations of the above masks,
+ // leaving off the experimental color matrix mask options.
+
+ nsecs_t timeBefore = systemTime();
+ for (uint32_t keyVal = 0; keyVal <= keyMask; keyVal++) {
+ Key shaderKey;
+ shaderKey.set(keyMask, keyVal);
+ uint32_t tex = shaderKey.getTextureTarget();
+ if (tex != Key::TEXTURE_OFF && tex != Key::TEXTURE_EXT && tex != Key::TEXTURE_2D) {
+ continue;
+ }
+ if (cache.count(shaderKey) == 0) {
+ cache.emplace(shaderKey, generateProgram(shaderKey));
+ shaderCount++;
+ }
+ }
+
+ // Prime for sRGB->P3 conversion
+ if (useColorManagement) {
+ Key shaderKey;
+ shaderKey.set(Key::BLEND_MASK | Key::OUTPUT_TRANSFORM_MATRIX_MASK | Key::INPUT_TF_MASK |
+ Key::OUTPUT_TF_MASK,
+ Key::BLEND_PREMULT | Key::OUTPUT_TRANSFORM_MATRIX_ON | Key::INPUT_TF_SRGB |
+ Key::OUTPUT_TF_SRGB);
+ for (int i = 0; i < 16; i++) {
+ shaderKey.set(Key::OPACITY_MASK,
+ (i & 1) ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT);
+ shaderKey.set(Key::ALPHA_MASK, (i & 2) ? Key::ALPHA_LT_ONE : Key::ALPHA_EQ_ONE);
+
+ // Cache rounded corners
+ shaderKey.set(Key::ROUNDED_CORNERS_MASK,
+ (i & 4) ? Key::ROUNDED_CORNERS_ON : Key::ROUNDED_CORNERS_OFF);
+
+ // Cache texture off option for window transition
+ shaderKey.set(Key::TEXTURE_MASK, (i & 8) ? Key::TEXTURE_EXT : Key::TEXTURE_OFF);
+ if (cache.count(shaderKey) == 0) {
+ cache.emplace(shaderKey, generateProgram(shaderKey));
+ shaderCount++;
+ }
+ }
+ }
+
+ nsecs_t timeAfter = systemTime();
+ float compileTimeMs = static_cast<float>(timeAfter - timeBefore) / 1.0E6;
+ ALOGD("shader cache generated - %u shaders in %f ms\n", shaderCount, compileTimeMs);
+}
+
+ProgramCache::Key ProgramCache::computeKey(const Description& description) {
+ Key needs;
+ needs.set(Key::TEXTURE_MASK,
+ !description.textureEnabled
+ ? Key::TEXTURE_OFF
+ : description.texture.getTextureTarget() == GL_TEXTURE_EXTERNAL_OES
+ ? Key::TEXTURE_EXT
+ : description.texture.getTextureTarget() == GL_TEXTURE_2D
+ ? Key::TEXTURE_2D
+ : Key::TEXTURE_OFF)
+ .set(Key::ALPHA_MASK, (description.color.a < 1) ? Key::ALPHA_LT_ONE : Key::ALPHA_EQ_ONE)
+ .set(Key::BLEND_MASK,
+ description.isPremultipliedAlpha ? Key::BLEND_PREMULT : Key::BLEND_NORMAL)
+ .set(Key::OPACITY_MASK,
+ description.isOpaque ? Key::OPACITY_OPAQUE : Key::OPACITY_TRANSLUCENT)
+ .set(Key::Key::INPUT_TRANSFORM_MATRIX_MASK,
+ description.hasInputTransformMatrix() ? Key::INPUT_TRANSFORM_MATRIX_ON
+ : Key::INPUT_TRANSFORM_MATRIX_OFF)
+ .set(Key::Key::OUTPUT_TRANSFORM_MATRIX_MASK,
+ description.hasOutputTransformMatrix() || description.hasColorMatrix()
+ ? Key::OUTPUT_TRANSFORM_MATRIX_ON
+ : Key::OUTPUT_TRANSFORM_MATRIX_OFF)
+ .set(Key::ROUNDED_CORNERS_MASK,
+ description.cornerRadius > 0 ? Key::ROUNDED_CORNERS_ON : Key::ROUNDED_CORNERS_OFF)
+ .set(Key::SHADOW_MASK, description.drawShadows ? Key::SHADOW_ON : Key::SHADOW_OFF);
+ needs.set(Key::Y410_BT2020_MASK,
+ description.isY410BT2020 ? Key::Y410_BT2020_ON : Key::Y410_BT2020_OFF);
+
+ if (needs.hasTransformMatrix() ||
+ (description.inputTransferFunction != description.outputTransferFunction)) {
+ switch (description.inputTransferFunction) {
+ case Description::TransferFunction::LINEAR:
+ default:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_LINEAR);
+ break;
+ case Description::TransferFunction::SRGB:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_SRGB);
+ break;
+ case Description::TransferFunction::ST2084:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_ST2084);
+ break;
+ case Description::TransferFunction::HLG:
+ needs.set(Key::INPUT_TF_MASK, Key::INPUT_TF_HLG);
+ break;
+ }
+
+ switch (description.outputTransferFunction) {
+ case Description::TransferFunction::LINEAR:
+ default:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_LINEAR);
+ break;
+ case Description::TransferFunction::SRGB:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_SRGB);
+ break;
+ case Description::TransferFunction::ST2084:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_ST2084);
+ break;
+ case Description::TransferFunction::HLG:
+ needs.set(Key::OUTPUT_TF_MASK, Key::OUTPUT_TF_HLG);
+ break;
+ }
+ }
+
+ return needs;
+}
+
+// Generate EOTF that converts signal values to relative display light,
+// both normalized to [0, 1].
+void ProgramCache::generateEOTF(Formatter& fs, const Key& needs) {
+ switch (needs.getInputTF()) {
+ case Key::INPUT_TF_SRGB:
+ fs << R"__SHADER__(
+ float EOTF_sRGB(float srgb) {
+ return srgb <= 0.04045 ? srgb / 12.92 : pow((srgb + 0.055) / 1.055, 2.4);
+ }
+
+ vec3 EOTF_sRGB(const vec3 srgb) {
+ return vec3(EOTF_sRGB(srgb.r), EOTF_sRGB(srgb.g), EOTF_sRGB(srgb.b));
+ }
+
+ vec3 EOTF(const vec3 srgb) {
+ return sign(srgb.rgb) * EOTF_sRGB(abs(srgb.rgb));
+ }
+ )__SHADER__";
+ break;
+ case Key::INPUT_TF_ST2084:
+ fs << R"__SHADER__(
+ vec3 EOTF(const highp vec3 color) {
+ const highp float m1 = (2610.0 / 4096.0) / 4.0;
+ const highp float m2 = (2523.0 / 4096.0) * 128.0;
+ const highp float c1 = (3424.0 / 4096.0);
+ const highp float c2 = (2413.0 / 4096.0) * 32.0;
+ const highp float c3 = (2392.0 / 4096.0) * 32.0;
+
+ highp vec3 tmp = pow(clamp(color, 0.0, 1.0), 1.0 / vec3(m2));
+ tmp = max(tmp - c1, 0.0) / (c2 - c3 * tmp);
+ return pow(tmp, 1.0 / vec3(m1));
+ }
+ )__SHADER__";
+ break;
+ case Key::INPUT_TF_HLG:
+ fs << R"__SHADER__(
+ highp float EOTF_channel(const highp float channel) {
+ const highp float a = 0.17883277;
+ const highp float b = 0.28466892;
+ const highp float c = 0.55991073;
+ return channel <= 0.5 ? channel * channel / 3.0 :
+ (exp((channel - c) / a) + b) / 12.0;
+ }
+
+ vec3 EOTF(const highp vec3 color) {
+ return vec3(EOTF_channel(color.r), EOTF_channel(color.g),
+ EOTF_channel(color.b));
+ }
+ )__SHADER__";
+ break;
+ default:
+ fs << R"__SHADER__(
+ vec3 EOTF(const vec3 linear) {
+ return linear;
+ }
+ )__SHADER__";
+ break;
+ }
+}
+
+// Emits the three helpers used by OOTF when tone mapping is required:
+// ScaleLuminance (relative -> absolute nits), ToneMap (absolute nits ->
+// display range), and NormalizeLuminance (absolute nits -> relative).
+void ProgramCache::generateToneMappingProcess(Formatter& fs, const Key& needs) {
+    // Convert relative light to absolute light.
+    switch (needs.getInputTF()) {
+        case Key::INPUT_TF_ST2084: // PQ signal spans 0..10000 nits
+            fs << R"__SHADER__(
+                highp vec3 ScaleLuminance(highp vec3 color) {
+                    return color * 10000.0;
+                }
+            )__SHADER__";
+            break;
+        case Key::INPUT_TF_HLG: // HLG OOTF with a 1000-nit nominal peak
+            fs << R"__SHADER__(
+                highp vec3 ScaleLuminance(highp vec3 color) {
+                    // The formula is:
+                    // alpha * pow(Y, gamma - 1.0) * color + beta;
+                    // where alpha is 1000.0, gamma is 1.2, beta is 0.0.
+                    return color * 1000.0 * pow(color.y, 0.2);
+                }
+            )__SHADER__";
+            break;
+        default: // SDR input: scale by the display's maximum luminance
+            fs << R"__SHADER__(
+                highp vec3 ScaleLuminance(highp vec3 color) {
+                    return color * displayMaxLuminance;
+                }
+            )__SHADER__";
+            break;
+    }
+
+    // Tone map absolute light to display luminance range.
+    switch (needs.getInputTF()) {
+        case Key::INPUT_TF_ST2084: // HDR input: map down to the display range
+        case Key::INPUT_TF_HLG:
+            switch (needs.getOutputTF()) {
+                case Key::OUTPUT_TF_HLG:
+                    // Right now when mixed PQ and HLG contents are presented,
+                    // HLG content will always be converted to PQ. However, for
+                    // completeness, we simply clamp the value to [0.0, 1000.0].
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(highp vec3 color) {
+                            return clamp(color, 0.0, 1000.0);
+                        }
+                    )__SHADER__";
+                    break;
+                case Key::OUTPUT_TF_ST2084: // PQ covers the full range; pass through
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(highp vec3 color) {
+                            return color;
+                        }
+                    )__SHADER__";
+                    break;
+                default: // HDR -> SDR: piecewise linear + Hermite spline curve
+                    fs << R"__SHADER__(
+                        highp vec3 ToneMap(highp vec3 color) {
+                            float maxMasteringLumi = maxMasteringLuminance;
+                            float maxContentLumi = maxContentLuminance;
+                            float maxInLumi = min(maxMasteringLumi, maxContentLumi);
+                            float maxOutLumi = displayMaxLuminance;
+
+                            float nits = color.y;
+
+                            // clamp to max input luminance
+                            nits = clamp(nits, 0.0, maxInLumi);
+
+                            // scale [0.0, maxInLumi] to [0.0, maxOutLumi]
+                            if (maxInLumi <= maxOutLumi) {
+                                return color * (maxOutLumi / maxInLumi);
+                            } else {
+                                // three control points
+                                const float x0 = 10.0;
+                                const float y0 = 17.0;
+                                float x1 = maxOutLumi * 0.75;
+                                float y1 = x1;
+                                float x2 = x1 + (maxInLumi - x1) / 2.0;
+                                float y2 = y1 + (maxOutLumi - y1) * 0.75;
+
+                                // horizontal distances between the last three control points
+                                float h12 = x2 - x1;
+                                float h23 = maxInLumi - x2;
+                                // tangents at the last three control points
+                                float m1 = (y2 - y1) / h12;
+                                float m3 = (maxOutLumi - y2) / h23;
+                                float m2 = (m1 + m3) / 2.0;
+
+                                if (nits < x0) {
+                                    // scale [0.0, x0] to [0.0, y0] linearly
+                                    float slope = y0 / x0;
+                                    return color * slope;
+                                } else if (nits < x1) {
+                                    // scale [x0, x1] to [y0, y1] linearly
+                                    float slope = (y1 - y0) / (x1 - x0);
+                                    nits = y0 + (nits - x0) * slope;
+                                } else if (nits < x2) {
+                                    // scale [x1, x2] to [y1, y2] using Hermite interp
+                                    float t = (nits - x1) / h12;
+                                    nits = (y1 * (1.0 + 2.0 * t) + h12 * m1 * t) * (1.0 - t) * (1.0 - t) +
+                                            (y2 * (3.0 - 2.0 * t) + h12 * m2 * (t - 1.0)) * t * t;
+                                } else {
+                                    // scale [x2, maxInLumi] to [y2, maxOutLumi] using Hermite interp
+                                    float t = (nits - x2) / h23;
+                                    nits = (y2 * (1.0 + 2.0 * t) + h23 * m2 * t) * (1.0 - t) * (1.0 - t) +
+                                            (maxOutLumi * (3.0 - 2.0 * t) + h23 * m3 * (t - 1.0)) * t * t;
+                                }
+                            }
+
+                            // color.y is greater than x0 and is thus non-zero
+                            return color * (nits / color.y);
+                        }
+                    )__SHADER__";
+                    break;
+            }
+            break;
+        default: // SDR input being shown on an HDR output
+            // inverse tone map; the output luminance can be up to maxOutLumi.
+            fs << R"__SHADER__(
+                highp vec3 ToneMap(highp vec3 color) {
+                    const float maxOutLumi = 3000.0;
+
+                    const float x0 = 5.0;
+                    const float y0 = 2.5;
+                    float x1 = displayMaxLuminance * 0.7;
+                    float y1 = maxOutLumi * 0.15;
+                    float x2 = displayMaxLuminance * 0.9;
+                    float y2 = maxOutLumi * 0.45;
+                    float x3 = displayMaxLuminance;
+                    float y3 = maxOutLumi;
+
+                    float c1 = y1 / 3.0;
+                    float c2 = y2 / 2.0;
+                    float c3 = y3 / 1.5;
+
+                    float nits = color.y;
+
+                    float scale;
+                    if (nits <= x0) {
+                        // scale [0.0, x0] to [0.0, y0] linearly
+                        const float slope = y0 / x0;
+                        return color * slope;
+                    } else if (nits <= x1) {
+                        // scale [x0, x1] to [y0, y1] using a curve
+                        float t = (nits - x0) / (x1 - x0);
+                        nits = (1.0 - t) * (1.0 - t) * y0 + 2.0 * (1.0 - t) * t * c1 + t * t * y1;
+                    } else if (nits <= x2) {
+                        // scale [x1, x2] to [y1, y2] using a curve
+                        float t = (nits - x1) / (x2 - x1);
+                        nits = (1.0 - t) * (1.0 - t) * y1 + 2.0 * (1.0 - t) * t * c2 + t * t * y2;
+                    } else {
+                        // scale [x2, x3] to [y2, y3] using a curve
+                        float t = (nits - x2) / (x3 - x2);
+                        nits = (1.0 - t) * (1.0 - t) * y2 + 2.0 * (1.0 - t) * t * c3 + t * t * y3;
+                    }
+
+                    // color.y is greater than x0 and is thus non-zero
+                    return color * (nits / color.y);
+                }
+            )__SHADER__";
+            break;
+    }
+
+    // convert absolute light to relative light.
+    switch (needs.getOutputTF()) {
+        case Key::OUTPUT_TF_ST2084: // PQ normalizes against 10000 nits
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(highp vec3 color) {
+                    return color / 10000.0;
+                }
+            )__SHADER__";
+            break;
+        case Key::OUTPUT_TF_HLG: // inverse of the HLG OOTF used above
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(highp vec3 color) {
+                    return color / 1000.0 * pow(color.y / 1000.0, -0.2 / 1.2);
+                }
+            )__SHADER__";
+            break;
+        default: // SDR output: normalize by the display's maximum luminance
+            fs << R"__SHADER__(
+                highp vec3 NormalizeLuminance(highp vec3 color) {
+                    return color / displayMaxLuminance;
+                }
+            )__SHADER__";
+            break;
+    }
+}
+
+// Generate OOTF that modifies the relative scene light to relative display light.
+void ProgramCache::generateOOTF(Formatter& fs, const ProgramCache::Key& needs) {
+    if (!needs.needsToneMapping()) {
+        // No tone mapping needed: the OOTF is the identity.
+        fs << R"__SHADER__(
+            highp vec3 OOTF(const highp vec3 color) {
+                return color;
+            }
+        )__SHADER__";
+    } else {
+        // Emit ScaleLuminance/ToneMap/NormalizeLuminance, then compose them.
+        generateToneMappingProcess(fs, needs);
+        fs << R"__SHADER__(
+            highp vec3 OOTF(const highp vec3 color) {
+                return NormalizeLuminance(ToneMap(ScaleLuminance(color)));
+            }
+        )__SHADER__";
+    }
+}
+
+// Generate OETF that converts relative display light to signal values,
+// both normalized to [0, 1]. Chosen by the output transfer function in
+// the Key; this is the inverse of the corresponding EOTF above.
+void ProgramCache::generateOETF(Formatter& fs, const Key& needs) {
+    switch (needs.getOutputTF()) {
+        case Key::OUTPUT_TF_SRGB: // sRGB encoding (IEC 61966-2-1)
+            // sign()/abs() preserve negative (extended-range) components.
+            fs << R"__SHADER__(
+                float OETF_sRGB(const float linear) {
+                    return linear <= 0.0031308 ?
+                            linear * 12.92 : (pow(linear, 1.0 / 2.4) * 1.055) - 0.055;
+                }
+
+                vec3 OETF_sRGB(const vec3 linear) {
+                    return vec3(OETF_sRGB(linear.r), OETF_sRGB(linear.g), OETF_sRGB(linear.b));
+                }
+
+                vec3 OETF(const vec3 linear) {
+                    return sign(linear.rgb) * OETF_sRGB(abs(linear.rgb));
+                }
+            )__SHADER__";
+            break;
+        case Key::OUTPUT_TF_ST2084: // PQ (SMPTE ST 2084) quantizer
+            fs << R"__SHADER__(
+                vec3 OETF(const vec3 linear) {
+                    const highp float m1 = (2610.0 / 4096.0) / 4.0;
+                    const highp float m2 = (2523.0 / 4096.0) * 128.0;
+                    const highp float c1 = (3424.0 / 4096.0);
+                    const highp float c2 = (2413.0 / 4096.0) * 32.0;
+                    const highp float c3 = (2392.0 / 4096.0) * 32.0;
+
+                    highp vec3 tmp = pow(linear, vec3(m1));
+                    tmp = (c1 + c2 * tmp) / (1.0 + c3 * tmp);
+                    return pow(tmp, vec3(m2));
+                }
+            )__SHADER__";
+            break;
+        case Key::OUTPUT_TF_HLG: // HLG (ARIB STD-B67 / BT.2100) OETF
+            fs << R"__SHADER__(
+                highp float OETF_channel(const highp float channel) {
+                    const highp float a = 0.17883277;
+                    const highp float b = 0.28466892;
+                    const highp float c = 0.55991073;
+                    return channel <= 1.0 / 12.0 ? sqrt(3.0 * channel) :
+                            a * log(12.0 * channel - b) + c;
+                }
+
+                vec3 OETF(const highp vec3 color) {
+                    return vec3(OETF_channel(color.r), OETF_channel(color.g),
+                            OETF_channel(color.b));
+                }
+            )__SHADER__";
+            break;
+        default: // OUTPUT_TF_LINEAR: identity
+            fs << R"__SHADER__(
+                vec3 OETF(const vec3 linear) {
+                    return linear;
+                }
+            )__SHADER__";
+            break;
+    }
+}
+
+// Builds the vertex shader source for the given Key: always transforms
+// position by the projection matrix, and forwards texture coordinates,
+// crop coordinates (rounded corners) and shadow attributes when needed.
+String8 ProgramCache::generateVertexShader(const Key& needs) {
+    Formatter vs;
+    if (needs.hasTextureCoords()) {
+        vs << "attribute vec4 texCoords;"
+           << "varying vec2 outTexCoords;";
+    }
+    if (needs.hasRoundedCorners()) {
+        // Crop coords feed the signed-distance corner computation in the
+        // fragment shader.
+        vs << "attribute lowp vec4 cropCoords;";
+        vs << "varying lowp vec2 outCropCoords;";
+    }
+    if (needs.drawShadows()) {
+        vs << "attribute lowp vec4 shadowColor;";
+        vs << "varying lowp vec4 outShadowColor;";
+        vs << "attribute lowp vec4 shadowParams;";
+        vs << "varying lowp vec3 outShadowParams;";
+    }
+    vs << "attribute vec4 position;"
+       << "uniform mat4 projection;"
+       << "uniform mat4 texture;" // texture-coordinate transform, not a sampler
+       << "void main(void) {" << indent << "gl_Position = projection * position;";
+    if (needs.hasTextureCoords()) {
+        vs << "outTexCoords = (texture * texCoords).st;";
+    }
+    if (needs.hasRoundedCorners()) {
+        vs << "outCropCoords = cropCoords.st;";
+    }
+    if (needs.drawShadows()) {
+        vs << "outShadowColor = shadowColor;";
+        vs << "outShadowParams = shadowParams.xyz;";
+    }
+    vs << dedent << "}";
+    return vs.getString();
+}
+
+// Builds the fragment shader source for the given Key. Emits helper
+// functions (corner radius, shadows, color management) first, then a
+// main() that samples/tints, color-manages, and applies alpha/corners.
+String8 ProgramCache::generateFragmentShader(const Key& needs) {
+    Formatter fs;
+    if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
+        fs << "#extension GL_OES_EGL_image_external : require";
+    }
+
+    // default precision is required-ish in fragment shaders
+    fs << "precision mediump float;";
+
+    if (needs.getTextureTarget() == Key::TEXTURE_EXT) {
+        fs << "uniform samplerExternalOES sampler;";
+    } else if (needs.getTextureTarget() == Key::TEXTURE_2D) {
+        fs << "uniform sampler2D sampler;";
+    }
+
+    if (needs.hasTextureCoords()) {
+        fs << "varying vec2 outTexCoords;";
+    }
+
+    if (needs.hasRoundedCorners()) {
+        // Rounded corners implementation using a signed distance function.
+        fs << R"__SHADER__(
+            uniform float cornerRadius;
+            uniform vec2 cropCenter;
+            varying vec2 outCropCoords;
+
+            /**
+             * This function takes the current crop coordinates and calculates an alpha value based
+             * on the corner radius and distance from the crop center.
+             */
+            float applyCornerRadius(vec2 cropCoords)
+            {
+                vec2 position = cropCoords - cropCenter;
+                // Scale down the dist vector here, as otherwise large corner
+                // radii can cause floating point issues when computing the norm
+                vec2 dist = (abs(position) - cropCenter + vec2(cornerRadius)) / 16.0;
+                // Once we've found the norm, then scale back up.
+                float plane = length(max(dist, vec2(0.0))) * 16.0;
+                return 1.0 - clamp(plane - cornerRadius, 0.0, 1.0);
+            }
+        )__SHADER__";
+    }
+
+    if (needs.drawShadows()) {
+        fs << R"__SHADER__(
+            varying lowp vec4 outShadowColor;
+            varying lowp vec3 outShadowParams;
+
+            /**
+             * Returns the shadow color.
+             */
+            vec4 getShadowColor()
+            {
+                lowp float d = length(outShadowParams.xy);
+                vec2 uv = vec2(outShadowParams.z * (1.0 - d), 0.5);
+                lowp float factor = texture2D(sampler, uv).a;
+                return outShadowColor * factor;
+            }
+        )__SHADER__";
+    }
+
+    if (needs.getTextureTarget() == Key::TEXTURE_OFF || needs.hasAlpha()) {
+        // Solid-fill color and/or the layer alpha modulation value.
+        fs << "uniform vec4 color;";
+    }
+
+    if (needs.isY410BT2020()) {
+        fs << R"__SHADER__(
+            vec3 convertY410BT2020(const vec3 color) {
+                const vec3 offset = vec3(0.0625, 0.5, 0.5);
+                const mat3 transform = mat3(
+                    vec3(1.1678,  1.1678, 1.1678),
+                    vec3(   0.0, -0.1878, 2.1481),
+                    vec3(1.6836, -0.6523,    0.0));
+                // Y is in G, U is in R, and V is in B
+                return clamp(transform * (color.grb - offset), 0.0, 1.0);
+            }
+        )__SHADER__";
+    }
+
+    // Color management pipeline is only emitted when a transform matrix or
+    // a transfer-function conversion is needed.
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
+        if (needs.needsToneMapping()) {
+            fs << "uniform float displayMaxLuminance;";
+            fs << "uniform float maxMasteringLuminance;";
+            fs << "uniform float maxContentLuminance;";
+        }
+
+        if (needs.hasInputTransformMatrix()) {
+            fs << "uniform mat4 inputTransformMatrix;";
+            fs << R"__SHADER__(
+                highp vec3 InputTransform(const highp vec3 color) {
+                    return clamp(vec3(inputTransformMatrix * vec4(color, 1.0)), 0.0, 1.0);
+                }
+            )__SHADER__";
+        } else {
+            fs << R"__SHADER__(
+                highp vec3 InputTransform(const highp vec3 color) {
+                    return color;
+                }
+            )__SHADER__";
+        }
+
+        // the transformation from a wider colorspace to a narrower one can
+        // result in >1.0 or <0.0 pixel values
+        if (needs.hasOutputTransformMatrix()) {
+            fs << "uniform mat4 outputTransformMatrix;";
+            fs << R"__SHADER__(
+                highp vec3 OutputTransform(const highp vec3 color) {
+                    return clamp(vec3(outputTransformMatrix * vec4(color, 1.0)), 0.0, 1.0);
+                }
+            )__SHADER__";
+        } else {
+            fs << R"__SHADER__(
+                highp vec3 OutputTransform(const highp vec3 color) {
+                    return clamp(color, 0.0, 1.0);
+                }
+            )__SHADER__";
+        }
+
+        generateEOTF(fs, needs); // signal -> linear
+        generateOOTF(fs, needs); // scene light -> display light (tone map)
+        generateOETF(fs, needs); // linear -> signal
+    }
+
+    fs << "void main(void) {" << indent;
+    if (needs.drawShadows()) {
+        fs << "gl_FragColor = getShadowColor();";
+    } else {
+        if (needs.isTexturing()) {
+            fs << "gl_FragColor = texture2D(sampler, outTexCoords);";
+            if (needs.isY410BT2020()) {
+                fs << "gl_FragColor.rgb = convertY410BT2020(gl_FragColor.rgb);";
+            }
+        } else {
+            fs << "gl_FragColor.rgb = color.rgb;";
+            fs << "gl_FragColor.a = 1.0;";
+        }
+        if (needs.isOpaque()) {
+            fs << "gl_FragColor.a = 1.0;";
+        }
+        if (needs.hasAlpha()) {
+            // modulate the current alpha value with alpha set
+            if (needs.isPremultiplied()) {
+                // ... and the color too if we're premultiplied
+                fs << "gl_FragColor *= color.a;";
+            } else {
+                fs << "gl_FragColor.a *= color.a;";
+            }
+        }
+    }
+
+    if (needs.hasTransformMatrix() || (needs.getInputTF() != needs.getOutputTF())) {
+        if (!needs.isOpaque() && needs.isPremultiplied()) {
+            // un-premultiply if needed before linearization
+            // avoid divide by 0 by adding 0.5/256 to the alpha channel
+            fs << "gl_FragColor.rgb = gl_FragColor.rgb / (gl_FragColor.a + 0.0019);";
+        }
+        fs << "gl_FragColor.rgb = "
+              "OETF(OutputTransform(OOTF(InputTransform(EOTF(gl_FragColor.rgb)))));";
+        if (!needs.isOpaque() && needs.isPremultiplied()) {
+            // and re-premultiply if needed after gamma correction
+            fs << "gl_FragColor.rgb = gl_FragColor.rgb * (gl_FragColor.a + 0.0019);";
+        }
+    }
+
+    if (needs.hasRoundedCorners()) {
+        // Fade out the fragment near rounded corners.
+        if (needs.isPremultiplied()) {
+            fs << "gl_FragColor *= vec4(applyCornerRadius(outCropCoords));";
+        } else {
+            fs << "gl_FragColor.a *= applyCornerRadius(outCropCoords);";
+        }
+    }
+
+    fs << dedent << "}";
+    return fs.getString();
+}
+
+// Generates and links a complete Program (vertex + fragment shader) for
+// the given Key. Called on cache miss from useProgram()/primeCache().
+std::unique_ptr<Program> ProgramCache::generateProgram(const Key& needs) {
+    ATRACE_CALL();
+
+    // vertex shader
+    String8 vs = generateVertexShader(needs);
+
+    // fragment shader
+    String8 fs = generateFragmentShader(needs);
+
+    return std::make_unique<Program>(needs, vs.string(), fs.string());
+}
+
+// Looks up (or lazily generates) the Program matching the Description for
+// the given EGL context, then binds it and uploads its uniforms.
+void ProgramCache::useProgram(EGLContext context, const Description& description) {
+    // generate the key for the shader based on the description
+    Key needs(computeKey(description));
+
+    // look-up the program in the cache
+    auto& cache = mCaches[context]; // creates an empty per-context cache on first use
+    auto it = cache.find(needs);
+    if (it == cache.end()) {
+        // we didn't find our program, so generate one...
+        nsecs_t time = systemTime();
+        it = cache.emplace(needs, generateProgram(needs)).first;
+        time = systemTime() - time;
+
+        ALOGV(">>> generated new program for context %p: needs=%08X, time=%u ms (%zu programs)",
+              context, needs.mKey, uint32_t(ns2ms(time)), cache.size());
+    }
+
+    // here we have a suitable program for this description
+    std::unique_ptr<Program>& program = it->second;
+    if (program->isValid()) { // skip binding if shader compilation/link failed
+        program->use();
+        program->setUniforms(description);
+    }
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/ProgramCache.h b/media/libstagefright/renderfright/gl/ProgramCache.h
new file mode 100644
index 0000000..901e631
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/ProgramCache.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_PROGRAMCACHE_H
+#define SF_RENDER_ENGINE_PROGRAMCACHE_H
+
+#include <memory>
+#include <unordered_map>
+
+#include <EGL/egl.h>
+#include <GLES2/gl2.h>
+#include <renderengine/private/Description.h>
+#include <utils/Singleton.h>
+#include <utils/TypeHelpers.h>
+
+namespace android {
+
+class String8;
+
+namespace renderengine {
+
+struct Description;
+
+namespace gl {
+
+class Formatter;
+class Program;
+
+/*
+ * This class generates GLSL programs suitable to handle a given
+ * Description. It's responsible for figuring out what to
+ * generate from a Description.
+ * It also maintains a cache of these Programs.
+ */
+class ProgramCache : public Singleton<ProgramCache> {
+public:
+ /*
+ * Key is used to retrieve a Program in the cache.
+ * A Key is generated from a Description.
+ */
+ class Key {
+ friend class ProgramCache;
+ typedef uint32_t key_t;
+ key_t mKey;
+
+ public:
+ enum {
+ BLEND_SHIFT = 0,
+ BLEND_MASK = 1 << BLEND_SHIFT,
+ BLEND_PREMULT = 1 << BLEND_SHIFT,
+ BLEND_NORMAL = 0 << BLEND_SHIFT,
+
+ OPACITY_SHIFT = 1,
+ OPACITY_MASK = 1 << OPACITY_SHIFT,
+ OPACITY_OPAQUE = 1 << OPACITY_SHIFT,
+ OPACITY_TRANSLUCENT = 0 << OPACITY_SHIFT,
+
+ ALPHA_SHIFT = 2,
+ ALPHA_MASK = 1 << ALPHA_SHIFT,
+ ALPHA_LT_ONE = 1 << ALPHA_SHIFT,
+ ALPHA_EQ_ONE = 0 << ALPHA_SHIFT,
+
+ TEXTURE_SHIFT = 3,
+ TEXTURE_MASK = 3 << TEXTURE_SHIFT,
+ TEXTURE_OFF = 0 << TEXTURE_SHIFT,
+ TEXTURE_EXT = 1 << TEXTURE_SHIFT,
+ TEXTURE_2D = 2 << TEXTURE_SHIFT,
+
+ ROUNDED_CORNERS_SHIFT = 5,
+ ROUNDED_CORNERS_MASK = 1 << ROUNDED_CORNERS_SHIFT,
+ ROUNDED_CORNERS_OFF = 0 << ROUNDED_CORNERS_SHIFT,
+ ROUNDED_CORNERS_ON = 1 << ROUNDED_CORNERS_SHIFT,
+
+ INPUT_TRANSFORM_MATRIX_SHIFT = 6,
+ INPUT_TRANSFORM_MATRIX_MASK = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
+ INPUT_TRANSFORM_MATRIX_OFF = 0 << INPUT_TRANSFORM_MATRIX_SHIFT,
+ INPUT_TRANSFORM_MATRIX_ON = 1 << INPUT_TRANSFORM_MATRIX_SHIFT,
+
+ OUTPUT_TRANSFORM_MATRIX_SHIFT = 7,
+ OUTPUT_TRANSFORM_MATRIX_MASK = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+ OUTPUT_TRANSFORM_MATRIX_OFF = 0 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+ OUTPUT_TRANSFORM_MATRIX_ON = 1 << OUTPUT_TRANSFORM_MATRIX_SHIFT,
+
+ INPUT_TF_SHIFT = 8,
+ INPUT_TF_MASK = 3 << INPUT_TF_SHIFT,
+ INPUT_TF_LINEAR = 0 << INPUT_TF_SHIFT,
+ INPUT_TF_SRGB = 1 << INPUT_TF_SHIFT,
+ INPUT_TF_ST2084 = 2 << INPUT_TF_SHIFT,
+ INPUT_TF_HLG = 3 << INPUT_TF_SHIFT,
+
+ OUTPUT_TF_SHIFT = 10,
+ OUTPUT_TF_MASK = 3 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_LINEAR = 0 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_SRGB = 1 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_ST2084 = 2 << OUTPUT_TF_SHIFT,
+ OUTPUT_TF_HLG = 3 << OUTPUT_TF_SHIFT,
+
+ Y410_BT2020_SHIFT = 12,
+ Y410_BT2020_MASK = 1 << Y410_BT2020_SHIFT,
+ Y410_BT2020_OFF = 0 << Y410_BT2020_SHIFT,
+ Y410_BT2020_ON = 1 << Y410_BT2020_SHIFT,
+
+ SHADOW_SHIFT = 13,
+ SHADOW_MASK = 1 << SHADOW_SHIFT,
+ SHADOW_OFF = 0 << SHADOW_SHIFT,
+ SHADOW_ON = 1 << SHADOW_SHIFT,
+ };
+
+ inline Key() : mKey(0) {}
+ inline Key(const Key& rhs) : mKey(rhs.mKey) {}
+
+ inline Key& set(key_t mask, key_t value) {
+ mKey = (mKey & ~mask) | value;
+ return *this;
+ }
+
+ inline bool isTexturing() const { return (mKey & TEXTURE_MASK) != TEXTURE_OFF; }
+ inline bool hasTextureCoords() const { return isTexturing() && !drawShadows(); }
+ inline int getTextureTarget() const { return (mKey & TEXTURE_MASK); }
+ inline bool isPremultiplied() const { return (mKey & BLEND_MASK) == BLEND_PREMULT; }
+ inline bool isOpaque() const { return (mKey & OPACITY_MASK) == OPACITY_OPAQUE; }
+ inline bool hasAlpha() const { return (mKey & ALPHA_MASK) == ALPHA_LT_ONE; }
+ inline bool hasRoundedCorners() const {
+ return (mKey & ROUNDED_CORNERS_MASK) == ROUNDED_CORNERS_ON;
+ }
+ inline bool drawShadows() const { return (mKey & SHADOW_MASK) == SHADOW_ON; }
+ inline bool hasInputTransformMatrix() const {
+ return (mKey & INPUT_TRANSFORM_MATRIX_MASK) == INPUT_TRANSFORM_MATRIX_ON;
+ }
+ inline bool hasOutputTransformMatrix() const {
+ return (mKey & OUTPUT_TRANSFORM_MATRIX_MASK) == OUTPUT_TRANSFORM_MATRIX_ON;
+ }
+ inline bool hasTransformMatrix() const {
+ return hasInputTransformMatrix() || hasOutputTransformMatrix();
+ }
+ inline int getInputTF() const { return (mKey & INPUT_TF_MASK); }
+ inline int getOutputTF() const { return (mKey & OUTPUT_TF_MASK); }
+
+ // When HDR and non-HDR contents are mixed, or different types of HDR contents are
+ // mixed, we will do a tone mapping process to tone map the input content to output
+    // content. Currently, the following conversions are handled:
+ // * SDR -> HLG
+ // * SDR -> PQ
+ // * HLG -> PQ
+ inline bool needsToneMapping() const {
+ int inputTF = getInputTF();
+ int outputTF = getOutputTF();
+
+ // Return false when converting from SDR to SDR.
+ if (inputTF == Key::INPUT_TF_SRGB && outputTF == Key::OUTPUT_TF_LINEAR) {
+ return false;
+ }
+ if (inputTF == Key::INPUT_TF_LINEAR && outputTF == Key::OUTPUT_TF_SRGB) {
+ return false;
+ }
+
+ inputTF >>= Key::INPUT_TF_SHIFT;
+ outputTF >>= Key::OUTPUT_TF_SHIFT;
+ return inputTF != outputTF;
+ }
+ inline bool isY410BT2020() const { return (mKey & Y410_BT2020_MASK) == Y410_BT2020_ON; }
+
+ // for use by std::unordered_map
+
+ bool operator==(const Key& other) const { return mKey == other.mKey; }
+
+ struct Hash {
+ size_t operator()(const Key& key) const { return static_cast<size_t>(key.mKey); }
+ };
+ };
+
+ ProgramCache() = default;
+ ~ProgramCache() = default;
+
+ // Generate shaders to populate the cache
+ void primeCache(const EGLContext context, bool useColorManagement, bool toneMapperShaderOnly);
+
+ size_t getSize(const EGLContext context) { return mCaches[context].size(); }
+
+    // useProgram looks up a suitable program in the cache or generates one
+ // if none can be found.
+ void useProgram(const EGLContext context, const Description& description);
+
+private:
+ // compute a cache Key from a Description
+ static Key computeKey(const Description& description);
+    // Generate EOTF based on the Key.
+ static void generateEOTF(Formatter& fs, const Key& needs);
+ // Generate necessary tone mapping methods for OOTF.
+ static void generateToneMappingProcess(Formatter& fs, const Key& needs);
+    // Generate OOTF based on the Key.
+ static void generateOOTF(Formatter& fs, const Key& needs);
+    // Generate OETF based on the Key.
+ static void generateOETF(Formatter& fs, const Key& needs);
+ // generates a program from the Key
+ static std::unique_ptr<Program> generateProgram(const Key& needs);
+ // generates the vertex shader from the Key
+ static String8 generateVertexShader(const Key& needs);
+ // generates the fragment shader from the Key
+ static String8 generateFragmentShader(const Key& needs);
+
+ // Key/Value map used for caching Programs. Currently the cache
+ // is never shrunk (and the GL program objects are never deleted).
+ std::unordered_map<EGLContext, std::unordered_map<Key, std::unique_ptr<Program>, Key::Hash>>
+ mCaches;
+};
+
+} // namespace gl
+} // namespace renderengine
+
+ANDROID_BASIC_TYPES_TRAITS(renderengine::gl::ProgramCache::Key)
+
+} // namespace android
+
+#endif /* SF_RENDER_ENGINE_PROGRAMCACHE_H */
diff --git a/media/libstagefright/renderfright/gl/filters/BlurFilter.cpp b/media/libstagefright/renderfright/gl/filters/BlurFilter.cpp
new file mode 100644
index 0000000..19f18c0
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/BlurFilter.cpp
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "BlurFilter.h"
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <GLES3/gl3.h>
+#include <GLES3/gl3ext.h>
+#include <ui/GraphicTypes.h>
+#include <cstdint>
+
+#include <utils/Trace.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+// Compiles the blur and mix programs, caches their attribute/uniform
+// locations, and uploads the fixed full-screen triangle mesh.
+BlurFilter::BlurFilter(GLESRenderEngine& engine)
+      : mEngine(engine),
+        mCompositionFbo(engine),
+        mPingFbo(engine),
+        mPongFbo(engine),
+        mMixProgram(engine),
+        mBlurProgram(engine) {
+    mMixProgram.compile(getVertexShader(), getMixFragShader());
+    mMPosLoc = mMixProgram.getAttributeLocation("aPosition");
+    mMUvLoc = mMixProgram.getAttributeLocation("aUV");
+    mMTextureLoc = mMixProgram.getUniformLocation("uTexture");
+    mMCompositionTextureLoc = mMixProgram.getUniformLocation("uCompositionTexture");
+    mMMixLoc = mMixProgram.getUniformLocation("uMix");
+
+    mBlurProgram.compile(getVertexShader(), getFragmentShader());
+    mBPosLoc = mBlurProgram.getAttributeLocation("aPosition");
+    mBUvLoc = mBlurProgram.getAttributeLocation("aUV");
+    mBTextureLoc = mBlurProgram.getUniformLocation("uTexture");
+    mBOffsetLoc = mBlurProgram.getUniformLocation("uOffset");
+
+    // A single oversized triangle that covers the viewport; vertex
+    // positions first, then the matching UVs in the same buffer.
+    static constexpr auto size = 2.0f;
+    static constexpr auto translation = 1.0f;
+    const GLfloat vboData[] = {
+        // Vertex data
+        translation - size, -translation - size,
+        translation - size, -translation + size,
+        translation + size, -translation + size,
+        // UV data
+        0.0f, 0.0f - translation,
+        0.0f, size - translation,
+        size, size - translation
+    };
+    mMeshBuffer.allocateBuffers(vboData, 12 /* size */);
+}
+
+// Redirects rendering into the composition FBO, (re)allocating the
+// composition and downscaled ping/pong buffers when the display grows.
+// Returns NO_ERROR on success or a GL framebuffer/shader error code.
+status_t BlurFilter::setAsDrawTarget(const DisplaySettings& display, uint32_t radius) {
+    ATRACE_NAME("BlurFilter::setAsDrawTarget");
+    mRadius = radius;
+    mDisplayX = display.physicalDisplay.left;
+    mDisplayY = display.physicalDisplay.top;
+
+    // Buffers are only grown, never shrunk, to avoid churn on rotations.
+    if (mDisplayWidth < display.physicalDisplay.width() ||
+        mDisplayHeight < display.physicalDisplay.height()) {
+        ATRACE_NAME("BlurFilter::allocatingTextures");
+
+        mDisplayWidth = display.physicalDisplay.width();
+        mDisplayHeight = display.physicalDisplay.height();
+        mCompositionFbo.allocateBuffers(mDisplayWidth, mDisplayHeight);
+
+        // Ping/pong buffers are downscaled by kFboScale for performance.
+        const uint32_t fboWidth = floorf(mDisplayWidth * kFboScale);
+        const uint32_t fboHeight = floorf(mDisplayHeight * kFboScale);
+        mPingFbo.allocateBuffers(fboWidth, fboHeight);
+        mPongFbo.allocateBuffers(fboWidth, fboHeight);
+
+        if (mPingFbo.getStatus() != GL_FRAMEBUFFER_COMPLETE) {
+            ALOGE("Invalid ping buffer");
+            return mPingFbo.getStatus();
+        }
+        if (mPongFbo.getStatus() != GL_FRAMEBUFFER_COMPLETE) {
+            ALOGE("Invalid pong buffer");
+            return mPongFbo.getStatus();
+        }
+        if (mCompositionFbo.getStatus() != GL_FRAMEBUFFER_COMPLETE) {
+            ALOGE("Invalid composition buffer");
+            return mCompositionFbo.getStatus();
+        }
+        if (!mBlurProgram.isValid()) {
+            ALOGE("Invalid shader");
+            return GL_INVALID_OPERATION;
+        }
+    }
+
+    mCompositionFbo.bind();
+    glViewport(0, 0, mCompositionFbo.getBufferWidth(), mCompositionFbo.getBufferHeight());
+    return NO_ERROR;
+}
+
+// Binds the shared triangle mesh to the given attribute locations and
+// issues the draw call. Assumes the desired program is already in use.
+void BlurFilter::drawMesh(GLuint uv, GLuint position) {
+
+    glEnableVertexAttribArray(uv);
+    glEnableVertexAttribArray(position);
+    mMeshBuffer.bind();
+    // Positions occupy the first 6 floats, UVs the following 6.
+    glVertexAttribPointer(position, 2 /* size */, GL_FLOAT, GL_FALSE,
+                          2 * sizeof(GLfloat) /* stride */, 0 /* offset */);
+    glVertexAttribPointer(uv, 2 /* size */, GL_FLOAT, GL_FALSE, 0 /* stride */,
+                          (GLvoid*)(6 * sizeof(GLfloat)) /* offset */);
+    mMeshBuffer.unbind();
+
+    // draw mesh
+    glDrawArrays(GL_TRIANGLES, 0 /* first */, 3 /* count */);
+}
+
+// Runs the Kawase blur passes: downsample+blur the composition into the
+// ping buffer, then ping-pong with growing offsets. The final result is
+// left in mLastDrawTarget for render() to consume.
+status_t BlurFilter::prepare() {
+    ATRACE_NAME("BlurFilter::prepare");
+
+    // Kawase is an approximation of Gaussian, but it behaves differently from it.
+    // A radius transformation is required for approximating them, and also to introduce
+    // non-integer steps, necessary to smoothly interpolate large radii.
+    const auto radius = mRadius / 6.0f;
+
+    // Calculate how many passes we'll do, based on the radius.
+    // Too many passes will make the operation expensive.
+    const auto passes = min(kMaxPasses, (uint32_t)ceil(radius));
+
+    const float radiusByPasses = radius / (float)passes;
+    const float stepX = radiusByPasses / (float)mCompositionFbo.getBufferWidth();
+    const float stepY = radiusByPasses / (float)mCompositionFbo.getBufferHeight();
+
+    // Let's start by downsampling and blurring the composited frame simultaneously.
+    mBlurProgram.useProgram();
+    glActiveTexture(GL_TEXTURE0);
+    glUniform1i(mBTextureLoc, 0);
+    glBindTexture(GL_TEXTURE_2D, mCompositionFbo.getTextureName());
+    glUniform2f(mBOffsetLoc, stepX, stepY);
+    glViewport(0, 0, mPingFbo.getBufferWidth(), mPingFbo.getBufferHeight());
+    mPingFbo.bind();
+    drawMesh(mBUvLoc, mBPosLoc);
+
+    // And now we'll ping pong between our textures, to accumulate the result of various offsets.
+    GLFramebuffer* read = &mPingFbo;
+    GLFramebuffer* draw = &mPongFbo;
+    glViewport(0, 0, draw->getBufferWidth(), draw->getBufferHeight());
+    for (auto i = 1; i < passes; i++) { // pass 0 was the downsample above
+        ATRACE_NAME("BlurFilter::renderPass");
+        draw->bind();
+
+        glBindTexture(GL_TEXTURE_2D, read->getTextureName());
+        glUniform2f(mBOffsetLoc, stepX * i, stepY * i);
+
+        drawMesh(mBUvLoc, mBPosLoc);
+
+        // Swap buffers for next iteration
+        auto tmp = draw;
+        draw = read;
+        read = tmp;
+    }
+    mLastDrawTarget = read; // holds the most recent blurred result
+
+    return NO_ERROR;
+}
+
+// Upscales the blurred result to the bound framebuffer. For small radii
+// (and a single pass), crossfades with the full-resolution composition to
+// hide downscaling artifacts; otherwise does a plain blit.
+status_t BlurFilter::render(bool multiPass) {
+    ATRACE_NAME("BlurFilter::render");
+
+    // Now let's scale our blur up. It will be interpolated with the larger composited
+    // texture for the first frames, to hide downscaling artifacts.
+    GLfloat mix = fmin(1.0, mRadius / kMaxCrossFadeRadius);
+
+    // When doing multiple passes, we cannot try to read mCompositionFbo, given that we'll
+    // be writing onto it. Let's disable the crossfade, otherwise we'd need 1 extra frame buffer,
+    // as large as the screen size.
+    if (mix >= 1 || multiPass) {
+        mLastDrawTarget->bindAsReadBuffer();
+        glBlitFramebuffer(0, 0, mLastDrawTarget->getBufferWidth(),
+                          mLastDrawTarget->getBufferHeight(), mDisplayX, mDisplayY, mDisplayWidth,
+                          mDisplayHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
+        return NO_ERROR;
+    }
+
+    // Crossfade path: blend blurred texture with the composition texture.
+    mMixProgram.useProgram();
+    glUniform1f(mMMixLoc, mix);
+    glActiveTexture(GL_TEXTURE0);
+    glBindTexture(GL_TEXTURE_2D, mLastDrawTarget->getTextureName());
+    glUniform1i(mMTextureLoc, 0);
+    glActiveTexture(GL_TEXTURE1);
+    glBindTexture(GL_TEXTURE_2D, mCompositionFbo.getTextureName());
+    glUniform1i(mMCompositionTextureLoc, 1);
+
+    drawMesh(mMUvLoc, mMPosLoc);
+
+    glUseProgram(0);
+    glActiveTexture(GL_TEXTURE0);
+    mEngine.checkErrors("Drawing blur mesh");
+    return NO_ERROR;
+}
+
+// Shared pass-through vertex shader used by both blur and mix programs.
+string BlurFilter::getVertexShader() const {
+    return R"SHADER(#version 310 es
+        precision mediump float;
+
+        in vec2 aPosition;
+        in highp vec2 aUV;
+        out highp vec2 vUV;
+
+        void main() {
+            vUV = aUV;
+            gl_Position = vec4(aPosition, 0.0, 1.0);
+        }
+    )SHADER";
+}
+
+// Kawase blur fragment shader: averages four diagonally-offset taps.
+string BlurFilter::getFragmentShader() const {
+    return R"SHADER(#version 310 es
+        precision mediump float;
+
+        uniform sampler2D uTexture;
+        uniform vec2 uOffset;
+
+        in highp vec2 vUV;
+        out vec4 fragColor;
+
+        void main() {
+            fragColor  = texture(uTexture, vUV, 0.0);
+            fragColor += texture(uTexture, vUV + vec2( uOffset.x,  uOffset.y), 0.0);
+            fragColor += texture(uTexture, vUV + vec2( uOffset.x, -uOffset.y), 0.0);
+            fragColor += texture(uTexture, vUV + vec2(-uOffset.x,  uOffset.y), 0.0);
+            fragColor += texture(uTexture, vUV + vec2(-uOffset.x, -uOffset.y), 0.0);
+
+            fragColor = vec4(fragColor.rgb * 0.2, 1.0);
+        }
+    )SHADER";
+}
+
+// Crossfade shader: mixes the blurred texture with the full-resolution
+// composition according to uMix (0 = composition, 1 = fully blurred).
+string BlurFilter::getMixFragShader() const {
+    string shader = R"SHADER(#version 310 es
+        precision mediump float;
+
+        in highp vec2 vUV;
+        out vec4 fragColor;
+
+        uniform sampler2D uCompositionTexture;
+        uniform sampler2D uTexture;
+        uniform float uMix;
+
+        void main() {
+            vec4 blurred = texture(uTexture, vUV);
+            vec4 composition = texture(uCompositionTexture, vUV);
+            fragColor = mix(composition, blurred, uMix);
+        }
+    )SHADER";
+    return shader;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/filters/BlurFilter.h b/media/libstagefright/renderfright/gl/filters/BlurFilter.h
new file mode 100644
index 0000000..593a8fd
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/BlurFilter.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <ui/GraphicTypes.h>
+#include "../GLESRenderEngine.h"
+#include "../GLFramebuffer.h"
+#include "../GLVertexBuffer.h"
+#include "GenericProgram.h"
+
+using namespace std;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+/**
+ * This is an implementation of a Kawase blur, as described in here:
+ * https://community.arm.com/cfs-file/__key/communityserver-blogs-components-weblogfiles/
+ * 00-00-00-20-66/siggraph2015_2D00_mmg_2D00_marius_2D00_notes.pdf
+ */
+class BlurFilter {
+public:
+ // Downsample FBO to improve performance
+ static constexpr float kFboScale = 0.25f;
+ // Maximum number of render passes
+ static constexpr uint32_t kMaxPasses = 4;
+ // To avoid downscaling artifacts, we interpolate the blurred fbo with the full composited
+ // image, up to this radius.
+ static constexpr float kMaxCrossFadeRadius = 30.0f;
+
+ explicit BlurFilter(GLESRenderEngine& engine);
+ virtual ~BlurFilter(){};
+
+ // Set up render targets, redirecting output to offscreen texture.
+ status_t setAsDrawTarget(const DisplaySettings&, uint32_t radius);
+ // Execute blur passes, rendering to offscreen texture.
+ status_t prepare();
+ // Render blur to the bound framebuffer (screen).
+ status_t render(bool multiPass);
+
+private:
+ uint32_t mRadius;
+ void drawMesh(GLuint uv, GLuint position);
+ string getVertexShader() const;
+ string getFragmentShader() const;
+ string getMixFragShader() const;
+
+ GLESRenderEngine& mEngine;
+ // Frame buffer holding the composited background.
+ GLFramebuffer mCompositionFbo;
+ // Frame buffers holding the blur passes.
+ GLFramebuffer mPingFbo;
+ GLFramebuffer mPongFbo;
+ uint32_t mDisplayWidth = 0;
+ uint32_t mDisplayHeight = 0;
+ uint32_t mDisplayX = 0;
+ uint32_t mDisplayY = 0;
+ // Buffer holding the final blur pass.
+ GLFramebuffer* mLastDrawTarget;
+
+ // VBO containing vertex and uv data of a fullscreen triangle.
+ GLVertexBuffer mMeshBuffer;
+
+ GenericProgram mMixProgram;
+ GLuint mMPosLoc;
+ GLuint mMUvLoc;
+ GLuint mMMixLoc;
+ GLuint mMTextureLoc;
+ GLuint mMCompositionTextureLoc;
+
+ GenericProgram mBlurProgram;
+ GLuint mBPosLoc;
+ GLuint mBUvLoc;
+ GLuint mBTextureLoc;
+ GLuint mBOffsetLoc;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/filters/GenericProgram.cpp b/media/libstagefright/renderfright/gl/filters/GenericProgram.cpp
new file mode 100644
index 0000000..bb35889
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/GenericProgram.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GenericProgram.h"
+
+#include <GLES/gl.h>
+#include <GLES/glext.h>
+#include <GLES2/gl2.h>
+#include <GLES2/gl2ext.h>
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+GenericProgram::GenericProgram(GLESRenderEngine& engine) : mEngine(engine) {}
+
+GenericProgram::~GenericProgram() {
+ if (mVertexShaderHandle != 0) {
+ if (mProgramHandle != 0) {
+ glDetachShader(mProgramHandle, mVertexShaderHandle);
+ }
+ glDeleteShader(mVertexShaderHandle);
+ }
+
+ if (mFragmentShaderHandle != 0) {
+ if (mProgramHandle != 0) {
+ glDetachShader(mProgramHandle, mFragmentShaderHandle);
+ }
+ glDeleteShader(mFragmentShaderHandle);
+ }
+
+ if (mProgramHandle != 0) {
+ glDeleteProgram(mProgramHandle);
+ }
+}
+
+void GenericProgram::compile(string vertexShader, string fragmentShader) {
+ mVertexShaderHandle = compileShader(GL_VERTEX_SHADER, vertexShader);
+ mFragmentShaderHandle = compileShader(GL_FRAGMENT_SHADER, fragmentShader);
+ if (mVertexShaderHandle == 0 || mFragmentShaderHandle == 0) {
+ ALOGE("Aborting program creation.");
+ return;
+ }
+ mProgramHandle = createAndLink(mVertexShaderHandle, mFragmentShaderHandle);
+ mEngine.checkErrors("Linking program");
+}
+
+void GenericProgram::useProgram() const {
+ glUseProgram(mProgramHandle);
+}
+
+GLuint GenericProgram::compileShader(GLuint type, string src) const {
+ const GLuint shader = glCreateShader(type);
+ if (shader == 0) {
+ mEngine.checkErrors("Creating shader");
+ return 0;
+ }
+ const GLchar* charSrc = (const GLchar*)src.c_str();
+ glShaderSource(shader, 1, &charSrc, nullptr);
+ glCompileShader(shader);
+
+ GLint isCompiled = 0;
+ glGetShaderiv(shader, GL_COMPILE_STATUS, &isCompiled);
+ if (isCompiled == GL_FALSE) {
+ GLint maxLength = 0;
+ glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength);
+ string errorLog;
+ errorLog.reserve(maxLength);
+ glGetShaderInfoLog(shader, maxLength, &maxLength, errorLog.data());
+ glDeleteShader(shader);
+ ALOGE("Error compiling shader: %s", errorLog.c_str());
+ return 0;
+ }
+ return shader;
+}
+GLuint GenericProgram::createAndLink(GLuint vertexShader, GLuint fragmentShader) const {
+ const GLuint program = glCreateProgram();
+ mEngine.checkErrors("Creating program");
+
+ glAttachShader(program, vertexShader);
+ glAttachShader(program, fragmentShader);
+ glLinkProgram(program);
+ mEngine.checkErrors("Linking program");
+ return program;
+}
+
+GLuint GenericProgram::getUniformLocation(const string name) const {
+ if (mProgramHandle == 0) {
+ ALOGE("Can't get location of %s on an invalid program.", name.c_str());
+ return -1;
+ }
+ return glGetUniformLocation(mProgramHandle, (const GLchar*)name.c_str());
+}
+
+GLuint GenericProgram::getAttributeLocation(const string name) const {
+ if (mProgramHandle == 0) {
+ ALOGE("Can't get location of %s on an invalid program.", name.c_str());
+ return -1;
+ }
+ return glGetAttribLocation(mProgramHandle, (const GLchar*)name.c_str());
+}
+
+bool GenericProgram::isValid() const {
+ return mProgramHandle != 0;
+}
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/gl/filters/GenericProgram.h b/media/libstagefright/renderfright/gl/filters/GenericProgram.h
new file mode 100644
index 0000000..6da2a5a
--- /dev/null
+++ b/media/libstagefright/renderfright/gl/filters/GenericProgram.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <ui/GraphicTypes.h>
+#include "../GLESRenderEngine.h"
+#include "../GLFramebuffer.h"
+
+using namespace std;
+
+namespace android {
+namespace renderengine {
+namespace gl {
+
+class GenericProgram {
+public:
+ explicit GenericProgram(GLESRenderEngine& renderEngine);
+ ~GenericProgram();
+ void compile(string vertexShader, string fragmentShader);
+ bool isValid() const;
+ void useProgram() const;
+ GLuint getAttributeLocation(const string name) const;
+ GLuint getUniformLocation(const string name) const;
+
+private:
+ GLuint compileShader(GLuint type, const string src) const;
+ GLuint createAndLink(GLuint vertexShader, GLuint fragmentShader) const;
+
+ GLESRenderEngine& mEngine;
+ GLuint mVertexShaderHandle = 0;
+ GLuint mFragmentShaderHandle = 0;
+ GLuint mProgramHandle = 0;
+};
+
+} // namespace gl
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/DisplaySettings.h b/media/libstagefright/renderfright/include/renderengine/DisplaySettings.h
new file mode 100644
index 0000000..ca16d2c
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/DisplaySettings.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <iosfwd>
+
+#include <math/mat4.h>
+#include <ui/GraphicTypes.h>
+#include <ui/Rect.h>
+#include <ui/Region.h>
+#include <ui/Transform.h>
+
+namespace android {
+namespace renderengine {
+
+// DisplaySettings contains the settings that are applicable when drawing all
+// layers for a given display.
+struct DisplaySettings {
+ // Rectangle describing the physical display. We will project from the
+ // logical clip onto this rectangle.
+ Rect physicalDisplay = Rect::INVALID_RECT;
+
+    // Rectangle bounded by the x,y clipping planes in the logical display, so
+ // that the orthographic projection matrix can be computed. When
+ // constructing this matrix, z-coordinate bound are assumed to be at z=0 and
+ // z=1.
+ Rect clip = Rect::INVALID_RECT;
+
+ // Maximum luminance pulled from the display's HDR capabilities.
+ float maxLuminance = 1.0f;
+
+ // Output dataspace that will be populated if wide color gamut is used, or
+ // DataSpace::UNKNOWN otherwise.
+ ui::Dataspace outputDataspace = ui::Dataspace::UNKNOWN;
+
+ // Additional color transform to apply in linear space after transforming
+ // to the output dataspace.
+ mat4 colorTransform = mat4();
+
+ // Region that will be cleared to (0, 0, 0, 1) prior to rendering.
+ // This is specified in layer-stack space.
+ Region clearRegion = Region::INVALID_REGION;
+
+ // An additional orientation flag to be applied after clipping the output.
+ // By way of example, this may be used for supporting fullscreen screenshot
+ // capture of a device in landscape while the buffer is in portrait
+ // orientation.
+ uint32_t orientation = ui::Transform::ROT_0;
+};
+
+static inline bool operator==(const DisplaySettings& lhs, const DisplaySettings& rhs) {
+ return lhs.physicalDisplay == rhs.physicalDisplay && lhs.clip == rhs.clip &&
+ lhs.maxLuminance == rhs.maxLuminance && lhs.outputDataspace == rhs.outputDataspace &&
+ lhs.colorTransform == rhs.colorTransform &&
+ lhs.clearRegion.hasSameRects(rhs.clearRegion) && lhs.orientation == rhs.orientation;
+}
+
+// Defining PrintTo helps with Google Tests.
+static inline void PrintTo(const DisplaySettings& settings, ::std::ostream* os) {
+ *os << "DisplaySettings {";
+ *os << "\n .physicalDisplay = ";
+ PrintTo(settings.physicalDisplay, os);
+ *os << "\n .clip = ";
+ PrintTo(settings.clip, os);
+ *os << "\n .maxLuminance = " << settings.maxLuminance;
+ *os << "\n .outputDataspace = ";
+ PrintTo(settings.outputDataspace, os);
+ *os << "\n .colorTransform = " << settings.colorTransform;
+ *os << "\n .clearRegion = ";
+ PrintTo(settings.clearRegion, os);
+ *os << "\n .orientation = " << settings.orientation;
+ *os << "\n}";
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/Framebuffer.h b/media/libstagefright/renderfright/include/renderengine/Framebuffer.h
new file mode 100644
index 0000000..6511127
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Framebuffer.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdint>
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+
+class Framebuffer {
+public:
+ virtual ~Framebuffer() = default;
+
+ virtual bool setNativeWindowBuffer(ANativeWindowBuffer* nativeBuffer, bool isProtected,
+ const bool useFramebufferCache) = 0;
+};
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/Image.h b/media/libstagefright/renderfright/include/renderengine/Image.h
new file mode 100644
index 0000000..3bb4731
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Image.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+struct ANativeWindowBuffer;
+
+namespace android {
+namespace renderengine {
+
+class Image {
+public:
+ virtual ~Image() = default;
+ virtual bool setNativeWindowBuffer(ANativeWindowBuffer* buffer, bool isProtected) = 0;
+};
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/LayerSettings.h b/media/libstagefright/renderfright/include/renderengine/LayerSettings.h
new file mode 100644
index 0000000..95e9367
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/LayerSettings.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <iosfwd>
+
+#include <math/mat4.h>
+#include <math/vec3.h>
+#include <renderengine/Texture.h>
+#include <ui/Fence.h>
+#include <ui/FloatRect.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicTypes.h>
+#include <ui/Rect.h>
+#include <ui/Region.h>
+#include <ui/Transform.h>
+
+namespace android {
+namespace renderengine {
+
+// Metadata describing the input buffer to render from.
+struct Buffer {
+ // Buffer containing the image that we will render.
+ // If buffer == nullptr, then the rest of the fields in this struct will be
+ // ignored.
+ sp<GraphicBuffer> buffer = nullptr;
+
+ // Fence that will fire when the buffer is ready to be bound.
+ sp<Fence> fence = nullptr;
+
+ // Texture identifier to bind the external texture to.
+ // TODO(alecmouri): This is GL-specific...make the type backend-agnostic.
+ uint32_t textureName = 0;
+
+ // Whether to use filtering when rendering the texture.
+ bool useTextureFiltering = false;
+
+ // Transform matrix to apply to texture coordinates.
+ mat4 textureTransform = mat4();
+
+ // Whether to use pre-multiplied alpha.
+ bool usePremultipliedAlpha = true;
+
+ // Override flag that alpha for each pixel in the buffer *must* be 1.0.
+ // LayerSettings::alpha is still used if isOpaque==true - this flag only
+ // overrides the alpha channel of the buffer.
+ bool isOpaque = false;
+
+ // HDR color-space setting for Y410.
+ bool isY410BT2020 = false;
+ float maxMasteringLuminance = 0.0;
+ float maxContentLuminance = 0.0;
+};
+
+// Metadata describing the layer geometry.
+struct Geometry {
+ // Boundaries of the layer.
+ FloatRect boundaries = FloatRect();
+
+ // Transform matrix to apply to mesh coordinates.
+ mat4 positionTransform = mat4();
+
+ // Radius of rounded corners, if greater than 0. Otherwise, this layer's
+ // corners are not rounded.
+ // Having corner radius will force GPU composition on the layer and its children, drawing it
+ // with a special shader. The shader will receive the radius and the crop rectangle as input,
+ // modifying the opacity of the destination texture, multiplying it by a number between 0 and 1.
+ // We query Layer#getRoundedCornerState() to retrieve the radius as well as the rounded crop
+ // rectangle to figure out how to apply the radius for this layer. The crop rectangle will be
+ // in local layer coordinate space, so we have to take the layer transform into account when
+ // walking up the tree.
+ float roundedCornersRadius = 0.0;
+
+ // Rectangle within which corners will be rounded.
+ FloatRect roundedCornersCrop = FloatRect();
+};
+
+// Descriptor of the source pixels for this layer.
+struct PixelSource {
+ // Source buffer
+ Buffer buffer = Buffer();
+
+ // The solid color with which to fill the layer.
+ // This should only be populated if we don't render from an application
+ // buffer.
+ half3 solidColor = half3(0.0f, 0.0f, 0.0f);
+};
+
+/*
+ * Contains the configuration for the shadows drawn by single layer. Shadow follows
+ * material design guidelines.
+ */
+struct ShadowSettings {
+    // Color of the ambient shadow. The alpha is premultiplied.
+ vec4 ambientColor = vec4();
+
+    // Color of the spot shadow. The alpha is premultiplied. The position of the spot shadow
+ // depends on the light position.
+ vec4 spotColor = vec4();
+
+ // Position of the light source used to cast the spot shadow.
+ vec3 lightPos = vec3();
+
+ // Radius of the spot light source. Smaller radius will have sharper edges,
+ // larger radius will have softer shadows
+ float lightRadius = 0.f;
+
+ // Length of the cast shadow. If length is <= 0.f no shadows will be drawn.
+ float length = 0.f;
+
+    // If true, the casting layer is translucent and the shadow needs to fill the bounds.
+ // Otherwise the shadow will only be drawn around the edges of the casting layer.
+ bool casterIsTranslucent = false;
+};
+
+// The settings that RenderEngine requires for correctly rendering a Layer.
+struct LayerSettings {
+ // Geometry information
+ Geometry geometry = Geometry();
+
+ // Source pixels for this layer.
+ PixelSource source = PixelSource();
+
+ // Alpha option to blend with the source pixels
+ half alpha = half(0.0);
+
+ // Color space describing how the source pixels should be interpreted.
+ ui::Dataspace sourceDataspace = ui::Dataspace::UNKNOWN;
+
+ // Additional layer-specific color transform to be applied before the global
+ // transform.
+ mat4 colorTransform = mat4();
+
+ // True if blending will be forced to be disabled.
+ bool disableBlending = false;
+
+ ShadowSettings shadow;
+
+ int backgroundBlurRadius = 0;
+};
+
+// Keep in sync with custom comparison function in
+// compositionengine/impl/ClientCompositionRequestCache.cpp
+static inline bool operator==(const Buffer& lhs, const Buffer& rhs) {
+ return lhs.buffer == rhs.buffer && lhs.fence == rhs.fence &&
+ lhs.textureName == rhs.textureName &&
+ lhs.useTextureFiltering == rhs.useTextureFiltering &&
+ lhs.textureTransform == rhs.textureTransform &&
+ lhs.usePremultipliedAlpha == rhs.usePremultipliedAlpha &&
+ lhs.isOpaque == rhs.isOpaque && lhs.isY410BT2020 == rhs.isY410BT2020 &&
+ lhs.maxMasteringLuminance == rhs.maxMasteringLuminance &&
+ lhs.maxContentLuminance == rhs.maxContentLuminance;
+}
+
+static inline bool operator==(const Geometry& lhs, const Geometry& rhs) {
+ return lhs.boundaries == rhs.boundaries && lhs.positionTransform == rhs.positionTransform &&
+ lhs.roundedCornersRadius == rhs.roundedCornersRadius &&
+ lhs.roundedCornersCrop == rhs.roundedCornersCrop;
+}
+
+static inline bool operator==(const PixelSource& lhs, const PixelSource& rhs) {
+ return lhs.buffer == rhs.buffer && lhs.solidColor == rhs.solidColor;
+}
+
+static inline bool operator==(const ShadowSettings& lhs, const ShadowSettings& rhs) {
+ return lhs.ambientColor == rhs.ambientColor && lhs.spotColor == rhs.spotColor &&
+ lhs.lightPos == rhs.lightPos && lhs.lightRadius == rhs.lightRadius &&
+ lhs.length == rhs.length && lhs.casterIsTranslucent == rhs.casterIsTranslucent;
+}
+
+static inline bool operator==(const LayerSettings& lhs, const LayerSettings& rhs) {
+ return lhs.geometry == rhs.geometry && lhs.source == rhs.source && lhs.alpha == rhs.alpha &&
+ lhs.sourceDataspace == rhs.sourceDataspace &&
+ lhs.colorTransform == rhs.colorTransform &&
+ lhs.disableBlending == rhs.disableBlending && lhs.shadow == rhs.shadow &&
+ lhs.backgroundBlurRadius == rhs.backgroundBlurRadius;
+}
+
+// Defining PrintTo helps with Google Tests.
+
+static inline void PrintTo(const Buffer& settings, ::std::ostream* os) {
+ *os << "Buffer {";
+ *os << "\n .buffer = " << settings.buffer.get();
+ *os << "\n .fence = " << settings.fence.get();
+ *os << "\n .textureName = " << settings.textureName;
+ *os << "\n .useTextureFiltering = " << settings.useTextureFiltering;
+ *os << "\n .textureTransform = " << settings.textureTransform;
+ *os << "\n .usePremultipliedAlpha = " << settings.usePremultipliedAlpha;
+ *os << "\n .isOpaque = " << settings.isOpaque;
+ *os << "\n .isY410BT2020 = " << settings.isY410BT2020;
+ *os << "\n .maxMasteringLuminance = " << settings.maxMasteringLuminance;
+ *os << "\n .maxContentLuminance = " << settings.maxContentLuminance;
+ *os << "\n}";
+}
+
+static inline void PrintTo(const Geometry& settings, ::std::ostream* os) {
+ *os << "Geometry {";
+ *os << "\n .boundaries = ";
+ PrintTo(settings.boundaries, os);
+ *os << "\n .positionTransform = " << settings.positionTransform;
+ *os << "\n .roundedCornersRadius = " << settings.roundedCornersRadius;
+ *os << "\n .roundedCornersCrop = ";
+ PrintTo(settings.roundedCornersCrop, os);
+ *os << "\n}";
+}
+
+static inline void PrintTo(const PixelSource& settings, ::std::ostream* os) {
+ *os << "PixelSource {";
+ *os << "\n .buffer = ";
+ PrintTo(settings.buffer, os);
+ *os << "\n .solidColor = " << settings.solidColor;
+ *os << "\n}";
+}
+
+static inline void PrintTo(const ShadowSettings& settings, ::std::ostream* os) {
+ *os << "ShadowSettings {";
+ *os << "\n .ambientColor = " << settings.ambientColor;
+ *os << "\n .spotColor = " << settings.spotColor;
+ *os << "\n .lightPos = " << settings.lightPos;
+ *os << "\n .lightRadius = " << settings.lightRadius;
+ *os << "\n .length = " << settings.length;
+ *os << "\n .casterIsTranslucent = " << settings.casterIsTranslucent;
+ *os << "\n}";
+}
+
+static inline void PrintTo(const LayerSettings& settings, ::std::ostream* os) {
+ *os << "LayerSettings {";
+ *os << "\n .geometry = ";
+ PrintTo(settings.geometry, os);
+ *os << "\n .source = ";
+ PrintTo(settings.source, os);
+ *os << "\n .alpha = " << settings.alpha;
+ *os << "\n .sourceDataspace = ";
+ PrintTo(settings.sourceDataspace, os);
+ *os << "\n .colorTransform = " << settings.colorTransform;
+ *os << "\n .disableBlending = " << settings.disableBlending;
+ *os << "\n .backgroundBlurRadius = " << settings.backgroundBlurRadius;
+ *os << "\n .shadow = ";
+ PrintTo(settings.shadow, os);
+ *os << "\n}";
+}
+
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/Mesh.h b/media/libstagefright/renderfright/include/renderengine/Mesh.h
new file mode 100644
index 0000000..167f13f
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Mesh.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_MESH_H
+#define SF_RENDER_ENGINE_MESH_H
+
+#include <vector>
+
+#include <stdint.h>
+
+namespace android {
+namespace renderengine {
+
+class Mesh {
+public:
+ class Builder;
+
+ enum Primitive {
+ TRIANGLES = 0x0004, // GL_TRIANGLES
+ TRIANGLE_STRIP = 0x0005, // GL_TRIANGLE_STRIP
+ TRIANGLE_FAN = 0x0006 // GL_TRIANGLE_FAN
+ };
+
+ ~Mesh() = default;
+
+ /*
+ * VertexArray handles the stride automatically.
+ */
+ template <typename TYPE>
+ class VertexArray {
+ friend class Mesh;
+ float* mData;
+ size_t mStride;
+ size_t mOffset = 0;
+ VertexArray(float* data, size_t stride) : mData(data), mStride(stride) {}
+
+ public:
+        // Returns a vertex array at an offset so it's easier to append attributes from
+ // multiple sources.
+ VertexArray(VertexArray<TYPE>& other, size_t offset)
+ : mData(other.mData), mStride(other.mStride), mOffset(offset) {}
+
+ TYPE& operator[](size_t index) {
+ return *reinterpret_cast<TYPE*>(&mData[(index + mOffset) * mStride]);
+ }
+ TYPE const& operator[](size_t index) const {
+ return *reinterpret_cast<TYPE const*>(&mData[(index + mOffset) * mStride]);
+ }
+ };
+
+ template <typename TYPE>
+ VertexArray<TYPE> getPositionArray() {
+ return VertexArray<TYPE>(getPositions(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getTexCoordArray() {
+ return VertexArray<TYPE>(getTexCoords(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getCropCoordArray() {
+ return VertexArray<TYPE>(getCropCoords(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getShadowColorArray() {
+ return VertexArray<TYPE>(getShadowColor(), mStride);
+ }
+
+ template <typename TYPE>
+ VertexArray<TYPE> getShadowParamsArray() {
+ return VertexArray<TYPE>(getShadowParams(), mStride);
+ }
+
+ uint16_t* getIndicesArray() { return getIndices(); }
+
+ Primitive getPrimitive() const;
+
+ // returns a pointer to the vertices positions
+ float const* getPositions() const;
+
+ // returns a pointer to the vertices texture coordinates
+ float const* getTexCoords() const;
+
+ // returns a pointer to the vertices crop coordinates
+ float const* getCropCoords() const;
+
+ // returns a pointer to colors
+ float const* getShadowColor() const;
+
+ // returns a pointer to the shadow params
+ float const* getShadowParams() const;
+
+ // returns a pointer to indices
+ uint16_t const* getIndices() const;
+
+ // number of vertices in this mesh
+ size_t getVertexCount() const;
+
+ // dimension of vertices
+ size_t getVertexSize() const;
+
+ // dimension of texture coordinates
+ size_t getTexCoordsSize() const;
+
+ size_t getShadowParamsSize() const;
+
+ size_t getShadowColorSize() const;
+
+ size_t getIndexCount() const;
+
+ // return stride in bytes
+ size_t getByteStride() const;
+
+ // return stride in floats
+ size_t getStride() const;
+
+private:
+ Mesh(Primitive primitive, size_t vertexCount, size_t vertexSize, size_t texCoordSize,
+ size_t cropCoordsSize, size_t shadowColorSize, size_t shadowParamsSize, size_t indexCount);
+ Mesh(const Mesh&);
+ Mesh& operator=(const Mesh&);
+ Mesh const& operator=(const Mesh&) const;
+
+ float* getPositions();
+ float* getTexCoords();
+ float* getCropCoords();
+ float* getShadowColor();
+ float* getShadowParams();
+ uint16_t* getIndices();
+
+ std::vector<float> mVertices;
+ size_t mVertexCount;
+ size_t mVertexSize;
+ size_t mTexCoordsSize;
+ size_t mCropCoordsSize;
+ size_t mShadowColorSize;
+ size_t mShadowParamsSize;
+ size_t mStride;
+ Primitive mPrimitive;
+ std::vector<uint16_t> mIndices;
+ size_t mIndexCount;
+};
+
+class Mesh::Builder {
+public:
+ Builder& setPrimitive(Primitive primitive) {
+ mPrimitive = primitive;
+ return *this;
+ };
+ Builder& setVertices(size_t vertexCount, size_t vertexSize) {
+ mVertexCount = vertexCount;
+ mVertexSize = vertexSize;
+ return *this;
+ };
+ Builder& setTexCoords(size_t texCoordsSize) {
+ mTexCoordsSize = texCoordsSize;
+ return *this;
+ };
+ Builder& setCropCoords(size_t cropCoordsSize) {
+ mCropCoordsSize = cropCoordsSize;
+ return *this;
+ };
+ Builder& setShadowAttrs() {
+ mShadowParamsSize = 3;
+ mShadowColorSize = 4;
+ return *this;
+ };
+ Builder& setIndices(size_t indexCount) {
+ mIndexCount = indexCount;
+ return *this;
+ };
+ Mesh build() const {
+ return Mesh{mPrimitive, mVertexCount, mVertexSize, mTexCoordsSize,
+ mCropCoordsSize, mShadowColorSize, mShadowParamsSize, mIndexCount};
+ }
+
+private:
+ size_t mVertexCount = 0;
+ size_t mVertexSize = 0;
+ size_t mTexCoordsSize = 0;
+ size_t mCropCoordsSize = 0;
+ size_t mShadowColorSize = 0;
+ size_t mShadowParamsSize = 0;
+ size_t mIndexCount = 0;
+ Primitive mPrimitive;
+};
+
+} // namespace renderengine
+} // namespace android
+#endif /* SF_RENDER_ENGINE_MESH_H */
diff --git a/media/libstagefright/renderfright/include/renderengine/RenderEngine.h b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
new file mode 100644
index 0000000..09a0f65
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/RenderEngine.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDERENGINE_H_
+#define SF_RENDERENGINE_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <memory>
+
+#include <android-base/unique_fd.h>
+#include <math/mat4.h>
+#include <renderengine/DisplaySettings.h>
+#include <renderengine/Framebuffer.h>
+#include <renderengine/Image.h>
+#include <renderengine/LayerSettings.h>
+#include <ui/GraphicTypes.h>
+#include <ui/Transform.h>
+
+/**
+ * Allows setting the RenderEngine backend to GLES (default) or Vulkan (NOT yet supported).
+ */
+#define PROPERTY_DEBUG_RENDERENGINE_BACKEND "debug.renderengine.backend"
+
+struct ANativeWindowBuffer;
+
+namespace android {
+
+class Rect;
+class Region;
+
+namespace renderengine {
+
+class BindNativeBufferAsFramebuffer;
+class Image;
+class Mesh;
+class Texture;
+struct RenderEngineCreationArgs;
+
+namespace threaded {
+class RenderEngineThreaded;
+}
+
+namespace impl {
+class RenderEngine;
+}
+
+enum class Protection {
+ UNPROTECTED = 1,
+ PROTECTED = 2,
+};
+
+class RenderEngine {
+public:
+ enum class ContextPriority {
+ LOW = 1,
+ MEDIUM = 2,
+ HIGH = 3,
+ };
+
+ enum class RenderEngineType {
+ GLES = 1,
+ THREADED = 2,
+ };
+
+ static std::unique_ptr<RenderEngine> create(const RenderEngineCreationArgs& args);
+
+ virtual ~RenderEngine() = 0;
+
+ // ----- BEGIN DEPRECATED INTERFACE -----
+ // This interface, while still in use until a suitable replacement is built,
+ // should be considered deprecated, minus some methods which still may be
+ // used to support legacy behavior.
+ virtual void primeCache() const = 0;
+
+ // dump the extension strings. always call the base class.
+ virtual void dump(std::string& result) = 0;
+
+ virtual bool useNativeFenceSync() const = 0;
+ virtual bool useWaitSync() const = 0;
+ virtual void genTextures(size_t count, uint32_t* names) = 0;
+ virtual void deleteTextures(size_t count, uint32_t const* names) = 0;
+ virtual void bindExternalTextureImage(uint32_t texName, const Image& image) = 0;
+ // Legacy public method used by devices that don't support native fence
+ // synchronization in their GPU driver, as this method provides implicit
+ // synchronization for latching buffers.
+ virtual status_t bindExternalTextureBuffer(uint32_t texName, const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& fence) = 0;
+ // Caches Image resources for this buffer, but does not bind the buffer to
+ // a particular texture.
+ // Note that work is deferred to an additional thread, i.e. this call
+ // is made asynchronously, but the caller can expect that cache/unbind calls
+ // are performed in a manner that's conflict serializable, i.e. unbinding
+ // a buffer should never occur before binding the buffer if the caller
+ // called {bind, cache}ExternalTextureBuffer before calling unbind.
+ virtual void cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) = 0;
+ // Removes internal resources referenced by the bufferId. This method should be
+ // invoked when the caller will no longer hold a reference to a GraphicBuffer
+ // and needs to clean up its resources.
+ // Note that work is deferred to an additional thread, i.e. this call
+ // is made asynchronously, but the caller can expect that cache/unbind calls
+ // are performed in a manner that's conflict serializable, i.e. unbinding
+ // a buffer should never occur before binding the buffer if the caller
+ // called {bind, cache}ExternalTextureBuffer before calling unbind.
+ virtual void unbindExternalTextureBuffer(uint64_t bufferId) = 0;
+ // When binding a native buffer, it must be done before setViewportAndProjection
+ // Returns NO_ERROR when binds successfully, NO_MEMORY when there's no memory for allocation.
+ virtual status_t bindFrameBuffer(Framebuffer* framebuffer) = 0;
+ virtual void unbindFrameBuffer(Framebuffer* framebuffer) = 0;
+
+ enum class CleanupMode {
+ CLEAN_OUTPUT_RESOURCES,
+ CLEAN_ALL,
+ };
+ // Clean-up method that should be called on the main thread after the
+ // drawFence returned by drawLayers fires. This method will free up
+ // resources used by the most recently drawn frame. If the frame is still
+ // being drawn, then this call is silently ignored.
+ //
+ // If mode is CLEAN_OUTPUT_RESOURCES, then only resources related to the
+ // output framebuffer are cleaned up, including the sibling texture.
+ //
+ // If mode is CLEAN_ALL, then we also cleanup resources related to any input
+ // buffers.
+ //
+ // Returns true if resources were cleaned up, and false if we didn't need to
+ // do any work.
+ virtual bool cleanupPostRender(CleanupMode mode = CleanupMode::CLEAN_OUTPUT_RESOURCES) = 0;
+
+ // queries
+ virtual size_t getMaxTextureSize() const = 0;
+ virtual size_t getMaxViewportDims() const = 0;
+
+ // ----- END DEPRECATED INTERFACE -----
+
+ // ----- BEGIN NEW INTERFACE -----
+
+ virtual bool isProtected() const = 0;
+ virtual bool supportsProtectedContent() const = 0;
+ virtual bool useProtectedContext(bool useProtectedContext) = 0;
+
+ // Renders layers for a particular display via GPU composition. This method
+ // should be called for every display that needs to be rendered via the GPU.
+ // @param display The display-wide settings that should be applied prior to
+ // drawing any layers.
+ //
+ // Assumptions when calling this method:
+ // 1. There is exactly one caller - i.e. multi-threading is not supported.
+ // 2. Additional threads may be calling the {bind,cache}ExternalTexture
+ // methods above. But the main thread is responsible for holding resources
+ // such that Image destruction does not occur while this method is called.
+ //
+    // TODO(b/136806342): This behavior should ideally be fixed since
+    // the above two assumptions are brittle, as conditional thread safety
+    // may be insufficient when maximizing rendering performance in the future.
+ //
+ // @param layers The layers to draw onto the display, in Z-order.
+ // @param buffer The buffer which will be drawn to. This buffer will be
+ // ready once drawFence fires.
+ // @param useFramebufferCache True if the framebuffer cache should be used.
+ // If an implementation does not cache output framebuffers, then this
+ // parameter does nothing.
+ // @param bufferFence Fence signalling that the buffer is ready to be drawn
+ // to.
+ // @param drawFence A pointer to a fence, which will fire when the buffer
+ // has been drawn to and is ready to be examined. The fence will be
+ // initialized by this method. The caller will be responsible for owning the
+ // fence.
+ // @return An error code indicating whether drawing was successful. For
+ // now, this always returns NO_ERROR.
+ virtual status_t drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer, const bool useFramebufferCache,
+ base::unique_fd&& bufferFence, base::unique_fd* drawFence) = 0;
+
+protected:
+ // Gets a framebuffer to render to. This framebuffer may or may not be
+ // cached depending on the implementation.
+ //
+    // Note that this method does not transfer ownership, so the caller must not
+ // live longer than RenderEngine.
+ virtual Framebuffer* getFramebufferForDrawing() = 0;
+ friend class BindNativeBufferAsFramebuffer;
+ friend class threaded::RenderEngineThreaded;
+};
+
+struct RenderEngineCreationArgs {
+ int pixelFormat;
+ uint32_t imageCacheSize;
+ bool useColorManagement;
+ bool enableProtectedContext;
+ bool precacheToneMapperShaderOnly;
+ bool supportsBackgroundBlur;
+ RenderEngine::ContextPriority contextPriority;
+ RenderEngine::RenderEngineType renderEngineType;
+
+ struct Builder;
+
+private:
+ // must be created by Builder via constructor with full argument list
+ RenderEngineCreationArgs(int _pixelFormat, uint32_t _imageCacheSize, bool _useColorManagement,
+ bool _enableProtectedContext, bool _precacheToneMapperShaderOnly,
+ bool _supportsBackgroundBlur,
+ RenderEngine::ContextPriority _contextPriority,
+ RenderEngine::RenderEngineType _renderEngineType)
+ : pixelFormat(_pixelFormat),
+ imageCacheSize(_imageCacheSize),
+ useColorManagement(_useColorManagement),
+ enableProtectedContext(_enableProtectedContext),
+ precacheToneMapperShaderOnly(_precacheToneMapperShaderOnly),
+ supportsBackgroundBlur(_supportsBackgroundBlur),
+ contextPriority(_contextPriority),
+ renderEngineType(_renderEngineType) {}
+ RenderEngineCreationArgs() = delete;
+};
+
+struct RenderEngineCreationArgs::Builder {
+ Builder() {}
+
+ Builder& setPixelFormat(int pixelFormat) {
+ this->pixelFormat = pixelFormat;
+ return *this;
+ }
+ Builder& setImageCacheSize(uint32_t imageCacheSize) {
+ this->imageCacheSize = imageCacheSize;
+ return *this;
+ }
+ Builder& setUseColorManagerment(bool useColorManagement) {
+ this->useColorManagement = useColorManagement;
+ return *this;
+ }
+ Builder& setEnableProtectedContext(bool enableProtectedContext) {
+ this->enableProtectedContext = enableProtectedContext;
+ return *this;
+ }
+ Builder& setPrecacheToneMapperShaderOnly(bool precacheToneMapperShaderOnly) {
+ this->precacheToneMapperShaderOnly = precacheToneMapperShaderOnly;
+ return *this;
+ }
+ Builder& setSupportsBackgroundBlur(bool supportsBackgroundBlur) {
+ this->supportsBackgroundBlur = supportsBackgroundBlur;
+ return *this;
+ }
+ Builder& setContextPriority(RenderEngine::ContextPriority contextPriority) {
+ this->contextPriority = contextPriority;
+ return *this;
+ }
+ Builder& setRenderEngineType(RenderEngine::RenderEngineType renderEngineType) {
+ this->renderEngineType = renderEngineType;
+ return *this;
+ }
+ RenderEngineCreationArgs build() const {
+ return RenderEngineCreationArgs(pixelFormat, imageCacheSize, useColorManagement,
+ enableProtectedContext, precacheToneMapperShaderOnly,
+ supportsBackgroundBlur, contextPriority, renderEngineType);
+ }
+
+private:
+ // 1 means RGBA_8888
+ int pixelFormat = 1;
+ uint32_t imageCacheSize = 0;
+ bool useColorManagement = true;
+ bool enableProtectedContext = false;
+ bool precacheToneMapperShaderOnly = false;
+ bool supportsBackgroundBlur = false;
+ RenderEngine::ContextPriority contextPriority = RenderEngine::ContextPriority::MEDIUM;
+ RenderEngine::RenderEngineType renderEngineType = RenderEngine::RenderEngineType::GLES;
+};
+
+class BindNativeBufferAsFramebuffer {
+public:
+ BindNativeBufferAsFramebuffer(RenderEngine& engine, ANativeWindowBuffer* buffer,
+ const bool useFramebufferCache)
+ : mEngine(engine), mFramebuffer(mEngine.getFramebufferForDrawing()), mStatus(NO_ERROR) {
+ mStatus = mFramebuffer->setNativeWindowBuffer(buffer, mEngine.isProtected(),
+ useFramebufferCache)
+ ? mEngine.bindFrameBuffer(mFramebuffer)
+ : NO_MEMORY;
+ }
+ ~BindNativeBufferAsFramebuffer() {
+ mFramebuffer->setNativeWindowBuffer(nullptr, false, /*arbitrary*/ true);
+ mEngine.unbindFrameBuffer(mFramebuffer);
+ }
+ status_t getStatus() const { return mStatus; }
+
+private:
+ RenderEngine& mEngine;
+ Framebuffer* mFramebuffer;
+ status_t mStatus;
+};
+
+namespace impl {
+
+// impl::RenderEngine contains common implementation that is graphics back-end agnostic.
+class RenderEngine : public renderengine::RenderEngine {
+public:
+ virtual ~RenderEngine() = 0;
+
+ bool useNativeFenceSync() const override;
+ bool useWaitSync() const override;
+
+protected:
+ RenderEngine(const RenderEngineCreationArgs& args);
+ const RenderEngineCreationArgs mArgs;
+};
+
+} // namespace impl
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_RENDERENGINE_H_ */
diff --git a/media/libstagefright/renderfright/include/renderengine/Texture.h b/media/libstagefright/renderfright/include/renderengine/Texture.h
new file mode 100644
index 0000000..c69ace0
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/Texture.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_TEXTURE_H
+#define SF_RENDER_ENGINE_TEXTURE_H
+
+#include <stdint.h>
+
+#include <math/mat4.h>
+
+namespace android {
+namespace renderengine {
+
+class Texture {
+public:
+ enum Target { TEXTURE_2D = 0x0DE1, TEXTURE_EXTERNAL = 0x8D65 };
+
+ Texture();
+ Texture(Target textureTarget, uint32_t textureName);
+ ~Texture();
+
+ void init(Target textureTarget, uint32_t textureName);
+
+ void setMatrix(float const* matrix);
+ void setFiltering(bool enabled);
+ void setDimensions(size_t width, size_t height);
+
+ uint32_t getTextureName() const;
+ uint32_t getTextureTarget() const;
+
+ const mat4& getMatrix() const;
+ bool getFiltering() const;
+ size_t getWidth() const;
+ size_t getHeight() const;
+
+private:
+ uint32_t mTextureName;
+ uint32_t mTextureTarget;
+ size_t mWidth;
+ size_t mHeight;
+ bool mFiltering;
+ mat4 mTextureMatrix;
+};
+
+} // namespace renderengine
+} // namespace android
+#endif /* SF_RENDER_ENGINE_TEXTURE_H */
diff --git a/media/libstagefright/renderfright/include/renderengine/mock/Framebuffer.h b/media/libstagefright/renderfright/include/renderengine/mock/Framebuffer.h
new file mode 100644
index 0000000..dfb6a4e
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/mock/Framebuffer.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include <renderengine/Framebuffer.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+class Framebuffer : public renderengine::Framebuffer {
+public:
+ Framebuffer();
+ ~Framebuffer() override;
+
+ MOCK_METHOD3(setNativeWindowBuffer, bool(ANativeWindowBuffer*, bool, const bool));
+};
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/mock/Image.h b/media/libstagefright/renderfright/include/renderengine/mock/Image.h
new file mode 100644
index 0000000..2b0eed1
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/mock/Image.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include <renderengine/Image.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+class Image : public renderengine::Image {
+public:
+ Image();
+ ~Image() override;
+
+ MOCK_METHOD2(setNativeWindowBuffer, bool(ANativeWindowBuffer* buffer, bool isProtected));
+};
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/mock/RenderEngine.h b/media/libstagefright/renderfright/include/renderengine/mock/RenderEngine.h
new file mode 100644
index 0000000..e03dd58
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/mock/RenderEngine.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include <renderengine/DisplaySettings.h>
+#include <renderengine/LayerSettings.h>
+#include <renderengine/Mesh.h>
+#include <renderengine/RenderEngine.h>
+#include <renderengine/Texture.h>
+#include <ui/Fence.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/Region.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+class RenderEngine : public renderengine::RenderEngine {
+public:
+ RenderEngine();
+ ~RenderEngine() override;
+
+ MOCK_METHOD0(getFramebufferForDrawing, Framebuffer*());
+ MOCK_CONST_METHOD0(primeCache, void());
+ MOCK_METHOD1(dump, void(std::string&));
+ MOCK_CONST_METHOD0(useNativeFenceSync, bool());
+ MOCK_CONST_METHOD0(useWaitSync, bool());
+ MOCK_CONST_METHOD0(isCurrent, bool());
+ MOCK_METHOD2(genTextures, void(size_t, uint32_t*));
+ MOCK_METHOD2(deleteTextures, void(size_t, uint32_t const*));
+ MOCK_METHOD2(bindExternalTextureImage, void(uint32_t, const renderengine::Image&));
+ MOCK_METHOD1(cacheExternalTextureBuffer, void(const sp<GraphicBuffer>&));
+ MOCK_METHOD3(bindExternalTextureBuffer,
+ status_t(uint32_t, const sp<GraphicBuffer>&, const sp<Fence>&));
+ MOCK_METHOD1(unbindExternalTextureBuffer, void(uint64_t));
+ MOCK_METHOD1(bindFrameBuffer, status_t(renderengine::Framebuffer*));
+ MOCK_METHOD1(unbindFrameBuffer, void(renderengine::Framebuffer*));
+ MOCK_METHOD1(drawMesh, void(const renderengine::Mesh&));
+ MOCK_CONST_METHOD0(getMaxTextureSize, size_t());
+ MOCK_CONST_METHOD0(getMaxViewportDims, size_t());
+ MOCK_CONST_METHOD0(isProtected, bool());
+ MOCK_CONST_METHOD0(supportsProtectedContent, bool());
+ MOCK_METHOD1(useProtectedContext, bool(bool));
+ MOCK_METHOD1(cleanupPostRender, bool(CleanupMode mode));
+ MOCK_METHOD6(drawLayers,
+ status_t(const DisplaySettings&, const std::vector<const LayerSettings*>&,
+ const sp<GraphicBuffer>&, const bool, base::unique_fd&&,
+ base::unique_fd*));
+};
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/include/renderengine/private/Description.h b/media/libstagefright/renderfright/include/renderengine/private/Description.h
new file mode 100644
index 0000000..a62161a
--- /dev/null
+++ b/media/libstagefright/renderfright/include/renderengine/private/Description.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SF_RENDER_ENGINE_DESCRIPTION_H_
+#define SF_RENDER_ENGINE_DESCRIPTION_H_
+
+#include <renderengine/Texture.h>
+#include <ui/GraphicTypes.h>
+
+namespace android {
+namespace renderengine {
+
+/*
+ * This is the structure that holds the state of the rendering engine.
+ * This class is used to generate a corresponding GLSL program and set the
+ * appropriate uniform.
+ */
+struct Description {
+ enum class TransferFunction : int {
+ LINEAR,
+ SRGB,
+ ST2084,
+ HLG, // Hybrid Log-Gamma for HDR.
+ };
+
+ static TransferFunction dataSpaceToTransferFunction(ui::Dataspace dataSpace);
+
+ Description() = default;
+ ~Description() = default;
+
+ bool hasInputTransformMatrix() const;
+ bool hasOutputTransformMatrix() const;
+ bool hasColorMatrix() const;
+
+ // whether textures are premultiplied
+ bool isPremultipliedAlpha = false;
+ // whether this layer is marked as opaque
+ bool isOpaque = true;
+
+ // corner radius of the layer
+ float cornerRadius = 0;
+
+ // Size of the rounded rectangle we are cropping to
+ half2 cropSize;
+
+ // Texture this layer uses
+ Texture texture;
+ bool textureEnabled = false;
+
+ // color used when texturing is disabled or when setting alpha.
+ half4 color;
+
+ // true if the sampled pixel values are in Y410/BT2020 rather than RGBA
+ bool isY410BT2020 = false;
+
+ // transfer functions for the input/output
+ TransferFunction inputTransferFunction = TransferFunction::LINEAR;
+ TransferFunction outputTransferFunction = TransferFunction::LINEAR;
+
+ float displayMaxLuminance;
+ float maxMasteringLuminance;
+ float maxContentLuminance;
+
+ // projection matrix
+ mat4 projectionMatrix;
+
+ // The color matrix will be applied in linear space right before OETF.
+ mat4 colorMatrix;
+ mat4 inputTransformMatrix;
+ mat4 outputTransformMatrix;
+
+ // True if this layer will draw a shadow.
+ bool drawShadows = false;
+};
+
+} // namespace renderengine
+} // namespace android
+
+#endif /* SF_RENDER_ENGINE_DESCRIPTION_H_ */
diff --git a/media/libstagefright/renderfright/mock/Framebuffer.cpp b/media/libstagefright/renderfright/mock/Framebuffer.cpp
new file mode 100644
index 0000000..fbdcaab
--- /dev/null
+++ b/media/libstagefright/renderfright/mock/Framebuffer.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/mock/Framebuffer.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+// The Google Mock documentation recommends explicit non-header instantiations
+// for better compile time performance.
+Framebuffer::Framebuffer() = default;
+Framebuffer::~Framebuffer() = default;
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/mock/Image.cpp b/media/libstagefright/renderfright/mock/Image.cpp
new file mode 100644
index 0000000..57f4346
--- /dev/null
+++ b/media/libstagefright/renderfright/mock/Image.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/mock/Image.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+// The Google Mock documentation recommends explicit non-header instantiations
+// for better compile time performance.
+Image::Image() = default;
+Image::~Image() = default;
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/mock/RenderEngine.cpp b/media/libstagefright/renderfright/mock/RenderEngine.cpp
new file mode 100644
index 0000000..261636d
--- /dev/null
+++ b/media/libstagefright/renderfright/mock/RenderEngine.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <renderengine/mock/RenderEngine.h>
+
+namespace android {
+namespace renderengine {
+namespace mock {
+
+// The Google Mock documentation recommends explicit non-header instantiations
+// for better compile time performance.
+RenderEngine::RenderEngine() = default;
+RenderEngine::~RenderEngine() = default;
+
+} // namespace mock
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/tests/Android.bp b/media/libstagefright/renderfright/tests/Android.bp
new file mode 100644
index 0000000..9fee646
--- /dev/null
+++ b/media/libstagefright/renderfright/tests/Android.bp
@@ -0,0 +1,41 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_test {
+ name: "librenderfright_test",
+ defaults: ["surfaceflinger_defaults"],
+ test_suites: ["device-tests"],
+ srcs: [
+ "RenderEngineTest.cpp",
+ "RenderEngineThreadedTest.cpp",
+ ],
+ static_libs: [
+ "libgmock",
+ "librenderfright",
+ "librenderfright_mocks",
+ ],
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libEGL",
+ "libGLESv2",
+ "libgui",
+ "liblog",
+ "libnativewindow",
+ "libprocessgroup",
+ "libsync",
+ "libui",
+ "libutils",
+ ],
+}
diff --git a/media/libstagefright/renderfright/tests/RenderEngineTest.cpp b/media/libstagefright/renderfright/tests/RenderEngineTest.cpp
new file mode 100644
index 0000000..730f606
--- /dev/null
+++ b/media/libstagefright/renderfright/tests/RenderEngineTest.cpp
@@ -0,0 +1,1469 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO(b/129481165): remove the #pragma below and fix conversion issues
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wconversion"
+
+#include <chrono>
+#include <condition_variable>
+#include <fstream>
+
+#include <cutils/properties.h>
+#include <gtest/gtest.h>
+#include <renderengine/RenderEngine.h>
+#include <sync/sync.h>
+#include <ui/PixelFormat.h>
+#include "../gl/GLESRenderEngine.h"
+#include "../threaded/RenderEngineThreaded.h"
+
+constexpr int DEFAULT_DISPLAY_WIDTH = 128;
+constexpr int DEFAULT_DISPLAY_HEIGHT = 256;
+constexpr int DEFAULT_DISPLAY_OFFSET = 64;
+constexpr bool WRITE_BUFFER_TO_FILE_ON_FAILURE = false;
+
+namespace android {
+
+struct RenderEngineTest : public ::testing::Test {
+ static void SetUpTestSuite() {
+ sRE = renderengine::gl::GLESRenderEngine::create(
+ renderengine::RenderEngineCreationArgs::Builder()
+ .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
+ .setImageCacheSize(1)
+ .setUseColorManagerment(false)
+ .setEnableProtectedContext(false)
+ .setPrecacheToneMapperShaderOnly(false)
+ .setSupportsBackgroundBlur(true)
+ .setContextPriority(renderengine::RenderEngine::ContextPriority::MEDIUM)
+ .setRenderEngineType(renderengine::RenderEngine::RenderEngineType::GLES)
+ .build());
+ }
+
+ static void TearDownTestSuite() {
+ // The ordering here is important - sCurrentBuffer must live longer
+ // than RenderEngine to avoid a null reference on tear-down.
+ sRE = nullptr;
+ sCurrentBuffer = nullptr;
+ }
+
+ static sp<GraphicBuffer> allocateDefaultBuffer() {
+ return new GraphicBuffer(DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT,
+ HAL_PIXEL_FORMAT_RGBA_8888, 1,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+ GRALLOC_USAGE_HW_RENDER,
+ "output");
+ }
+
+ // Allocates a 1x1 buffer to fill with a solid color
+ static sp<GraphicBuffer> allocateSourceBuffer(uint32_t width, uint32_t height) {
+ return new GraphicBuffer(width, height, HAL_PIXEL_FORMAT_RGBA_8888, 1,
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+ GRALLOC_USAGE_HW_TEXTURE,
+ "input");
+ }
+
+ RenderEngineTest() { mBuffer = allocateDefaultBuffer(); }
+
+ ~RenderEngineTest() {
+ if (WRITE_BUFFER_TO_FILE_ON_FAILURE && ::testing::Test::HasFailure()) {
+ writeBufferToFile("/data/texture_out_");
+ }
+ for (uint32_t texName : mTexNames) {
+ sRE->deleteTextures(1, &texName);
+ EXPECT_FALSE(sRE->isTextureNameKnownForTesting(texName));
+ }
+ }
+
+ void writeBufferToFile(const char* basename) {
+ std::string filename(basename);
+ filename.append(::testing::UnitTest::GetInstance()->current_test_info()->name());
+ filename.append(".ppm");
+ std::ofstream file(filename.c_str(), std::ios::binary);
+ if (!file.is_open()) {
+ ALOGE("Unable to open file: %s", filename.c_str());
+ ALOGE("You may need to do: \"adb shell setenforce 0\" to enable "
+ "surfaceflinger to write debug images");
+ return;
+ }
+
+ uint8_t* pixels;
+ mBuffer->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+
+ file << "P6\n";
+ file << mBuffer->getWidth() << "\n";
+ file << mBuffer->getHeight() << "\n";
+ file << 255 << "\n";
+
+ std::vector<uint8_t> outBuffer(mBuffer->getWidth() * mBuffer->getHeight() * 3);
+ auto outPtr = reinterpret_cast<uint8_t*>(outBuffer.data());
+
+ for (int32_t j = 0; j < mBuffer->getHeight(); j++) {
+ const uint8_t* src = pixels + (mBuffer->getStride() * j) * 4;
+ for (int32_t i = 0; i < mBuffer->getWidth(); i++) {
+ // Only copy R, G and B components
+ outPtr[0] = src[0];
+ outPtr[1] = src[1];
+ outPtr[2] = src[2];
+ outPtr += 3;
+
+ src += 4;
+ }
+ }
+ file.write(reinterpret_cast<char*>(outBuffer.data()), outBuffer.size());
+ mBuffer->unlock();
+ }
+
+ void expectBufferColor(const Region& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a) {
+ size_t c;
+ Rect const* rect = region.getArray(&c);
+ for (size_t i = 0; i < c; i++, rect++) {
+ expectBufferColor(*rect, r, g, b, a);
+ }
+ }
+
+ void expectBufferColor(const Rect& rect, uint8_t r, uint8_t g, uint8_t b, uint8_t a,
+ uint8_t tolerance = 0) {
+ auto colorCompare = [tolerance](const uint8_t* colorA, const uint8_t* colorB) {
+ auto colorBitCompare = [tolerance](uint8_t a, uint8_t b) {
+ uint8_t tmp = a >= b ? a - b : b - a;
+ return tmp <= tolerance;
+ };
+ return std::equal(colorA, colorA + 4, colorB, colorBitCompare);
+ };
+
+ expectBufferColor(rect, r, g, b, a, colorCompare);
+ }
+
+ void expectBufferColor(const Rect& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a,
+ std::function<bool(const uint8_t* a, const uint8_t* b)> colorCompare) {
+ uint8_t* pixels;
+ mBuffer->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ int32_t maxFails = 10;
+ int32_t fails = 0;
+ for (int32_t j = 0; j < region.getHeight(); j++) {
+ const uint8_t* src =
+ pixels + (mBuffer->getStride() * (region.top + j) + region.left) * 4;
+ for (int32_t i = 0; i < region.getWidth(); i++) {
+ const uint8_t expected[4] = {r, g, b, a};
+ bool equal = colorCompare(src, expected);
+ EXPECT_TRUE(equal)
+ << "pixel @ (" << region.left + i << ", " << region.top + j << "): "
+ << "expected (" << static_cast<uint32_t>(r) << ", "
+ << static_cast<uint32_t>(g) << ", " << static_cast<uint32_t>(b) << ", "
+ << static_cast<uint32_t>(a) << "), "
+ << "got (" << static_cast<uint32_t>(src[0]) << ", "
+ << static_cast<uint32_t>(src[1]) << ", " << static_cast<uint32_t>(src[2])
+ << ", " << static_cast<uint32_t>(src[3]) << ")";
+ src += 4;
+ if (!equal && ++fails >= maxFails) {
+ break;
+ }
+ }
+ if (fails >= maxFails) {
+ break;
+ }
+ }
+ mBuffer->unlock();
+ }
+
+ void expectAlpha(const Rect& rect, uint8_t a) {
+ auto colorCompare = [](const uint8_t* colorA, const uint8_t* colorB) {
+ return colorA[3] == colorB[3];
+ };
+ expectBufferColor(rect, 0.0f /* r */, 0.0f /* g */, 0.0f /* b */, a, colorCompare);
+ }
+
+ void expectShadowColor(const renderengine::LayerSettings& castingLayer,
+ const renderengine::ShadowSettings& shadow, const ubyte4& casterColor,
+ const ubyte4& backgroundColor) {
+ const Rect casterRect(castingLayer.geometry.boundaries);
+ Region casterRegion = Region(casterRect);
+ const float casterCornerRadius = castingLayer.geometry.roundedCornersRadius;
+ if (casterCornerRadius > 0.0f) {
+ // ignore the corners if a corner radius is set
+ Rect cornerRect(casterCornerRadius, casterCornerRadius);
+ casterRegion.subtractSelf(cornerRect.offsetTo(casterRect.left, casterRect.top));
+ casterRegion.subtractSelf(
+ cornerRect.offsetTo(casterRect.right - casterCornerRadius, casterRect.top));
+ casterRegion.subtractSelf(
+ cornerRect.offsetTo(casterRect.left, casterRect.bottom - casterCornerRadius));
+ casterRegion.subtractSelf(cornerRect.offsetTo(casterRect.right - casterCornerRadius,
+ casterRect.bottom - casterCornerRadius));
+ }
+
+ const float shadowInset = shadow.length * -1.0f;
+ const Rect casterWithShadow =
+ Rect(casterRect).inset(shadowInset, shadowInset, shadowInset, shadowInset);
+ const Region shadowRegion = Region(casterWithShadow).subtractSelf(casterRect);
+ const Region backgroundRegion = Region(fullscreenRect()).subtractSelf(casterWithShadow);
+
+ // verify casting layer
+ expectBufferColor(casterRegion, casterColor.r, casterColor.g, casterColor.b, casterColor.a);
+
+ // verify shadows by testing just the alpha since it's difficult to validate the shadow color
+ size_t c;
+ Rect const* r = shadowRegion.getArray(&c);
+ for (size_t i = 0; i < c; i++, r++) {
+ expectAlpha(*r, 255);
+ }
+
+ // verify background
+ expectBufferColor(backgroundRegion, backgroundColor.r, backgroundColor.g, backgroundColor.b,
+ backgroundColor.a);
+ }
+
+ static renderengine::ShadowSettings getShadowSettings(const vec2& casterPos, float shadowLength,
+ bool casterIsTranslucent) {
+ renderengine::ShadowSettings shadow;
+ shadow.ambientColor = {0.0f, 0.0f, 0.0f, 0.039f};
+ shadow.spotColor = {0.0f, 0.0f, 0.0f, 0.19f};
+ shadow.lightPos = vec3(casterPos.x, casterPos.y, 0);
+ shadow.lightRadius = 0.0f;
+ shadow.length = shadowLength;
+ shadow.casterIsTranslucent = casterIsTranslucent;
+ return shadow;
+ }
+
+ static Rect fullscreenRect() { return Rect(DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT); }
+
+ static Rect offsetRect() {
+ return Rect(DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT);
+ }
+
+ static Rect offsetRectAtZero() {
+ return Rect(DEFAULT_DISPLAY_WIDTH - DEFAULT_DISPLAY_OFFSET,
+ DEFAULT_DISPLAY_HEIGHT - DEFAULT_DISPLAY_OFFSET);
+ }
+
+ void invokeDraw(renderengine::DisplaySettings settings,
+ std::vector<const renderengine::LayerSettings*> layers,
+ sp<GraphicBuffer> buffer) {
+ base::unique_fd fence;
+ status_t status =
+ sRE->drawLayers(settings, layers, buffer, true, base::unique_fd(), &fence);
+ sCurrentBuffer = buffer;
+
+ int fd = fence.release();
+ if (fd >= 0) {
+ sync_wait(fd, -1);
+ close(fd);
+ }
+
+ ASSERT_EQ(NO_ERROR, status);
+ if (layers.size() > 0) {
+ ASSERT_TRUE(sRE->isFramebufferImageCachedForTesting(buffer->getId()));
+ }
+ }
+
+ void drawEmptyLayers() {
+ renderengine::DisplaySettings settings;
+ std::vector<const renderengine::LayerSettings*> layers;
+ // Meaningless buffer since we don't do any drawing
+ sp<GraphicBuffer> buffer = new GraphicBuffer();
+ invokeDraw(settings, layers, buffer);
+ }
+
+ template <typename SourceVariant>
+ void fillBuffer(half r, half g, half b, half a);
+
+ template <typename SourceVariant>
+ void fillRedBuffer();
+
+ template <typename SourceVariant>
+ void fillGreenBuffer();
+
+ template <typename SourceVariant>
+ void fillBlueBuffer();
+
+ template <typename SourceVariant>
+ void fillRedTransparentBuffer();
+
+ template <typename SourceVariant>
+ void fillRedOffsetBuffer();
+
+ template <typename SourceVariant>
+ void fillBufferPhysicalOffset();
+
+ template <typename SourceVariant>
+ void fillBufferCheckers(uint32_t rotation);
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate0();
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate90();
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate180();
+
+ template <typename SourceVariant>
+ void fillBufferCheckersRotate270();
+
+ template <typename SourceVariant>
+ void fillBufferWithLayerTransform();
+
+ template <typename SourceVariant>
+ void fillBufferLayerTransform();
+
+ template <typename SourceVariant>
+ void fillBufferWithColorTransform();
+
+ template <typename SourceVariant>
+ void fillBufferColorTransform();
+
+ template <typename SourceVariant>
+ void fillRedBufferWithRoundedCorners();
+
+ template <typename SourceVariant>
+ void fillBufferWithRoundedCorners();
+
+ template <typename SourceVariant>
+ void fillBufferAndBlurBackground();
+
+ template <typename SourceVariant>
+ void overlayCorners();
+
+ void fillRedBufferTextureTransform();
+
+ void fillBufferTextureTransform();
+
+ void fillRedBufferWithPremultiplyAlpha();
+
+ void fillBufferWithPremultiplyAlpha();
+
+ void fillRedBufferWithoutPremultiplyAlpha();
+
+ void fillBufferWithoutPremultiplyAlpha();
+
+ void fillGreenColorBufferThenClearRegion();
+
+ void clearLeftRegion();
+
+ void clearRegion();
+
+ template <typename SourceVariant>
+ void drawShadow(const renderengine::LayerSettings& castingLayer,
+ const renderengine::ShadowSettings& shadow, const ubyte4& casterColor,
+ const ubyte4& backgroundColor);
+
+ // Keep around the same renderengine object to save on initialization time.
+ // For now, exercise the GL backend directly so that some caching specifics
+ // can be tested without changing the interface.
+ static std::unique_ptr<renderengine::gl::GLESRenderEngine> sRE;
+ // Hack to avoid NPE in the EGL driver: the GraphicBuffer needs to
+ // be freed *after* RenderEngine is destroyed, so that the EGL image is
+ // destroyed first.
+ static sp<GraphicBuffer> sCurrentBuffer;
+
+ sp<GraphicBuffer> mBuffer;
+
+ std::vector<uint32_t> mTexNames;
+};
+
+std::unique_ptr<renderengine::gl::GLESRenderEngine> RenderEngineTest::sRE = nullptr;
+sp<GraphicBuffer> RenderEngineTest::sCurrentBuffer = nullptr;
+
+struct ColorSourceVariant {
+ static void fillColor(renderengine::LayerSettings& layer, half r, half g, half b,
+ RenderEngineTest* /*fixture*/) {
+ layer.source.solidColor = half3(r, g, b);
+ }
+};
+
+struct RelaxOpaqueBufferVariant {
+ static void setOpaqueBit(renderengine::LayerSettings& layer) {
+ layer.source.buffer.isOpaque = false;
+ }
+
+ static uint8_t getAlphaChannel() { return 255; }
+};
+
+struct ForceOpaqueBufferVariant {
+ static void setOpaqueBit(renderengine::LayerSettings& layer) {
+ layer.source.buffer.isOpaque = true;
+ }
+
+ static uint8_t getAlphaChannel() {
+ // The isOpaque bit will override the alpha channel, so this value
+ // can be arbitrary.
+ return 10;
+ }
+};
+
+template <typename OpaquenessVariant>
+struct BufferSourceVariant {
+ static void fillColor(renderengine::LayerSettings& layer, half r, half g, half b,
+ RenderEngineTest* fixture) {
+ sp<GraphicBuffer> buf = RenderEngineTest::allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ fixture->sRE->genTextures(1, &texName);
+ fixture->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+
+ for (int32_t j = 0; j < buf->getHeight(); j++) {
+ uint8_t* iter = pixels + (buf->getStride() * j) * 4;
+ for (int32_t i = 0; i < buf->getWidth(); i++) {
+ iter[0] = uint8_t(r * 255);
+ iter[1] = uint8_t(g * 255);
+ iter[2] = uint8_t(b * 255);
+ iter[3] = OpaquenessVariant::getAlphaChannel();
+ iter += 4;
+ }
+ }
+
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ OpaquenessVariant::setOpaqueBit(layer);
+ }
+};
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBuffer(half r, half g, half b, half a) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ SourceVariant::fillColor(layer, r, g, b, this);
+ layer.alpha = a;
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedBuffer() {
+ fillBuffer<SourceVariant>(1.0f, 0.0f, 0.0f, 1.0f);
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillGreenBuffer() {
+ fillBuffer<SourceVariant>(0.0f, 1.0f, 0.0f, 1.0f);
+ expectBufferColor(fullscreenRect(), 0, 255, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBlueBuffer() {
+ fillBuffer<SourceVariant>(0.0f, 0.0f, 1.0f, 1.0f);
+ expectBufferColor(fullscreenRect(), 0, 0, 255, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedTransparentBuffer() {
+ fillBuffer<SourceVariant>(1.0f, 0.0f, 0.0f, .2f);
+ expectBufferColor(fullscreenRect(), 51, 0, 0, 51);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedOffsetBuffer() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = offsetRect();
+ settings.clip = offsetRectAtZero();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = offsetRectAtZero().toFloatRect();
+ SourceVariant::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0f;
+
+ layers.push_back(&layer);
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferPhysicalOffset() {
+ fillRedOffsetBuffer<SourceVariant>();
+
+ expectBufferColor(Rect(DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+ Rect offsetRegionLeft(DEFAULT_DISPLAY_OFFSET, DEFAULT_DISPLAY_HEIGHT);
+ Rect offsetRegionTop(DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_OFFSET);
+
+ expectBufferColor(offsetRegionLeft, 0, 0, 0, 0);
+ expectBufferColor(offsetRegionTop, 0, 0, 0, 0);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckers(uint32_t orientationFlag) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 2x2
+ settings.clip = Rect(2, 2);
+ settings.orientation = orientationFlag;
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layerOne;
+ Rect rectOne(0, 0, 1, 1);
+ layerOne.geometry.boundaries = rectOne.toFloatRect();
+ SourceVariant::fillColor(layerOne, 1.0f, 0.0f, 0.0f, this);
+ layerOne.alpha = 1.0f;
+
+ renderengine::LayerSettings layerTwo;
+ Rect rectTwo(0, 1, 1, 2);
+ layerTwo.geometry.boundaries = rectTwo.toFloatRect();
+ SourceVariant::fillColor(layerTwo, 0.0f, 1.0f, 0.0f, this);
+ layerTwo.alpha = 1.0f;
+
+ renderengine::LayerSettings layerThree;
+ Rect rectThree(1, 0, 2, 1);
+ layerThree.geometry.boundaries = rectThree.toFloatRect();
+ SourceVariant::fillColor(layerThree, 0.0f, 0.0f, 1.0f, this);
+ layerThree.alpha = 1.0f;
+
+ layers.push_back(&layerOne);
+ layers.push_back(&layerTwo);
+ layers.push_back(&layerThree);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate0() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_0);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 255, 0, 0,
+ 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 0, 0, 255, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 255, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate90() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_90);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 0, 255, 0,
+ 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 255, 0, 0, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 255, 255);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate180() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_180);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 0, 0, 0,
+ 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 0, 255, 0, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 255, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferCheckersRotate270() {
+ fillBufferCheckers<SourceVariant>(ui::Transform::ROT_270);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2), 0, 0, 255,
+ 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT / 2),
+ 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 255, 0, 255);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT / 2, DEFAULT_DISPLAY_WIDTH / 2,
+ DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferWithLayerTransform() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 2x2
+ settings.clip = Rect(2, 2);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+ // Translate one pixel diagonally
+ layer.geometry.positionTransform = mat4(1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1);
+ SourceVariant::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.source.solidColor = half3(1.0f, 0.0f, 0.0f);
+ layer.alpha = 1.0f;
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferLayerTransform() {
+ fillBufferWithLayerTransform<SourceVariant>();
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT / 2), 0, 0, 0, 0);
+ expectBufferColor(Rect(0, 0, DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT / 2,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 255, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferWithColorTransform() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+ SourceVariant::fillColor(layer, 0.5f, 0.25f, 0.125f, this);
+ layer.alpha = 1.0f;
+
+ // construct a fake color matrix
+ // annihilate green and blue channels
+ settings.colorTransform = mat4::scale(vec4(1, 0, 0, 1));
+ // set red channel to red + green
+ layer.colorTransform = mat4(1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1);
+
+ layer.alpha = 1.0f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferColorTransform() {
+ fillBufferWithColorTransform<SourceVariant>();
+ expectBufferColor(fullscreenRect(), 191, 0, 0, 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillRedBufferWithRoundedCorners() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ layer.geometry.roundedCornersRadius = 5.0f;
+ layer.geometry.roundedCornersCrop = fullscreenRect().toFloatRect();
+ SourceVariant::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0f;
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferWithRoundedCorners() {
+ fillRedBufferWithRoundedCorners<SourceVariant>();
+ // Corners should be ignored...
+ expectBufferColor(Rect(0, 0, 1, 1), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH - 1, 0, DEFAULT_DISPLAY_WIDTH, 1), 0, 0, 0, 0);
+ expectBufferColor(Rect(0, DEFAULT_DISPLAY_HEIGHT - 1, 1, DEFAULT_DISPLAY_HEIGHT), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH - 1, DEFAULT_DISPLAY_HEIGHT - 1,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+ // ...And the non-rounded portion should be red.
+ // Other pixels may be anti-aliased, so let's not check those.
+ expectBufferColor(Rect(5, 5, DEFAULT_DISPLAY_WIDTH - 5, DEFAULT_DISPLAY_HEIGHT - 5), 255, 0, 0,
+ 255);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::fillBufferAndBlurBackground() {
+ char value[PROPERTY_VALUE_MAX];
+ property_get("ro.surface_flinger.supports_background_blur", value, "0");
+ if (!atoi(value)) {
+ // This device doesn't support blurs, no-op.
+ return;
+ }
+
+ auto blurRadius = 50;
+ auto center = DEFAULT_DISPLAY_WIDTH / 2;
+
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings backgroundLayer;
+ backgroundLayer.geometry.boundaries = fullscreenRect().toFloatRect();
+ SourceVariant::fillColor(backgroundLayer, 0.0f, 1.0f, 0.0f, this);
+ backgroundLayer.alpha = 1.0f;
+ layers.push_back(&backgroundLayer);
+
+ renderengine::LayerSettings leftLayer;
+ leftLayer.geometry.boundaries =
+ Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT).toFloatRect();
+ SourceVariant::fillColor(leftLayer, 1.0f, 0.0f, 0.0f, this);
+ leftLayer.alpha = 1.0f;
+ layers.push_back(&leftLayer);
+
+ renderengine::LayerSettings blurLayer;
+ blurLayer.geometry.boundaries = fullscreenRect().toFloatRect();
+ blurLayer.backgroundBlurRadius = blurRadius;
+ blurLayer.alpha = 0;
+ layers.push_back(&blurLayer);
+
+ invokeDraw(settings, layers, mBuffer);
+
+ expectBufferColor(Rect(center - 1, center - 5, center, center + 5), 150, 150, 0, 255,
+ 50 /* tolerance */);
+ expectBufferColor(Rect(center, center - 5, center + 1, center + 5), 150, 150, 0, 255,
+ 50 /* tolerance */);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::overlayCorners() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layersFirst;
+
+ renderengine::LayerSettings layerOne;
+ layerOne.geometry.boundaries =
+ FloatRect(0, 0, DEFAULT_DISPLAY_WIDTH / 3.0, DEFAULT_DISPLAY_HEIGHT / 3.0);
+ SourceVariant::fillColor(layerOne, 1.0f, 0.0f, 0.0f, this);
+ layerOne.alpha = 0.2;
+
+ layersFirst.push_back(&layerOne);
+ invokeDraw(settings, layersFirst, mBuffer);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3, DEFAULT_DISPLAY_HEIGHT / 3), 51, 0, 0, 51);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3 + 1, DEFAULT_DISPLAY_HEIGHT / 3 + 1,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+
+ std::vector<const renderengine::LayerSettings*> layersSecond;
+ renderengine::LayerSettings layerTwo;
+ layerTwo.geometry.boundaries =
+ FloatRect(DEFAULT_DISPLAY_WIDTH / 3.0, DEFAULT_DISPLAY_HEIGHT / 3.0,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT);
+ SourceVariant::fillColor(layerTwo, 0.0f, 1.0f, 0.0f, this);
+ layerTwo.alpha = 1.0f;
+
+ layersSecond.push_back(&layerTwo);
+ invokeDraw(settings, layersSecond, mBuffer);
+
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3, DEFAULT_DISPLAY_HEIGHT / 3), 0, 0, 0, 0);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 3 + 1, DEFAULT_DISPLAY_HEIGHT / 3 + 1,
+ DEFAULT_DISPLAY_WIDTH, DEFAULT_DISPLAY_HEIGHT),
+ 0, 255, 0, 255);
+}
+
+void RenderEngineTest::fillRedBufferTextureTransform() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ // Here we allocate a checkerboard texture, but transform the texture
+ // coordinates so that only the upper left quadrant is applied.
+ sp<GraphicBuffer> buf = allocateSourceBuffer(2, 2);
+ uint32_t texName;
+ RenderEngineTest::sRE->genTextures(1, &texName);
+ this->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ // Red top left, Green top right, Blue bottom left, Black bottom right
+ pixels[0] = 255;
+ pixels[1] = 0;
+ pixels[2] = 0;
+ pixels[3] = 255;
+ pixels[4] = 0;
+ pixels[5] = 255;
+ pixels[6] = 0;
+ pixels[7] = 255;
+ pixels[8] = 0;
+ pixels[9] = 0;
+ pixels[10] = 255;
+ pixels[11] = 255;
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ // Transform coordinates to only be inside the red quadrant.
+ layer.source.buffer.textureTransform = mat4::scale(vec4(0.2, 0.2, 1, 1));
+ layer.alpha = 1.0f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::fillBufferTextureTransform() {
+ fillRedBufferTextureTransform();
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+void RenderEngineTest::fillRedBufferWithPremultiplyAlpha() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 1x1
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ RenderEngineTest::sRE->genTextures(1, &texName);
+ this->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ pixels[0] = 255;
+ pixels[1] = 0;
+ pixels[2] = 0;
+ pixels[3] = 255;
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ layer.source.buffer.usePremultipliedAlpha = true;
+ layer.alpha = 0.5f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::fillBufferWithPremultiplyAlpha() {
+ fillRedBufferWithPremultiplyAlpha();
+ expectBufferColor(fullscreenRect(), 128, 0, 0, 128);
+}
+
+void RenderEngineTest::fillRedBufferWithoutPremultiplyAlpha() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 1x1
+ settings.clip = Rect(1, 1);
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ RenderEngineTest::sRE->genTextures(1, &texName);
+ this->mTexNames.push_back(texName);
+
+ uint8_t* pixels;
+ buf->lock(GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN,
+ reinterpret_cast<void**>(&pixels));
+ pixels[0] = 255;
+ pixels[1] = 0;
+ pixels[2] = 0;
+ pixels[3] = 255;
+ buf->unlock();
+
+ layer.source.buffer.buffer = buf;
+ layer.source.buffer.textureName = texName;
+ layer.source.buffer.usePremultipliedAlpha = false;
+ layer.alpha = 0.5f;
+ layer.geometry.boundaries = Rect(1, 1).toFloatRect();
+
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::fillBufferWithoutPremultiplyAlpha() {
+ fillRedBufferWithoutPremultiplyAlpha();
+ expectBufferColor(fullscreenRect(), 128, 0, 0, 64, 1);
+}
+
+void RenderEngineTest::clearLeftRegion() {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ // Here logical space is 4x4
+ settings.clip = Rect(4, 4);
+ settings.clearRegion = Region(Rect(2, 4));
+ std::vector<const renderengine::LayerSettings*> layers;
+ // Fake layer; without bounds it should not render anything.
+ renderengine::LayerSettings layer;
+ layers.push_back(&layer);
+ invokeDraw(settings, layers, mBuffer);
+}
+
+void RenderEngineTest::clearRegion() {
+ // Reuse mBuffer
+ clearLeftRegion();
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, DEFAULT_DISPLAY_HEIGHT), 0, 0, 0, 255);
+ expectBufferColor(Rect(DEFAULT_DISPLAY_WIDTH / 2, 0, DEFAULT_DISPLAY_WIDTH,
+ DEFAULT_DISPLAY_HEIGHT),
+ 0, 0, 0, 0);
+}
+
+template <typename SourceVariant>
+void RenderEngineTest::drawShadow(const renderengine::LayerSettings& castingLayer,
+ const renderengine::ShadowSettings& shadow,
+ const ubyte4& casterColor, const ubyte4& backgroundColor) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ // add background layer
+ renderengine::LayerSettings bgLayer;
+ bgLayer.geometry.boundaries = fullscreenRect().toFloatRect();
+ ColorSourceVariant::fillColor(bgLayer, backgroundColor.r / 255.0f, backgroundColor.g / 255.0f,
+ backgroundColor.b / 255.0f, this);
+ bgLayer.alpha = backgroundColor.a / 255.0f;
+ layers.push_back(&bgLayer);
+
+ // add shadow layer
+ renderengine::LayerSettings shadowLayer;
+ shadowLayer.geometry.boundaries = castingLayer.geometry.boundaries;
+ shadowLayer.alpha = castingLayer.alpha;
+ shadowLayer.shadow = shadow;
+ layers.push_back(&shadowLayer);
+
+ // add layer casting the shadow
+ renderengine::LayerSettings layer = castingLayer;
+ SourceVariant::fillColor(layer, casterColor.r / 255.0f, casterColor.g / 255.0f,
+ casterColor.b / 255.0f, this);
+ layers.push_back(&layer);
+
+ invokeDraw(settings, layers, mBuffer);
+}
+
+TEST_F(RenderEngineTest, drawLayers_noLayersToDraw) {
+ drawEmptyLayers();
+}
+
+TEST_F(RenderEngineTest, drawLayers_nullOutputBuffer) {
+ renderengine::DisplaySettings settings;
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layers.push_back(&layer);
+ base::unique_fd fence;
+ status_t status = sRE->drawLayers(settings, layers, nullptr, true, base::unique_fd(), &fence);
+
+ ASSERT_EQ(BAD_VALUE, status);
+}
+
+TEST_F(RenderEngineTest, drawLayers_nullOutputFence) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ status_t status = sRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd(), nullptr);
+ sCurrentBuffer = mBuffer;
+ ASSERT_EQ(NO_ERROR, status);
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+TEST_F(RenderEngineTest, drawLayers_doesNotCacheFramebuffer) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ status_t status = sRE->drawLayers(settings, layers, mBuffer, false, base::unique_fd(), nullptr);
+ sCurrentBuffer = mBuffer;
+ ASSERT_EQ(NO_ERROR, status);
+ ASSERT_FALSE(sRE->isFramebufferImageCachedForTesting(mBuffer->getId()));
+ expectBufferColor(fullscreenRect(), 255, 0, 0, 255);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedBuffer_colorSource) {
+ fillRedBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillGreenBuffer_colorSource) {
+ fillGreenBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBlueBuffer_colorSource) {
+ fillBlueBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedTransparentBuffer_colorSource) {
+ fillRedTransparentBuffer<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_colorSource) {
+ fillBufferPhysicalOffset<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_colorSource) {
+ fillBufferCheckersRotate0<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_colorSource) {
+ fillBufferCheckersRotate90<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_colorSource) {
+ fillBufferCheckersRotate180<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_colorSource) {
+ fillBufferCheckersRotate270<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferLayerTransform_colorSource) {
+ fillBufferLayerTransform<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferColorTransform_colorSource) {
+ fillBufferLayerTransform<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferRoundedCorners_colorSource) {
+ fillBufferWithRoundedCorners<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_colorSource) {
+ fillBufferAndBlurBackground<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_overlayCorners_colorSource) {
+ overlayCorners<ColorSourceVariant>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedBuffer_opaqueBufferSource) {
+ fillRedBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillGreenBuffer_opaqueBufferSource) {
+ fillGreenBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBlueBuffer_opaqueBufferSource) {
+ fillBlueBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedTransparentBuffer_opaqueBufferSource) {
+ fillRedTransparentBuffer<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_opaqueBufferSource) {
+ fillBufferPhysicalOffset<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_opaqueBufferSource) {
+ fillBufferCheckersRotate0<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_opaqueBufferSource) {
+ fillBufferCheckersRotate90<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_opaqueBufferSource) {
+ fillBufferCheckersRotate180<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_opaqueBufferSource) {
+ fillBufferCheckersRotate270<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferLayerTransform_opaqueBufferSource) {
+ fillBufferLayerTransform<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferColorTransform_opaqueBufferSource) {
+ fillBufferLayerTransform<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferRoundedCorners_opaqueBufferSource) {
+ fillBufferWithRoundedCorners<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_opaqueBufferSource) {
+ fillBufferAndBlurBackground<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_overlayCorners_opaqueBufferSource) {
+ overlayCorners<BufferSourceVariant<ForceOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedBuffer_bufferSource) {
+ fillRedBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillGreenBuffer_bufferSource) {
+ fillGreenBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBlueBuffer_bufferSource) {
+ fillBlueBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillRedTransparentBuffer_bufferSource) {
+ fillRedTransparentBuffer<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferPhysicalOffset_bufferSource) {
+ fillBufferPhysicalOffset<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate0_bufferSource) {
+ fillBufferCheckersRotate0<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate90_bufferSource) {
+ fillBufferCheckersRotate90<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate180_bufferSource) {
+ fillBufferCheckersRotate180<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferCheckersRotate270_bufferSource) {
+ fillBufferCheckersRotate270<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferLayerTransform_bufferSource) {
+ fillBufferLayerTransform<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferColorTransform_bufferSource) {
+ fillBufferLayerTransform<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferRoundedCorners_bufferSource) {
+ fillBufferWithRoundedCorners<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferAndBlurBackground_bufferSource) {
+ fillBufferAndBlurBackground<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_overlayCorners_bufferSource) {
+ overlayCorners<BufferSourceVariant<RelaxOpaqueBufferVariant>>();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBufferTextureTransform) {
+ fillBufferTextureTransform();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBuffer_premultipliesAlpha) {
+ fillBufferWithPremultiplyAlpha();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillBuffer_withoutPremultiplyingAlpha) {
+ fillBufferWithoutPremultiplyAlpha();
+}
+
+TEST_F(RenderEngineTest, drawLayers_clearRegion) {
+ clearRegion();
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillsBufferAndCachesImages) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+
+ layers.push_back(&layer);
+ invokeDraw(settings, layers, mBuffer);
+ uint64_t bufferId = layer.source.buffer.buffer->getId();
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->unbindExternalTextureBufferForTesting(bufferId);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+}
+
+TEST_F(RenderEngineTest, bindExternalBuffer_withNullBuffer) {
+ status_t result = sRE->bindExternalTextureBuffer(0, nullptr, nullptr);
+ ASSERT_EQ(BAD_VALUE, result);
+}
+
+TEST_F(RenderEngineTest, bindExternalBuffer_cachesImages) {
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint32_t texName;
+ sRE->genTextures(1, &texName);
+ mTexNames.push_back(texName);
+
+ sRE->bindExternalTextureBuffer(texName, buf, nullptr);
+ uint64_t bufferId = buf->getId();
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->unbindExternalTextureBufferForTesting(bufferId);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+}
+
+TEST_F(RenderEngineTest, cacheExternalBuffer_withNullBuffer) {
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->cacheExternalTextureBufferForTesting(nullptr);
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_TRUE(barrier->isOpen);
+ EXPECT_EQ(BAD_VALUE, barrier->result);
+}
+
+TEST_F(RenderEngineTest, cacheExternalBuffer_cachesImages) {
+ sp<GraphicBuffer> buf = allocateSourceBuffer(1, 1);
+ uint64_t bufferId = buf->getId();
+ std::shared_ptr<renderengine::gl::ImageManager::Barrier> barrier =
+ sRE->cacheExternalTextureBufferForTesting(buf);
+ {
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+ }
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ barrier = sRE->unbindExternalTextureBufferForTesting(bufferId);
+ {
+ std::lock_guard<std::mutex> lock(barrier->mutex);
+ ASSERT_TRUE(barrier->condition.wait_for(barrier->mutex, std::chrono::seconds(5),
+ [&]() REQUIRES(barrier->mutex) {
+ return barrier->isOpen;
+ }));
+ EXPECT_EQ(NO_ERROR, barrier->result);
+ }
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterLayerMinSize) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(1, 1);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<ColorSourceVariant>(castingLayer, settings, casterColor, backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterColorLayer) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<ColorSourceVariant>(castingLayer, settings, casterColor, backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterOpaqueBufferLayer) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<BufferSourceVariant<ForceOpaqueBufferVariant>>(castingLayer, settings, casterColor,
+ backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_casterWithRoundedCorner) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.geometry.roundedCornersRadius = 3.0f;
+ castingLayer.geometry.roundedCornersCrop = casterBounds.toFloatRect();
+ castingLayer.alpha = 1.0f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ false /* casterIsTranslucent */);
+
+ drawShadow<BufferSourceVariant<ForceOpaqueBufferVariant>>(castingLayer, settings, casterColor,
+ backgroundColor);
+ expectShadowColor(castingLayer, settings, casterColor, backgroundColor);
+}
+
+TEST_F(RenderEngineTest, drawLayers_fillShadow_translucentCasterWithAlpha) {
+ const ubyte4 casterColor(255, 0, 0, 255);
+ const ubyte4 backgroundColor(255, 255, 255, 255);
+ const float shadowLength = 5.0f;
+ Rect casterBounds(DEFAULT_DISPLAY_WIDTH / 3.0f, DEFAULT_DISPLAY_HEIGHT / 3.0f);
+ casterBounds.offsetBy(shadowLength + 1, shadowLength + 1);
+ renderengine::LayerSettings castingLayer;
+ castingLayer.geometry.boundaries = casterBounds.toFloatRect();
+ castingLayer.alpha = 0.5f;
+ renderengine::ShadowSettings settings =
+ getShadowSettings(vec2(casterBounds.left, casterBounds.top), shadowLength,
+ true /* casterIsTranslucent */);
+
+ drawShadow<BufferSourceVariant<RelaxOpaqueBufferVariant>>(castingLayer, settings, casterColor,
+ backgroundColor);
+
+ // verify only the background since the shadow will draw behind the caster
+ const float shadowInset = settings.length * -1.0f;
+ const Rect casterWithShadow =
+ Rect(casterBounds).inset(shadowInset, shadowInset, shadowInset, shadowInset);
+ const Region backgroundRegion = Region(fullscreenRect()).subtractSelf(casterWithShadow);
+ expectBufferColor(backgroundRegion, backgroundColor.r, backgroundColor.g, backgroundColor.b,
+ backgroundColor.a);
+}
+
+TEST_F(RenderEngineTest, cleanupPostRender_cleansUpOnce) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ base::unique_fd fenceOne;
+ sRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd(), &fenceOne);
+ base::unique_fd fenceTwo;
+ sRE->drawLayers(settings, layers, mBuffer, true, std::move(fenceOne), &fenceTwo);
+
+ const int fd = fenceTwo.get();
+ if (fd >= 0) {
+ sync_wait(fd, -1);
+ }
+ // Only cleanup the first time.
+ EXPECT_TRUE(sRE->cleanupPostRender(
+ renderengine::RenderEngine::CleanupMode::CLEAN_OUTPUT_RESOURCES));
+ EXPECT_FALSE(sRE->cleanupPostRender(
+ renderengine::RenderEngine::CleanupMode::CLEAN_OUTPUT_RESOURCES));
+}
+
+TEST_F(RenderEngineTest, cleanupPostRender_whenCleaningAll_replacesTextureMemory) {
+ renderengine::DisplaySettings settings;
+ settings.physicalDisplay = fullscreenRect();
+ settings.clip = fullscreenRect();
+
+ std::vector<const renderengine::LayerSettings*> layers;
+ renderengine::LayerSettings layer;
+ layer.geometry.boundaries = fullscreenRect().toFloatRect();
+ BufferSourceVariant<ForceOpaqueBufferVariant>::fillColor(layer, 1.0f, 0.0f, 0.0f, this);
+ layer.alpha = 1.0;
+ layers.push_back(&layer);
+
+ base::unique_fd fence;
+ sRE->drawLayers(settings, layers, mBuffer, true, base::unique_fd(), &fence);
+
+ const int fd = fence.get();
+ if (fd >= 0) {
+ sync_wait(fd, -1);
+ }
+
+ uint64_t bufferId = layer.source.buffer.buffer->getId();
+ uint32_t texName = layer.source.buffer.textureName;
+ EXPECT_TRUE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(bufferId, sRE->getBufferIdForTextureNameForTesting(texName));
+
+ EXPECT_TRUE(sRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL));
+
+ // Now check that our view of memory is good.
+ EXPECT_FALSE(sRE->isImageCachedForTesting(bufferId));
+ EXPECT_EQ(std::nullopt, sRE->getBufferIdForTextureNameForTesting(bufferId));
+ EXPECT_TRUE(sRE->isTextureNameKnownForTesting(texName));
+}
+
+} // namespace android
+
+// TODO(b/129481165): remove the #pragma below and fix conversion issues
+#pragma clang diagnostic pop // ignored "-Wconversion"
diff --git a/media/libstagefright/renderfright/tests/RenderEngineThreadedTest.cpp b/media/libstagefright/renderfright/tests/RenderEngineThreadedTest.cpp
new file mode 100644
index 0000000..97c7442
--- /dev/null
+++ b/media/libstagefright/renderfright/tests/RenderEngineThreadedTest.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cutils/properties.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <renderengine/mock/RenderEngine.h>
+#include "../threaded/RenderEngineThreaded.h"
+
+namespace android {
+
+using testing::_;
+using testing::Eq;
+using testing::Mock;
+using testing::Return;
+
+struct RenderEngineThreadedTest : public ::testing::Test {
+ ~RenderEngineThreadedTest() {}
+
+ void SetUp() override {
+ mThreadedRE = renderengine::threaded::RenderEngineThreaded::create(
+ [this]() { return std::unique_ptr<renderengine::RenderEngine>(mRenderEngine); });
+ }
+
+ std::unique_ptr<renderengine::threaded::RenderEngineThreaded> mThreadedRE;
+ renderengine::mock::RenderEngine* mRenderEngine = new renderengine::mock::RenderEngine();
+};
+
+TEST_F(RenderEngineThreadedTest, dump) {
+ std::string testString = "XYZ";
+ EXPECT_CALL(*mRenderEngine, dump(_));
+ mThreadedRE->dump(testString);
+}
+
+TEST_F(RenderEngineThreadedTest, primeCache) {
+ EXPECT_CALL(*mRenderEngine, primeCache());
+ mThreadedRE->primeCache();
+}
+
+TEST_F(RenderEngineThreadedTest, genTextures) {
+ uint32_t texName;
+ EXPECT_CALL(*mRenderEngine, genTextures(1, &texName));
+ mThreadedRE->genTextures(1, &texName);
+}
+
+TEST_F(RenderEngineThreadedTest, deleteTextures) {
+ uint32_t texName;
+ EXPECT_CALL(*mRenderEngine, deleteTextures(1, &texName));
+ mThreadedRE->deleteTextures(1, &texName);
+}
+
+TEST_F(RenderEngineThreadedTest, bindExternalBuffer_nullptrBuffer) {
+ EXPECT_CALL(*mRenderEngine, bindExternalTextureBuffer(0, Eq(nullptr), Eq(nullptr)))
+ .WillOnce(Return(BAD_VALUE));
+ status_t result = mThreadedRE->bindExternalTextureBuffer(0, nullptr, nullptr);
+ ASSERT_EQ(BAD_VALUE, result);
+}
+
+TEST_F(RenderEngineThreadedTest, bindExternalBuffer_withBuffer) {
+ sp<GraphicBuffer> buf = new GraphicBuffer();
+ EXPECT_CALL(*mRenderEngine, bindExternalTextureBuffer(0, buf, Eq(nullptr)))
+ .WillOnce(Return(NO_ERROR));
+ status_t result = mThreadedRE->bindExternalTextureBuffer(0, buf, nullptr);
+ ASSERT_EQ(NO_ERROR, result);
+}
+
+TEST_F(RenderEngineThreadedTest, cacheExternalTextureBuffer_nullptr) {
+ EXPECT_CALL(*mRenderEngine, cacheExternalTextureBuffer(Eq(nullptr)));
+ mThreadedRE->cacheExternalTextureBuffer(nullptr);
+}
+
+TEST_F(RenderEngineThreadedTest, cacheExternalTextureBuffer_withBuffer) {
+ sp<GraphicBuffer> buf = new GraphicBuffer();
+ EXPECT_CALL(*mRenderEngine, cacheExternalTextureBuffer(buf));
+ mThreadedRE->cacheExternalTextureBuffer(buf);
+}
+
+TEST_F(RenderEngineThreadedTest, unbindExternalTextureBuffer) {
+ EXPECT_CALL(*mRenderEngine, unbindExternalTextureBuffer(0x0));
+ mThreadedRE->unbindExternalTextureBuffer(0x0);
+}
+
+TEST_F(RenderEngineThreadedTest, bindFrameBuffer_returnsBadValue) {
+ std::unique_ptr<renderengine::Framebuffer> framebuffer;
+ EXPECT_CALL(*mRenderEngine, bindFrameBuffer(framebuffer.get())).WillOnce(Return(BAD_VALUE));
+ status_t result = mThreadedRE->bindFrameBuffer(framebuffer.get());
+ ASSERT_EQ(BAD_VALUE, result);
+}
+
+TEST_F(RenderEngineThreadedTest, bindFrameBuffer_returnsNoError) {
+ std::unique_ptr<renderengine::Framebuffer> framebuffer;
+ EXPECT_CALL(*mRenderEngine, bindFrameBuffer(framebuffer.get())).WillOnce(Return(NO_ERROR));
+ status_t result = mThreadedRE->bindFrameBuffer(framebuffer.get());
+ ASSERT_EQ(NO_ERROR, result);
+}
+
+TEST_F(RenderEngineThreadedTest, unbindFrameBuffer) {
+ std::unique_ptr<renderengine::Framebuffer> framebuffer;
+ EXPECT_CALL(*mRenderEngine, unbindFrameBuffer(framebuffer.get()));
+ mThreadedRE->unbindFrameBuffer(framebuffer.get());
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxTextureSize_returns20) {
+ size_t size = 20;
+ EXPECT_CALL(*mRenderEngine, getMaxTextureSize()).WillOnce(Return(size));
+ size_t result = mThreadedRE->getMaxTextureSize();
+ ASSERT_EQ(size, result);
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxTextureSize_returns0) {
+ size_t size = 0;
+ EXPECT_CALL(*mRenderEngine, getMaxTextureSize()).WillOnce(Return(size));
+ size_t result = mThreadedRE->getMaxTextureSize();
+ ASSERT_EQ(size, result);
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxViewportDims_returns20) {
+ size_t dims = 20;
+ EXPECT_CALL(*mRenderEngine, getMaxViewportDims()).WillOnce(Return(dims));
+ size_t result = mThreadedRE->getMaxViewportDims();
+ ASSERT_EQ(dims, result);
+}
+
+TEST_F(RenderEngineThreadedTest, getMaxViewportDims_returns0) {
+ size_t dims = 0;
+ EXPECT_CALL(*mRenderEngine, getMaxViewportDims()).WillOnce(Return(dims));
+ size_t result = mThreadedRE->getMaxViewportDims();
+ ASSERT_EQ(dims, result);
+}
+
+TEST_F(RenderEngineThreadedTest, isProtected_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(false));
+ status_t result = mThreadedRE->isProtected();
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, isProtected_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine, isProtected()).WillOnce(Return(true));
+ size_t result = mThreadedRE->isProtected();
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, supportsProtectedContent_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine, supportsProtectedContent()).WillOnce(Return(false));
+ status_t result = mThreadedRE->supportsProtectedContent();
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, supportsProtectedContent_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine, supportsProtectedContent()).WillOnce(Return(true));
+ status_t result = mThreadedRE->supportsProtectedContent();
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, useProtectedContext_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine, useProtectedContext(false)).WillOnce(Return(false));
+ status_t result = mThreadedRE->useProtectedContext(false);
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, useProtectedContext_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine, useProtectedContext(false)).WillOnce(Return(true));
+ status_t result = mThreadedRE->useProtectedContext(false);
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, cleanupPostRender_returnsFalse) {
+ EXPECT_CALL(*mRenderEngine,
+ cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL))
+ .WillOnce(Return(false));
+ status_t result =
+ mThreadedRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL);
+ ASSERT_EQ(false, result);
+}
+
+TEST_F(RenderEngineThreadedTest, cleanupPostRender_returnsTrue) {
+ EXPECT_CALL(*mRenderEngine,
+ cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL))
+ .WillOnce(Return(true));
+ status_t result =
+ mThreadedRE->cleanupPostRender(renderengine::RenderEngine::CleanupMode::CLEAN_ALL);
+ ASSERT_EQ(true, result);
+}
+
+TEST_F(RenderEngineThreadedTest, drawLayers) {
+ renderengine::DisplaySettings settings;
+ std::vector<const renderengine::LayerSettings*> layers;
+ sp<GraphicBuffer> buffer = new GraphicBuffer();
+ base::unique_fd bufferFence;
+ base::unique_fd drawFence;
+
+ EXPECT_CALL(*mRenderEngine, drawLayers)
+ .WillOnce([](const renderengine::DisplaySettings&,
+ const std::vector<const renderengine::LayerSettings*>&,
+ const sp<GraphicBuffer>&, const bool, base::unique_fd&&,
+ base::unique_fd*) -> status_t { return NO_ERROR; });
+
+ status_t result = mThreadedRE->drawLayers(settings, layers, buffer, false,
+ std::move(bufferFence), &drawFence);
+ ASSERT_EQ(NO_ERROR, result);
+}
+
+} // namespace android
diff --git a/media/libstagefright/renderfright/threaded/RenderEngineThreaded.cpp b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.cpp
new file mode 100644
index 0000000..d4184fd
--- /dev/null
+++ b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.cpp
@@ -0,0 +1,403 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include "RenderEngineThreaded.h"
+
+#include <sched.h>
+#include <chrono>
+#include <future>
+
+#include <android-base/stringprintf.h>
+#include <private/gui/SyncFeatures.h>
+#include <utils/Trace.h>
+
+#include "gl/GLESRenderEngine.h"
+
+using namespace std::chrono_literals;
+
+namespace android {
+namespace renderengine {
+namespace threaded {
+
+std::unique_ptr<RenderEngineThreaded> RenderEngineThreaded::create(CreateInstanceFactory factory) {
+ return std::make_unique<RenderEngineThreaded>(std::move(factory));
+}
+
+RenderEngineThreaded::RenderEngineThreaded(CreateInstanceFactory factory) {
+ ATRACE_CALL();
+
+ std::lock_guard lockThread(mThreadMutex);
+ mThread = std::thread(&RenderEngineThreaded::threadMain, this, factory);
+}
+
+RenderEngineThreaded::~RenderEngineThreaded() {
+ {
+ std::lock_guard lock(mThreadMutex);
+ mRunning = false;
+ mCondition.notify_one();
+ }
+
+ if (mThread.joinable()) {
+ mThread.join();
+ }
+}
+
+// NO_THREAD_SAFETY_ANALYSIS is because std::unique_lock presently lacks thread safety annotations.
+void RenderEngineThreaded::threadMain(CreateInstanceFactory factory) NO_THREAD_SAFETY_ANALYSIS {
+ ATRACE_CALL();
+
+ struct sched_param param = {0};
+ param.sched_priority = 2;
+ if (sched_setscheduler(0, SCHED_FIFO, &param) != 0) {
+ ALOGE("Couldn't set SCHED_FIFO");
+ }
+
+ mRenderEngine = factory();
+
+ std::unique_lock<std::mutex> lock(mThreadMutex);
+ pthread_setname_np(pthread_self(), mThreadName);
+
+ while (mRunning) {
+ if (!mFunctionCalls.empty()) {
+ auto task = mFunctionCalls.front();
+ mFunctionCalls.pop();
+ task(*mRenderEngine);
+ }
+ mCondition.wait(lock, [this]() REQUIRES(mThreadMutex) {
+ return !mRunning || !mFunctionCalls.empty();
+ });
+ }
+}
+
+void RenderEngineThreaded::primeCache() const {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::primeCache");
+ instance.primeCache();
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::dump(std::string& result) {
+ std::promise<std::string> resultPromise;
+ std::future<std::string> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &result](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::dump");
+ std::string localResult = result;
+ instance.dump(localResult);
+ resultPromise.set_value(std::move(localResult));
+ });
+ }
+ mCondition.notify_one();
+ // Note: This is an rvalue.
+ result.assign(resultFuture.get());
+}
+
+bool RenderEngineThreaded::useNativeFenceSync() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& /*instance*/) {
+ ATRACE_NAME("REThreaded::useNativeFenceSync");
+ bool returnValue = SyncFeatures::getInstance().useNativeFenceSync();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::useWaitSync() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& /*instance*/) {
+ ATRACE_NAME("REThreaded::useWaitSync");
+ bool returnValue = SyncFeatures::getInstance().useWaitSync();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+void RenderEngineThreaded::genTextures(size_t count, uint32_t* names) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, count, names](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::genTextures");
+ instance.genTextures(count, names);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::deleteTextures(size_t count, uint32_t const* names) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, count, &names](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::deleteTextures");
+ instance.deleteTextures(count, names);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::bindExternalTextureImage(uint32_t texName, const Image& image) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push(
+ [&resultPromise, texName, &image](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::bindExternalTextureImage");
+ instance.bindExternalTextureImage(texName, image);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+status_t RenderEngineThreaded::bindExternalTextureBuffer(uint32_t texName,
+ const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& fence) {
+ std::promise<status_t> resultPromise;
+ std::future<status_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push(
+ [&resultPromise, texName, &buffer, &fence](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::bindExternalTextureBuffer");
+ status_t status = instance.bindExternalTextureBuffer(texName, buffer, fence);
+ resultPromise.set_value(status);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+void RenderEngineThreaded::cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &buffer](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::cacheExternalTextureBuffer");
+ instance.cacheExternalTextureBuffer(buffer);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+void RenderEngineThreaded::unbindExternalTextureBuffer(uint64_t bufferId) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &bufferId](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::unbindExternalTextureBuffer");
+ instance.unbindExternalTextureBuffer(bufferId);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+status_t RenderEngineThreaded::bindFrameBuffer(Framebuffer* framebuffer) {
+ std::promise<status_t> resultPromise;
+ std::future<status_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &framebuffer](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::bindFrameBuffer");
+ status_t status = instance.bindFrameBuffer(framebuffer);
+ resultPromise.set_value(status);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+void RenderEngineThreaded::unbindFrameBuffer(Framebuffer* framebuffer) {
+ std::promise<void> resultPromise;
+ std::future<void> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &framebuffer](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::unbindFrameBuffer");
+ instance.unbindFrameBuffer(framebuffer);
+ resultPromise.set_value();
+ });
+ }
+ mCondition.notify_one();
+ resultFuture.wait();
+}
+
+size_t RenderEngineThreaded::getMaxTextureSize() const {
+ std::promise<size_t> resultPromise;
+ std::future<size_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::getMaxTextureSize");
+ size_t size = instance.getMaxTextureSize();
+ resultPromise.set_value(size);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+size_t RenderEngineThreaded::getMaxViewportDims() const {
+ std::promise<size_t> resultPromise;
+ std::future<size_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::getMaxViewportDims");
+ size_t size = instance.getMaxViewportDims();
+ resultPromise.set_value(size);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::isProtected() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::isProtected");
+ bool returnValue = instance.isProtected();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::supportsProtectedContent() const {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::supportsProtectedContent");
+ bool returnValue = instance.supportsProtectedContent();
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::useProtectedContext(bool useProtectedContext) {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push(
+ [&resultPromise, useProtectedContext](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::useProtectedContext");
+ bool returnValue = instance.useProtectedContext(useProtectedContext);
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+Framebuffer* RenderEngineThreaded::getFramebufferForDrawing() {
+ std::promise<Framebuffer*> resultPromise;
+ std::future<Framebuffer*> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::getFramebufferForDrawing");
+ Framebuffer* framebuffer = instance.getFramebufferForDrawing();
+ resultPromise.set_value(framebuffer);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+bool RenderEngineThreaded::cleanupPostRender(CleanupMode mode) {
+ std::promise<bool> resultPromise;
+ std::future<bool> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, mode](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::cleanupPostRender");
+ bool returnValue = instance.cleanupPostRender(mode);
+ resultPromise.set_value(returnValue);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+status_t RenderEngineThreaded::drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer,
+ const bool useFramebufferCache,
+ base::unique_fd&& bufferFence,
+ base::unique_fd* drawFence) {
+ std::promise<status_t> resultPromise;
+ std::future<status_t> resultFuture = resultPromise.get_future();
+ {
+ std::lock_guard lock(mThreadMutex);
+ mFunctionCalls.push([&resultPromise, &display, &layers, &buffer, useFramebufferCache,
+ &bufferFence, &drawFence](renderengine::RenderEngine& instance) {
+ ATRACE_NAME("REThreaded::drawLayers");
+ status_t status = instance.drawLayers(display, layers, buffer, useFramebufferCache,
+ std::move(bufferFence), drawFence);
+ resultPromise.set_value(status);
+ });
+ }
+ mCondition.notify_one();
+ return resultFuture.get();
+}
+
+} // namespace threaded
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/renderfright/threaded/RenderEngineThreaded.h b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.h
new file mode 100644
index 0000000..86a49e9
--- /dev/null
+++ b/media/libstagefright/renderfright/threaded/RenderEngineThreaded.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "renderengine/RenderEngine.h"
+
+namespace android {
+namespace renderengine {
+namespace threaded {
+
+using CreateInstanceFactory = std::function<std::unique_ptr<renderengine::RenderEngine>()>;
+
+/**
+ * This class extends a basic RenderEngine class. It contains a thread. Each time a function of
+ * this class is called, we create a lambda function that is put on a queue. The main thread then
+ * executes the functions in order.
+ */
+class RenderEngineThreaded : public RenderEngine {
+public:
+ static std::unique_ptr<RenderEngineThreaded> create(CreateInstanceFactory factory);
+
+ RenderEngineThreaded(CreateInstanceFactory factory);
+ ~RenderEngineThreaded() override;
+ void primeCache() const override;
+
+ void dump(std::string& result) override;
+
+ bool useNativeFenceSync() const override;
+ bool useWaitSync() const override;
+ void genTextures(size_t count, uint32_t* names) override;
+ void deleteTextures(size_t count, uint32_t const* names) override;
+ void bindExternalTextureImage(uint32_t texName, const Image& image) override;
+ status_t bindExternalTextureBuffer(uint32_t texName, const sp<GraphicBuffer>& buffer,
+ const sp<Fence>& fence) override;
+ void cacheExternalTextureBuffer(const sp<GraphicBuffer>& buffer) override;
+ void unbindExternalTextureBuffer(uint64_t bufferId) override;
+ status_t bindFrameBuffer(Framebuffer* framebuffer) override;
+ void unbindFrameBuffer(Framebuffer* framebuffer) override;
+ size_t getMaxTextureSize() const override;
+ size_t getMaxViewportDims() const override;
+
+ bool isProtected() const override;
+ bool supportsProtectedContent() const override;
+ bool useProtectedContext(bool useProtectedContext) override;
+ bool cleanupPostRender(CleanupMode mode) override;
+
+ status_t drawLayers(const DisplaySettings& display,
+ const std::vector<const LayerSettings*>& layers,
+ const sp<GraphicBuffer>& buffer, const bool useFramebufferCache,
+ base::unique_fd&& bufferFence, base::unique_fd* drawFence) override;
+
+protected:
+ Framebuffer* getFramebufferForDrawing() override;
+
+private:
+ void threadMain(CreateInstanceFactory factory);
+
+ /* ------------------------------------------------------------------------
+ * Threading
+ */
+ const char* const mThreadName = "RenderEngineThread";
+ // Protects the creation and destruction of mThread.
+ mutable std::mutex mThreadMutex;
+ std::thread mThread GUARDED_BY(mThreadMutex);
+ bool mRunning GUARDED_BY(mThreadMutex) = true;
+ mutable std::queue<std::function<void(renderengine::RenderEngine& instance)>> mFunctionCalls
+ GUARDED_BY(mThreadMutex);
+ mutable std::condition_variable mCondition;
+
+ /* ------------------------------------------------------------------------
+ * Render Engine
+ */
+ std::unique_ptr<renderengine::RenderEngine> mRenderEngine;
+};
+} // namespace threaded
+} // namespace renderengine
+} // namespace android
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 4bc67e8..a0b66a7 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -37,12 +37,73 @@
mAccessUnitRTPTime(0),
mNextExpectedSeqNoValid(false),
mNextExpectedSeqNo(0),
- mAccessUnitDamaged(false) {
+ mAccessUnitDamaged(false),
+ mFirstIFrameProvided(false),
+ mLastIFrameProvidedAtMs(0) {
}
AAVCAssembler::~AAVCAssembler() {
}
+int32_t AAVCAssembler::addNack(
+ const sp<ARTPSource> &source) {
+ List<sp<ABuffer>> *queue = source->queue();
+ int32_t nackCount = 0;
+
+ List<sp<ABuffer> >::iterator it = queue->begin();
+
+ if (it == queue->end()) {
+ return nackCount /* 0 */;
+ }
+
+ uint16_t queueHeadSeqNum = (*it)->int32Data();
+
+ // move to the packet after which RTCP:NACK was sent.
+ for (; it != queue->end(); ++it) {
+ int32_t seqNum = (*it)->int32Data();
+ if (seqNum >= source->mHighestNackNumber) {
+ break;
+ }
+ }
+
+ int32_t nackStartAt = -1;
+
+ while (it != queue->end()) {
+ int32_t seqBeforeLast = (*it)->int32Data();
+ // increase iterator.
+ if ((++it) == queue->end()) {
+ break;
+ }
+ int32_t seqLast = (*it)->int32Data();
+
+ if ((seqLast - seqBeforeLast) < 0) {
+ ALOGD("addNack: found end of seqNum from(%d) to(%d)", seqBeforeLast, seqLast);
+ source->mHighestNackNumber = 0;
+ }
+
+ // missed packet found
+ if (seqLast > (seqBeforeLast + 1) &&
+ // we didn't send RTCP:NACK for this packet yet.
+ (seqLast - 1) > source->mHighestNackNumber) {
+ source->mHighestNackNumber = seqLast - 1;
+ nackStartAt = seqBeforeLast + 1;
+ break;
+ }
+
+ }
+
+ if (nackStartAt != -1) {
+ nackCount = source->mHighestNackNumber - nackStartAt + 1;
+ ALOGD("addNack: nackCount=%d, nackFrom=%d, nackTo=%d", nackCount,
+ nackStartAt, source->mHighestNackNumber);
+
+ uint16_t mask = (uint16_t)(0xffff) >> (16 - nackCount + 1);
+ source->setSeqNumToNACK(nackStartAt, mask, queueHeadSeqNum);
+ }
+
+ return nackCount;
+}
+
ARTPAssembler::AssemblyStatus AAVCAssembler::addNALUnit(
const sp<ARTPSource> &source) {
List<sp<ABuffer> > *queue = source->queue();
@@ -51,22 +112,62 @@
return NOT_ENOUGH_DATA;
}
+ sp<ABuffer> buffer = *queue->begin();
+ uint32_t rtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ int64_t startTime = source->mFirstSysTime / 1000;
+ int64_t nowTime = ALooper::GetNowUs() / 1000;
+ int64_t playedTime = nowTime - startTime;
+ int64_t playedTimeRtp =
+ source->mFirstRtpTime + (((uint32_t)playedTime) * (source->mClockRate / 1000));
+ const uint32_t jitterTime =
+ (uint32_t)(source->mClockRate / ((float)1000 / (source->mJbTimeMs)));
+ uint32_t expiredTimeInJb = rtpTime + jitterTime;
+ bool isExpired = expiredTimeInJb <= (playedTimeRtp);
+ bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
+ bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
+
+ if (mShowQueue && mShowQueueCnt < 20) {
+ showCurrentQueue(queue);
+ printNowTimeUs(startTime, nowTime, playedTime);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+ mShowQueueCnt++;
+ }
+
+ AAVCAssembler::addNack(source);
+
+ if (!isExpired) {
+ ALOGV("buffering in jitter buffer.");
+ return NOT_ENOUGH_DATA;
+ }
+
+ if (isTooLate200) {
+ ALOGW("=== WARNING === buffer arrived 200ms late. === WARNING === ");
+ }
+
+ if (isTooLate300) {
+ ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
+ ((long long)playedTimeRtp) - expiredTimeInJb, buffer->int32Data());
+ printNowTimeUs(startTime, nowTime, playedTime);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+
+ mNextExpectedSeqNo = pickProperSeq(queue, jitterTime, playedTimeRtp);
+ }
+
if (mNextExpectedSeqNoValid) {
- List<sp<ABuffer> >::iterator it = queue->begin();
- while (it != queue->end()) {
- if ((uint32_t)(*it)->int32Data() >= mNextExpectedSeqNo) {
- break;
- }
+ int32_t size = queue->size();
+ int32_t cntRemove = deleteUnitUnderSeq(queue, mNextExpectedSeqNo);
- it = queue->erase(it);
+ if (cntRemove > 0) {
+ source->noticeAbandonBuffer(cntRemove);
+ ALOGW("delete %d of %d buffers", cntRemove, size);
}
-
if (queue->empty()) {
return NOT_ENOUGH_DATA;
}
}
- sp<ABuffer> buffer = *queue->begin();
+ buffer = *queue->begin();
if (!mNextExpectedSeqNoValid) {
mNextExpectedSeqNoValid = true;
@@ -123,12 +224,30 @@
}
}
+void AAVCAssembler::checkIFrameProvided(const sp<ABuffer> &buffer) {
+ if (buffer->size() == 0) {
+ return;
+ }
+ const uint8_t *data = buffer->data();
+ unsigned nalType = data[0] & 0x1f;
+ if (nalType == 0x5) {
+ mFirstIFrameProvided = true;
+ mLastIFrameProvidedAtMs = ALooper::GetNowUs() / 1000;
+
+ uint32_t rtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ ALOGD("got First I-frame to be decoded. rtpTime=%u, size=%zu", rtpTime, buffer->size());
+ }
+}
+
void AAVCAssembler::addSingleNALUnit(const sp<ABuffer> &buffer) {
ALOGV("addSingleNALUnit of size %zu", buffer->size());
#if !LOG_NDEBUG
hexdump(buffer->data(), buffer->size());
#endif
+ checkIFrameProvided(buffer);
+
uint32_t rtpTime;
CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
@@ -216,6 +335,11 @@
size_t totalCount = 1;
bool complete = false;
+ uint32_t rtpTimeStartAt;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTimeStartAt));
+ uint32_t startSeqNo = buffer->int32Data();
+ bool pFrame = nalType == 0x1;
+
if (data[1] & 0x40) {
// Huh? End bit also set on the first buffer.
@@ -224,6 +348,8 @@
complete = true;
} else {
List<sp<ABuffer> >::iterator it = ++queue->begin();
+ int32_t connected = 1;
+ bool snapped = false;
while (it != queue->end()) {
ALOGV("sequence length %zu", totalCount);
@@ -233,26 +359,32 @@
size_t size = buffer->size();
if ((uint32_t)buffer->int32Data() != expectedSeqNo) {
- ALOGV("sequence not complete, expected seqNo %d, got %d",
- expectedSeqNo, (uint32_t)buffer->int32Data());
+ ALOGD("sequence not complete, expected seqNo %u, got %u, nalType %u",
+ expectedSeqNo, (unsigned)buffer->int32Data(), nalType);
+ snapped = true;
- return WRONG_SEQUENCE_NUMBER;
+ if (!pFrame) {
+ return WRONG_SEQUENCE_NUMBER;
+ }
}
+ if (!snapped) {
+ connected++;
+ }
+
+ uint32_t rtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
if (size < 2
|| data[0] != indicator
|| (data[1] & 0x1f) != nalType
- || (data[1] & 0x80)) {
+ || (data[1] & 0x80)
+ || rtpTime != rtpTimeStartAt) {
ALOGV("Ignoring malformed FU buffer.");
// Delete the whole start of the FU.
- it = queue->begin();
- for (size_t i = 0; i <= totalCount; ++i) {
- it = queue->erase(it);
- }
-
mNextExpectedSeqNo = expectedSeqNo + 1;
+ deleteUnitUnderSeq(queue, mNextExpectedSeqNo);
return MALFORMED_PACKET;
}
@@ -260,9 +392,17 @@
totalSize += size - 2;
++totalCount;
- expectedSeqNo = expectedSeqNo + 1;
+ expectedSeqNo = (uint32_t)buffer->int32Data() + 1;
if (data[1] & 0x40) {
+ if (pFrame && !recycleUnit(startSeqNo, expectedSeqNo,
+ connected, totalCount, 0.5f)) {
+ mNextExpectedSeqNo = expectedSeqNo;
+ deleteUnitUnderSeq(queue, mNextExpectedSeqNo);
+
+ return MALFORMED_PACKET;
+ }
+
// This is the last fragment.
complete = true;
break;
@@ -290,6 +430,7 @@
unit->data()[0] = (nri << 5) | nalType;
size_t offset = 1;
+ int32_t cvo = -1;
List<sp<ABuffer> >::iterator it = queue->begin();
for (size_t i = 0; i < totalCount; ++i) {
const sp<ABuffer> &buffer = *it;
@@ -300,6 +441,8 @@
#endif
memcpy(unit->data() + offset, buffer->data() + 2, buffer->size() - 2);
+
+ buffer->meta()->findInt32("cvo", &cvo);
offset += buffer->size() - 2;
it = queue->erase(it);
@@ -307,6 +450,10 @@
unit->setRange(0, totalSize);
+ if (cvo >= 0) {
+ unit->meta()->setInt32("cvo", cvo);
+ }
+
addSingleNALUnit(unit);
ALOGV("successfully assembled a NAL unit from fragments.");
@@ -327,6 +474,7 @@
sp<ABuffer> accessUnit = new ABuffer(totalSize);
size_t offset = 0;
+ int32_t cvo = -1;
for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
it != mNALUnits.end(); ++it) {
memcpy(accessUnit->data() + offset, "\x00\x00\x00\x01", 4);
@@ -335,6 +483,8 @@
sp<ABuffer> nal = *it;
memcpy(accessUnit->data() + offset, nal->data(), nal->size());
offset += nal->size();
+
+ nal->meta()->findInt32("cvo", &cvo);
}
CopyTimes(accessUnit, *mNALUnits.begin());
@@ -343,6 +493,9 @@
printf(mAccessUnitDamaged ? "X" : ".");
fflush(stdout);
#endif
+ if (cvo >= 0) {
+ accessUnit->meta()->setInt32("cvo", cvo);
+ }
if (mAccessUnitDamaged) {
accessUnit->meta()->setInt32("damaged", true);
@@ -356,22 +509,78 @@
msg->post();
}
+int32_t AAVCAssembler::pickProperSeq(const Queue *queue, uint32_t jit, int64_t play) {
+ sp<ABuffer> buffer = *(queue->begin());
+ uint32_t rtpTime;
+ int32_t nextSeqNo = buffer->int32Data();
+
+ Queue::const_iterator it = queue->begin();
+ while (it != queue->end()) {
+ CHECK((*it)->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ // if pkt in time exists, that should be the next pivot
+ if (rtpTime + jit >= play) {
+ nextSeqNo = (*it)->int32Data();
+ break;
+ }
+ it++;
+ }
+ return nextSeqNo;
+}
+
+bool AAVCAssembler::recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
+ size_t avail, float goodRatio) {
+ float total = end - start;
+ float valid = connected;
+ float exist = avail;
+ bool isRecycle = (valid / total) >= goodRatio;
+
+ ALOGV("checking p-frame losses.. recvBufs %f valid %f diff %f recycle? %d",
+ exist, valid, total, isRecycle);
+
+ return isRecycle;
+}
+
+int32_t AAVCAssembler::deleteUnitUnderSeq(Queue *queue, uint32_t seq) {
+ int32_t initSize = queue->size();
+ Queue::iterator it = queue->begin();
+ while (it != queue->end()) {
+ if ((uint32_t)(*it)->int32Data() >= seq) {
+ break;
+ }
+ it++;
+ }
+ queue->erase(queue->begin(), it);
+ return initSize - queue->size();
+}
+
+inline void AAVCAssembler::printNowTimeUs(int64_t start, int64_t now, int64_t play) {
+ ALOGD("start=%lld, now=%lld, played=%lld",
+ (long long)start, (long long)now, (long long)play);
+}
+
+inline void AAVCAssembler::printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp) {
+ ALOGD("rtp-time(JB)=%u, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%u isExpired=%d",
+ rtp, (long long)play, exp, isExp);
+}
+
ARTPAssembler::AssemblyStatus AAVCAssembler::assembleMore(
const sp<ARTPSource> &source) {
AssemblyStatus status = addNALUnit(source);
if (status == MALFORMED_PACKET) {
- mAccessUnitDamaged = true;
+ uint64_t msecsSinceLastIFrame = (ALooper::GetNowUs() / 1000) - mLastIFrameProvidedAtMs;
+ if (msecsSinceLastIFrame > 1000) {
+ ALOGV("request FIR to get a new I-Frame, time since "
+ "last I-Frame %llu ms", (unsigned long long)msecsSinceLastIFrame);
+ source->onIssueFIRByAssembler();
+ }
}
return status;
}
void AAVCAssembler::packetLost() {
CHECK(mNextExpectedSeqNoValid);
- ALOGV("packetLost (expected %d)", mNextExpectedSeqNo);
-
+ ALOGD("packetLost (expected %u)", mNextExpectedSeqNo);
++mNextExpectedSeqNo;
-
- mAccessUnitDamaged = true;
}
void AAVCAssembler::onByeReceived() {
diff --git a/media/libstagefright/rtsp/AAVCAssembler.h b/media/libstagefright/rtsp/AAVCAssembler.h
index e19480c..913a868 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.h
+++ b/media/libstagefright/rtsp/AAVCAssembler.h
@@ -31,6 +31,7 @@
struct AAVCAssembler : public ARTPAssembler {
explicit AAVCAssembler(const sp<AMessage> &notify);
+ typedef List<sp<ABuffer> > Queue;
protected:
virtual ~AAVCAssembler();
@@ -45,8 +46,12 @@
bool mNextExpectedSeqNoValid;
uint32_t mNextExpectedSeqNo;
bool mAccessUnitDamaged;
+ bool mFirstIFrameProvided;
+ uint64_t mLastIFrameProvidedAtMs;
List<sp<ABuffer> > mNALUnits;
+ int32_t addNack(const sp<ARTPSource> &source);
+ void checkIFrameProvided(const sp<ABuffer> &buffer);
AssemblyStatus addNALUnit(const sp<ARTPSource> &source);
void addSingleNALUnit(const sp<ABuffer> &buffer);
AssemblyStatus addFragmentedNALUnit(List<sp<ABuffer> > *queue);
@@ -54,6 +59,13 @@
void submitAccessUnit();
+ int32_t pickProperSeq(const Queue *q, uint32_t jit, int64_t play);
+ bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
+ size_t avail, float goodRatio);
+ int32_t deleteUnitUnderSeq(Queue *q, uint32_t seq);
+ void printNowTimeUs(int64_t start, int64_t now, int64_t play);
+ void printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp);
+
DISALLOW_EVIL_CONSTRUCTORS(AAVCAssembler);
};
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.cpp b/media/libstagefright/rtsp/AHEVCAssembler.cpp
new file mode 100644
index 0000000..148a0ba
--- /dev/null
+++ b/media/libstagefright/rtsp/AHEVCAssembler.cpp
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AHEVCAssembler"
+#include <utils/Log.h>
+
+#include "AHEVCAssembler.h"
+
+#include "ARTPSource.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <include/HevcUtils.h>
+#include <media/stagefright/foundation/hexdump.h>
+
+#include <stdint.h>
+
+#define H265_NALU_MASK 0x3F
+#define H265_NALU_VPS 0x20
+#define H265_NALU_SPS 0x21
+#define H265_NALU_PPS 0x22
+#define H265_NALU_AP 0x30
+#define H265_NALU_FU 0x31
+#define H265_NALU_PACI 0x32
+
+
+namespace android {
+
+// static
+AHEVCAssembler::AHEVCAssembler(const sp<AMessage> ¬ify)
+ : mNotifyMsg(notify),
+ mAccessUnitRTPTime(0),
+ mNextExpectedSeqNoValid(false),
+ mNextExpectedSeqNo(0),
+ mAccessUnitDamaged(false),
+ mFirstIFrameProvided(false),
+ mLastIFrameProvidedAtMs(0),
+ mWidth(0),
+ mHeight(0) {
+
+ ALOGV("Constructor");
+}
+
+AHEVCAssembler::~AHEVCAssembler() {
+}
+
+int32_t AHEVCAssembler::addNack(
+ const sp<ARTPSource> &source) {
+ List<sp<ABuffer>> *queue = source->queue();
+ int32_t nackCount = 0;
+
+ List<sp<ABuffer> >::iterator it = queue->begin();
+
+ if (it == queue->end()) {
+ return nackCount /* 0 */;
+ }
+
+ uint16_t queueHeadSeqNum = (*it)->int32Data();
+
+ // move to the packet after which RTCP:NACK was sent.
+ for (; it != queue->end(); ++it) {
+ int32_t seqNum = (*it)->int32Data();
+ if (seqNum >= source->mHighestNackNumber) {
+ break;
+ }
+ }
+
+ int32_t nackStartAt = -1;
+
+ while (it != queue->end()) {
+ int32_t seqBeforeLast = (*it)->int32Data();
+ // increase iterator.
+ if ((++it) == queue->end()) {
+ break;
+ }
+
+ int32_t seqLast = (*it)->int32Data();
+
+ if ((seqLast - seqBeforeLast) < 0) {
+ ALOGD("addNack: found end of seqNum from(%d) to(%d)", seqBeforeLast, seqLast);
+ source->mHighestNackNumber = 0;
+ }
+
+ // missed packet found
+ if (seqLast > (seqBeforeLast + 1) &&
+ // we didn't send RTCP:NACK for this packet yet.
+ (seqLast - 1) > source->mHighestNackNumber) {
+ source->mHighestNackNumber = seqLast -1;
+ nackStartAt = seqBeforeLast + 1;
+ break;
+ }
+
+ }
+
+ if (nackStartAt != -1) {
+ nackCount = source->mHighestNackNumber - nackStartAt + 1;
+ ALOGD("addNack: nackCount=%d, nackFrom=%d, nackTo=%d", nackCount,
+ nackStartAt, source->mHighestNackNumber);
+
+ uint16_t mask = (uint16_t)(0xffff) >> (16 - nackCount + 1);
+ source->setSeqNumToNACK(nackStartAt, mask, queueHeadSeqNum);
+ }
+
+ return nackCount;
+}
+
+ARTPAssembler::AssemblyStatus AHEVCAssembler::addNALUnit(
+ const sp<ARTPSource> &source) {
+ List<sp<ABuffer> > *queue = source->queue();
+
+ if (queue->empty()) {
+ return NOT_ENOUGH_DATA;
+ }
+
+ sp<ABuffer> buffer = *queue->begin();
+ buffer->meta()->setObject("source", source);
+ uint32_t rtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ int64_t startTime = source->mFirstSysTime / 1000;
+ int64_t nowTime = ALooper::GetNowUs() / 1000;
+ int64_t playedTime = nowTime - startTime;
+ int64_t playedTimeRtp = source->mFirstRtpTime +
+ (((uint32_t)playedTime) * (source->mClockRate / 1000));
+ const uint32_t jitterTime = (uint32_t)(source->mClockRate / ((float)1000 / (source->mJbTimeMs)));
+ uint32_t expiredTimeInJb = rtpTime + jitterTime;
+ bool isExpired = expiredTimeInJb <= (playedTimeRtp);
+ bool isTooLate200 = expiredTimeInJb < (playedTimeRtp - jitterTime);
+ bool isTooLate300 = expiredTimeInJb < (playedTimeRtp - (jitterTime * 3 / 2));
+
+ if (mShowQueueCnt < 20) {
+ showCurrentQueue(queue);
+ printNowTimeUs(startTime, nowTime, playedTime);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+ mShowQueueCnt++;
+ }
+
+ AHEVCAssembler::addNack(source);
+
+ if (!isExpired) {
+ ALOGV("buffering in jitter buffer.");
+ return NOT_ENOUGH_DATA;
+ }
+
+ if (isTooLate200) {
+ ALOGW("=== WARNING === buffer arrived 200ms late. === WARNING === ");
+ }
+
+ if (isTooLate300) {
+ ALOGW("buffer arrived after 300ms ... \t Diff in Jb=%lld \t Seq# %d",
+ ((long long)playedTimeRtp) - expiredTimeInJb, buffer->int32Data());
+ printNowTimeUs(startTime, nowTime, playedTime);
+ printRTPTime(rtpTime, playedTimeRtp, expiredTimeInJb, isExpired);
+
+ mNextExpectedSeqNo = pickProperSeq(queue, jitterTime, playedTimeRtp);
+ }
+
+ if (mNextExpectedSeqNoValid) {
+ int32_t size = queue->size();
+ int32_t cntRemove = deleteUnitUnderSeq(queue, mNextExpectedSeqNo);
+
+ if (cntRemove > 0) {
+ source->noticeAbandonBuffer(cntRemove);
+ ALOGW("delete %d of %d buffers", cntRemove, size);
+ }
+
+ if (queue->empty()) {
+ return NOT_ENOUGH_DATA;
+ }
+ }
+
+ buffer = *queue->begin();
+
+ if (!mNextExpectedSeqNoValid) {
+ mNextExpectedSeqNoValid = true;
+ mNextExpectedSeqNo = (uint32_t)buffer->int32Data();
+ } else if ((uint32_t)buffer->int32Data() != mNextExpectedSeqNo) {
+ ALOGV("Not the sequence number I expected");
+
+ return WRONG_SEQUENCE_NUMBER;
+ }
+
+ const uint8_t *data = buffer->data();
+ size_t size = buffer->size();
+
+ if (size < 1 || (data[0] & 0x80)) {
+ // Corrupt.
+
+ ALOGV("Ignoring corrupt buffer.");
+ queue->erase(queue->begin());
+
+ ++mNextExpectedSeqNo;
+ return MALFORMED_PACKET;
+ }
+
+ unsigned nalType = (data[0] >> 1) & H265_NALU_MASK;
+ if (nalType > 0 && nalType < H265_NALU_AP) {
+ addSingleNALUnit(buffer);
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+ return OK;
+ } else if (nalType == H265_NALU_FU) {
+ // FU (H.265 fragmentation unit, H265_NALU_FU)
+ return addFragmentedNALUnit(queue);
+ } else if (nalType == H265_NALU_AP) {
+ // AP (H.265 aggregation packet, H265_NALU_AP)
+ bool success = addSingleTimeAggregationPacket(buffer);
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+
+ return success ? OK : MALFORMED_PACKET;
+ } else if (nalType == 0) {
+ ALOGV("Ignoring undefined nal type.");
+
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+
+ return OK;
+ } else {
+ ALOGV("Ignoring unsupported buffer (nalType=%d)", nalType);
+
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+
+ return MALFORMED_PACKET;
+ }
+}
+
+void AHEVCAssembler::checkSpsUpdated(const sp<ABuffer> &buffer) {
+ if (buffer->size() == 0) {
+ return;
+ }
+ const uint8_t *data = buffer->data();
+ HevcParameterSets paramSets;
+ unsigned nalType = (data[0] >> 1) & H265_NALU_MASK;
+ if (nalType == H265_NALU_SPS) {
+ int32_t width = 0, height = 0;
+ paramSets.FindHEVCDimensions(buffer, &width, &height);
+ ALOGV("existing resolution (%u x %u)", mWidth, mHeight);
+ if (width != mWidth || height != mHeight) {
+ mFirstIFrameProvided = false;
+ mWidth = width;
+ mHeight = height;
+ ALOGD("found a new resolution (%u x %u)", mWidth, mHeight);
+ }
+ }
+}
+
+void AHEVCAssembler::checkIFrameProvided(const sp<ABuffer> &buffer) {
+ if (buffer->size() == 0) {
+ return;
+ }
+ const uint8_t *data = buffer->data();
+ unsigned nalType = (data[0] >> 1) & H265_NALU_MASK;
+ if (nalType > 0x0F && nalType < 0x18) {
+ mLastIFrameProvidedAtMs = ALooper::GetNowUs() / 1000;
+ if (!mFirstIFrameProvided) {
+ mFirstIFrameProvided = true;
+ uint32_t rtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ ALOGD("got First I-frame to be decoded. rtpTime=%d, size=%zu", rtpTime, buffer->size());
+ }
+ }
+}
+
+bool AHEVCAssembler::dropFramesUntilIframe(const sp<ABuffer> &buffer) {
+ if (buffer->size() == 0) {
+ return false;
+ }
+ const uint8_t *data = buffer->data();
+ unsigned nalType = (data[0] >> 1) & H265_NALU_MASK;
+ return !mFirstIFrameProvided && nalType < 0x10;
+}
+
+void AHEVCAssembler::addSingleNALUnit(const sp<ABuffer> &buffer) {
+ ALOGV("addSingleNALUnit of size %zu", buffer->size());
+#if !LOG_NDEBUG
+ hexdump(buffer->data(), buffer->size());
+#endif
+ checkSpsUpdated(buffer);
+ checkIFrameProvided(buffer);
+
+ uint32_t rtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+
+ if (dropFramesUntilIframe(buffer)) {
+ sp<ARTPSource> source = nullptr;
+ buffer->meta()->findObject("source", (sp<android::RefBase>*)&source);
+ if (source != nullptr) {
+ ALOGD("Issued FIR to get the I-frame");
+ source->onIssueFIRByAssembler();
+ }
+ ALOGD("drop P-frames till an I-frame provided. rtpTime %u", rtpTime);
+ return;
+ }
+
+ if (!mNALUnits.empty() && rtpTime != mAccessUnitRTPTime) {
+ submitAccessUnit();
+ }
+ mAccessUnitRTPTime = rtpTime;
+
+ mNALUnits.push_back(buffer);
+}
+
+bool AHEVCAssembler::addSingleTimeAggregationPacket(const sp<ABuffer> &buffer) {
+ const uint8_t *data = buffer->data();
+ size_t size = buffer->size();
+
+ if (size < 3) {
+ ALOGV("Discarding too small STAP-A packet.");
+ return false;
+ }
+
+ ++data;
+ --size;
+ while (size >= 2) {
+ size_t nalSize = (data[0] << 8) | data[1];
+
+ if (size < nalSize + 2) {
+ ALOGV("Discarding malformed STAP-A packet.");
+ return false;
+ }
+
+ sp<ABuffer> unit = new ABuffer(nalSize);
+ memcpy(unit->data(), &data[2], nalSize);
+
+ CopyTimes(unit, buffer);
+
+ addSingleNALUnit(unit);
+
+ data += 2 + nalSize;
+ size -= 2 + nalSize;
+ }
+
+ if (size != 0) {
+ ALOGV("Unexpected padding at end of STAP-A packet.");
+ }
+
+ return true;
+}
+
+ARTPAssembler::AssemblyStatus AHEVCAssembler::addFragmentedNALUnit(
+ List<sp<ABuffer> > *queue) {
+ CHECK(!queue->empty());
+
+ sp<ABuffer> buffer = *queue->begin();
+ const uint8_t *data = buffer->data();
+ size_t size = buffer->size();
+
+ CHECK(size > 0);
+ /* H265 payload header is 16 bit
+ 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |F| Type | Layer ID | TID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ unsigned indicator = (data[0] >> 1);
+
+ CHECK((indicator & H265_NALU_MASK) == H265_NALU_FU);
+
+ if (size < 3) {
+ ALOGV("Ignoring malformed FU buffer (size = %zu)", size);
+
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+ return MALFORMED_PACKET;
+ }
+
+ if (!(data[2] & 0x80)) {
+ // Start bit not set on the first buffer.
+
+ ALOGV("Start bit not set on first buffer");
+
+ queue->erase(queue->begin());
+ ++mNextExpectedSeqNo;
+ return MALFORMED_PACKET;
+ }
+
+ /* FU INDICATOR HDR
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+
+ |S|E| Type |
+ +-+-+-+-+-+-+-+-+
+ */
+ uint32_t nalType = data[2] & H265_NALU_MASK;
+ uint32_t tid = data[1] & 0x7;
+ ALOGV("nalType =%u, tid =%u", nalType, tid);
+
+ uint32_t expectedSeqNo = (uint32_t)buffer->int32Data() + 1;
+ size_t totalSize = size - 3;
+ size_t totalCount = 1;
+ bool complete = false;
+
+ uint32_t rtpTimeStartAt;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTimeStartAt));
+ uint32_t startSeqNo = buffer->int32Data();
+ bool pFrame = (nalType < 0x10);
+
+ if (data[2] & 0x40) {
+ // Huh? End bit also set on the first buffer.
+
+ ALOGV("Grrr. This isn't fragmented at all.");
+
+ complete = true;
+ } else {
+ List<sp<ABuffer> >::iterator it = ++queue->begin();
+ int32_t connected = 1;
+ bool snapped = false;
+ while (it != queue->end()) {
+ ALOGV("sequence length %zu", totalCount);
+
+ const sp<ABuffer> &buffer = *it;
+
+ const uint8_t *data = buffer->data();
+ size_t size = buffer->size();
+
+ if ((uint32_t)buffer->int32Data() != expectedSeqNo) {
+ ALOGV("sequence not complete, expected seqNo %u, got %u, nalType %u",
+ expectedSeqNo, (uint32_t)buffer->int32Data(), nalType);
+ snapped = true;
+
+ if (!pFrame) {
+ return WRONG_SEQUENCE_NUMBER;
+ }
+ }
+
+ if (!snapped) {
+ connected++;
+ }
+
+ uint32_t rtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ if (size < 3
+ || ((data[0] >> 1) & H265_NALU_MASK) != indicator
+ || (data[2] & H265_NALU_MASK) != nalType
+ || (data[2] & 0x80)
+ || rtpTime != rtpTimeStartAt) {
+ ALOGV("Ignoring malformed FU buffer.");
+
+ // Delete the whole start of the FU.
+
+ mNextExpectedSeqNo = expectedSeqNo + 1;
+ deleteUnitUnderSeq(queue, mNextExpectedSeqNo);
+
+ return MALFORMED_PACKET;
+ }
+
+ totalSize += size - 3;
+ ++totalCount;
+
+ expectedSeqNo = (uint32_t)buffer->int32Data() + 1;
+
+ if (data[2] & 0x40) {
+ if (pFrame && !recycleUnit(startSeqNo, expectedSeqNo,
+ connected, totalCount, 0.5f)) {
+ mNextExpectedSeqNo = expectedSeqNo;
+ deleteUnitUnderSeq(queue, mNextExpectedSeqNo);
+
+ return MALFORMED_PACKET;
+ }
+ // This is the last fragment.
+ complete = true;
+ break;
+ }
+
+ ++it;
+ }
+ }
+
+ if (!complete) {
+ return NOT_ENOUGH_DATA;
+ }
+
+ mNextExpectedSeqNo = expectedSeqNo;
+
+ // We found all the fragments that make up the complete NAL unit.
+
+ // Leave room for the header. So far totalSize did not include the
+ // header byte.
+ totalSize += 2;
+
+ sp<ABuffer> unit = new ABuffer(totalSize);
+ CopyTimes(unit, *queue->begin());
+
+ unit->data()[0] = (nalType << 1);
+ unit->data()[1] = tid;
+
+ size_t offset = 2;
+ int32_t cvo = -1;
+ List<sp<ABuffer> >::iterator it = queue->begin();
+ for (size_t i = 0; i < totalCount; ++i) {
+ const sp<ABuffer> &buffer = *it;
+
+ ALOGV("piece #%zu/%zu", i + 1, totalCount);
+#if !LOG_NDEBUG
+ hexdump(buffer->data(), buffer->size());
+#endif
+
+ memcpy(unit->data() + offset, buffer->data() + 3, buffer->size() - 3);
+ buffer->meta()->findInt32("cvo", &cvo);
+ offset += buffer->size() - 3;
+
+ it = queue->erase(it);
+ }
+
+ unit->setRange(0, totalSize);
+
+ if (cvo >= 0) {
+ unit->meta()->setInt32("cvo", cvo);
+ }
+
+ addSingleNALUnit(unit);
+
+ ALOGV("successfully assembled a NAL unit from fragments.");
+
+ return OK;
+}
+
+void AHEVCAssembler::submitAccessUnit() {
+ CHECK(!mNALUnits.empty());
+
+ ALOGV("Access unit complete (%zu nal units)", mNALUnits.size());
+
+ size_t totalSize = 0;
+ for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
+ it != mNALUnits.end(); ++it) {
+ totalSize += 4 + (*it)->size();
+ }
+
+ sp<ABuffer> accessUnit = new ABuffer(totalSize);
+ size_t offset = 0;
+ int32_t cvo = -1;
+ for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
+ it != mNALUnits.end(); ++it) {
+ memcpy(accessUnit->data() + offset, "\x00\x00\x00\x01", 4);
+ offset += 4;
+
+ sp<ABuffer> nal = *it;
+ memcpy(accessUnit->data() + offset, nal->data(), nal->size());
+ offset += nal->size();
+ nal->meta()->findInt32("cvo", &cvo);
+ }
+
+ CopyTimes(accessUnit, *mNALUnits.begin());
+
+#if 0
+ printf(mAccessUnitDamaged ? "X" : ".");
+ fflush(stdout);
+#endif
+ if (cvo >= 0) {
+ accessUnit->meta()->setInt32("cvo", cvo);
+ }
+
+ if (mAccessUnitDamaged) {
+ accessUnit->meta()->setInt32("damaged", true);
+ }
+
+ mNALUnits.clear();
+ mAccessUnitDamaged = false;
+
+ sp<AMessage> msg = mNotifyMsg->dup();
+ msg->setBuffer("access-unit", accessUnit);
+ msg->post();
+}
+
+int32_t AHEVCAssembler::pickProperSeq(const Queue *queue, uint32_t jit, int64_t play) {
+ sp<ABuffer> buffer = *(queue->begin());
+ uint32_t rtpTime;
+ int32_t nextSeqNo = buffer->int32Data();
+
+ Queue::const_iterator it = queue->begin();
+ while (it != queue->end()) {
+ CHECK((*it)->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+ // if pkt in time exists, that should be the next pivot
+ if (rtpTime + jit >= play) {
+ nextSeqNo = (*it)->int32Data();
+ break;
+ }
+ it++;
+ }
+ return nextSeqNo;
+}
+
+bool AHEVCAssembler::recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
+ size_t avail, float goodRatio) {
+ float total = end - start;
+ float valid = connected;
+ float exist = avail;
+ bool isRecycle = (valid / total) >= goodRatio;
+
+ ALOGV("checking p-frame losses.. recvBufs %f valid %f diff %f recycle? %d",
+ exist, valid, total, isRecycle);
+
+ return isRecycle;
+}
+
+int32_t AHEVCAssembler::deleteUnitUnderSeq(Queue *queue, uint32_t seq) {
+ int32_t initSize = queue->size();
+ Queue::iterator it = queue->begin();
+ while (it != queue->end()) {
+ if ((uint32_t)(*it)->int32Data() >= seq) {
+ break;
+ }
+ it++;
+ }
+ queue->erase(queue->begin(), it);
+ return initSize - queue->size();
+}
+
+inline void AHEVCAssembler::printNowTimeUs(int64_t start, int64_t now, int64_t play) {
+ ALOGD("start=%lld, now=%lld, played=%lld",
+ (long long)start, (long long)now, (long long)play);
+}
+
+inline void AHEVCAssembler::printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp) {
+ ALOGD("rtp-time(JB)=%u, played-rtp-time(JB)=%lld, expired-rtp-time(JB)=%u isExpired=%d",
+ rtp, (long long)play, exp, isExp);
+}
+
+
+ARTPAssembler::AssemblyStatus AHEVCAssembler::assembleMore(
+ const sp<ARTPSource> &source) {
+ AssemblyStatus status = addNALUnit(source);
+ if (status == MALFORMED_PACKET) {
+ uint64_t msecsSinceLastIFrame = (ALooper::GetNowUs() / 1000) - mLastIFrameProvidedAtMs;
+ if (msecsSinceLastIFrame > 1000) {
+ ALOGV("request FIR to get a new I-Frame, time after "
+ "last I-Frame in %llu ms", (unsigned long long)msecsSinceLastIFrame);
+ source->onIssueFIRByAssembler();
+ }
+ }
+ return status;
+}
+
+void AHEVCAssembler::packetLost() {
+ CHECK(mNextExpectedSeqNoValid);
+ ALOGD("packetLost (expected %u)", mNextExpectedSeqNo);
+
+ ++mNextExpectedSeqNo;
+}
+
+void AHEVCAssembler::onByeReceived() {
+ sp<AMessage> msg = mNotifyMsg->dup();
+ msg->setInt32("eos", true);
+ msg->post();
+}
+
+} // namespace android
diff --git a/media/libstagefright/rtsp/AHEVCAssembler.h b/media/libstagefright/rtsp/AHEVCAssembler.h
new file mode 100644
index 0000000..16fc1c8
--- /dev/null
+++ b/media/libstagefright/rtsp/AHEVCAssembler.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_HEVC_ASSEMBLER_H_
+
+#define A_HEVC_ASSEMBLER_H_
+
+#include "ARTPAssembler.h"
+
+#include <utils/List.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct ABuffer;
+struct AMessage;
+
+struct AHEVCAssembler : public ARTPAssembler {
+ AHEVCAssembler(const sp<AMessage> ¬ify);
+
+ typedef List<sp<ABuffer> > Queue;
+
+protected:
+ virtual ~AHEVCAssembler();
+
+ virtual AssemblyStatus assembleMore(const sp<ARTPSource> &source);
+ virtual void onByeReceived();
+ virtual void packetLost();
+
+private:
+ sp<AMessage> mNotifyMsg;
+
+ uint32_t mAccessUnitRTPTime;
+ bool mNextExpectedSeqNoValid;
+ uint32_t mNextExpectedSeqNo;
+ bool mAccessUnitDamaged;
+ bool mFirstIFrameProvided;
+ uint64_t mLastIFrameProvidedAtMs;
+ int32_t mWidth;
+ int32_t mHeight;
+ List<sp<ABuffer> > mNALUnits;
+
+ int32_t addNack(const sp<ARTPSource> &source);
+ void checkSpsUpdated(const sp<ABuffer> &buffer);
+ void checkIFrameProvided(const sp<ABuffer> &buffer);
+ bool dropFramesUntilIframe(const sp<ABuffer> &buffer);
+ AssemblyStatus addNALUnit(const sp<ARTPSource> &source);
+ void addSingleNALUnit(const sp<ABuffer> &buffer);
+ AssemblyStatus addFragmentedNALUnit(List<sp<ABuffer> > *queue);
+ bool addSingleTimeAggregationPacket(const sp<ABuffer> &buffer);
+
+ void submitAccessUnit();
+
+ int32_t pickProperSeq(const Queue *queue, uint32_t jit, int64_t play);
+ bool recycleUnit(uint32_t start, uint32_t end, uint32_t connected,
+ size_t avail, float goodRatio);
+ int32_t deleteUnitUnderSeq(Queue *queue, uint32_t seq);
+ void printNowTimeUs(int64_t start, int64_t now, int64_t play);
+ void printRTPTime(uint32_t rtp, int64_t play, uint32_t exp, bool isExp);
+
+ DISALLOW_EVIL_CONSTRUCTORS(AHEVCAssembler);
+};
+
+} // namespace android
+
+#endif // A_HEVC_ASSEMBLER_H_
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index 574bd7a..8f4df8e 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -454,6 +454,17 @@
mFormat->setInt32(kKeyWidth, width);
mFormat->setInt32(kKeyHeight, height);
+ } else if (!strncmp(desc.c_str(), "H265/", 5)) {
+ mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
+
+ int32_t width, height;
+ if (!sessionDesc->getDimensions(index, PT, &width, &height)) {
+ width = -1;
+ height = -1;
+ }
+
+ mFormat->setInt32(kKeyWidth, width);
+ mFormat->setInt32(kKeyHeight, height);
} else if (!strncmp(desc.c_str(), "H263-2000/", 10)
|| !strncmp(desc.c_str(), "H263-1998/", 10)) {
mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
diff --git a/media/libstagefright/rtsp/ARTPAssembler.cpp b/media/libstagefright/rtsp/ARTPAssembler.cpp
index befc226..52aa3a0 100644
--- a/media/libstagefright/rtsp/ARTPAssembler.cpp
+++ b/media/libstagefright/rtsp/ARTPAssembler.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#define LOG_TAG "ARTPAssembler"
#include "ARTPAssembler.h"
#include <media/stagefright/foundation/ABuffer.h>
@@ -21,12 +22,16 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <android-base/properties.h>
+
#include <stdint.h>
namespace android {
ARTPAssembler::ARTPAssembler()
- : mFirstFailureTimeUs(-1) {
+ : mShowQueueCnt(0),
+ mFirstFailureTimeUs(-1) {
+ mShowQueue = android::base::GetBoolProperty("debug.stagefright.rtp", false);
}
void ARTPAssembler::onPacketReceived(const sp<ARTPSource> &source) {
@@ -141,4 +146,15 @@
return accessUnit;
}
+void ARTPAssembler::showCurrentQueue(List<sp<ABuffer> > *queue) {
+ AString temp("Queue elem size : ");
+ List<sp<ABuffer> >::iterator it = queue->begin();
+ while (it != queue->end()) {
+ temp.append((*it)->size());
+ temp.append(" \t");
+ it++;
+ }
+ ALOGD("%s",temp.c_str());
+};
+
} // namespace android
diff --git a/media/libstagefright/rtsp/ARTPAssembler.h b/media/libstagefright/rtsp/ARTPAssembler.h
index 4082d4c..191f08e 100644
--- a/media/libstagefright/rtsp/ARTPAssembler.h
+++ b/media/libstagefright/rtsp/ARTPAssembler.h
@@ -56,6 +56,11 @@
static sp<ABuffer> MakeCompoundFromPackets(
const List<sp<ABuffer> > &frames);
+ void showCurrentQueue(List<sp<ABuffer> > *queue);
+
+ bool mShowQueue;
+ int32_t mShowQueueCnt;
+
private:
int64_t mFirstFailureTimeUs;
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index 6a4706d..f57077c 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -30,6 +30,8 @@
#include <media/stagefright/foundation/AString.h>
#include <media/stagefright/foundation/hexdump.h>
+#include <android/multinetwork.h>
+
#include <arpa/inet.h>
#include <sys/socket.h>
@@ -53,6 +55,7 @@
const int64_t ARTPConnection::kSelectTimeoutUs = 1000LL;
struct ARTPConnection::StreamInfo {
+ bool isIPv6;
int mRTPSocket;
int mRTCPSocket;
sp<ASessionDescription> mSessionDesc;
@@ -63,14 +66,21 @@
int64_t mNumRTCPPacketsReceived;
int64_t mNumRTPPacketsReceived;
struct sockaddr_in mRemoteRTCPAddr;
+ struct sockaddr_in6 mRemoteRTCPAddr6;
bool mIsInjected;
+
+ // RTCP Extension for CVO
+ int mCVOExtMap; // will be set to 0 if cvo is not negotiated in sdp
};
ARTPConnection::ARTPConnection(uint32_t flags)
: mFlags(flags),
mPollEventPending(false),
- mLastReceiverReportTimeUs(-1) {
+ mLastReceiverReportTimeUs(-1),
+ mLastBitrateReportTimeUs(-1),
+ mTargetBitrate(-1),
+ mJbTimeMs(300) {
}
ARTPConnection::~ARTPConnection() {
@@ -145,6 +155,117 @@
TRESPASS();
}
+// static
+void ARTPConnection::MakeRTPSocketPair(
+ int *rtpSocket, int *rtcpSocket, const char *localIp, const char *remoteIp,
+ unsigned localPort, unsigned remotePort, int64_t socketNetwork) {
+ bool isIPv6 = false;
+ if (strchr(localIp, ':') != NULL)
+ isIPv6 = true;
+
+ *rtpSocket = socket(isIPv6 ? AF_INET6 : AF_INET, SOCK_DGRAM, 0);
+ CHECK_GE(*rtpSocket, 0);
+
+ bumpSocketBufferSize(*rtpSocket);
+
+ *rtcpSocket = socket(isIPv6 ? AF_INET6 : AF_INET, SOCK_DGRAM, 0);
+ CHECK_GE(*rtcpSocket, 0);
+
+ if (socketNetwork != 0) {
+ ALOGD("trying to bind rtp socket(%d) to network(%llu).",
+ *rtpSocket, (unsigned long long)socketNetwork);
+
+ int result = android_setsocknetwork((net_handle_t)socketNetwork, *rtpSocket);
+ if (result != 0) {
+ ALOGW("failed(%d) to bind rtp socket(%d) to network(%llu)",
+ result, *rtpSocket, (unsigned long long)socketNetwork);
+ }
+ result = android_setsocknetwork((net_handle_t)socketNetwork, *rtcpSocket);
+ if (result != 0) {
+ ALOGW("failed(%d) to bind rtcp socket(%d) to network(%llu)",
+ result, *rtcpSocket, (unsigned long long)socketNetwork);
+ }
+ }
+
+ bumpSocketBufferSize(*rtcpSocket);
+
+ struct sockaddr *addr;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+
+ if (isIPv6) {
+ addr = (struct sockaddr *)&addr6;
+ memset(&addr6, 0, sizeof(addr6));
+ addr6.sin6_family = AF_INET6;
+ inet_pton(AF_INET6, localIp, &addr6.sin6_addr);
+ addr6.sin6_port = htons((uint16_t)localPort);
+ } else {
+ addr = (struct sockaddr *)&addr4;
+ memset(&addr4, 0, sizeof(addr4));
+ addr4.sin_family = AF_INET;
+ addr4.sin_addr.s_addr = inet_addr(localIp);
+ addr4.sin_port = htons((uint16_t)localPort);
+ }
+
+ int sockopt = 1;
+ setsockopt(*rtpSocket, SOL_SOCKET, SO_REUSEADDR, (int *)&sockopt, sizeof(sockopt));
+ setsockopt(*rtcpSocket, SOL_SOCKET, SO_REUSEADDR, (int *)&sockopt, sizeof(sockopt));
+
+ int sizeSockSt = isIPv6 ? sizeof(addr6) : sizeof(addr4);
+
+ if (bind(*rtpSocket, addr, sizeSockSt) == 0) {
+ ALOGI("rtp socket successfully binded. addr=%s:%d", localIp, localPort);
+ } else {
+ ALOGE("failed to bind rtp socket addr=%s:%d err=%s", localIp, localPort, strerror(errno));
+ return;
+ }
+
+ if (isIPv6)
+ addr6.sin6_port = htons(localPort + 1);
+ else
+ addr4.sin_port = htons(localPort + 1);
+
+ if (bind(*rtcpSocket, addr, sizeSockSt) == 0) {
+ ALOGI("rtcp socket successfully binded. addr=%s:%d", localIp, localPort + 1);
+ } else {
+ ALOGE("failed to bind rtcp socket addr=%s:%d err=%s", localIp,
+ localPort + 1, strerror(errno));
+ }
+
+ // Reuse the addr variable as the remote address.
+ if (isIPv6) {
+ memset(&addr6, 0, sizeof(addr6));
+ addr6.sin6_family = AF_INET6;
+ inet_pton(AF_INET6, remoteIp, &addr6.sin6_addr);
+ addr6.sin6_port = htons((uint16_t)remotePort);
+ } else {
+ memset(&addr4, 0, sizeof(addr4));
+ addr4.sin_family = AF_INET;
+ addr4.sin_addr.s_addr = inet_addr(remoteIp);
+ addr4.sin_port = htons((uint16_t)remotePort);
+ }
+ if (connect(*rtpSocket, addr, sizeSockSt) == 0) {
+ ALOGI("rtp socket successfully connected to remote=%s:%d", remoteIp, remotePort);
+ } else {
+ ALOGE("failed to connect rtp socket to remote addr=%s:%d err=%s", remoteIp,
+ remotePort, strerror(errno));
+ return;
+ }
+
+ if (isIPv6)
+ addr6.sin6_port = htons(remotePort + 1);
+ else
+ addr4.sin_port = htons(remotePort + 1);
+
+ if (connect(*rtcpSocket, addr, sizeSockSt) == 0) {
+ ALOGI("rtcp socket successfully connected to remote=%s:%d", remoteIp, remotePort + 1);
+ } else {
+ ALOGE("failed to connect rtcp socket addr=%s:%d err=%s", remoteIp,
+ remotePort + 1, strerror(errno));
+ return;
+ }
+}
+
void ARTPConnection::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatAddStream:
@@ -204,6 +325,19 @@
info->mNumRTCPPacketsReceived = 0;
info->mNumRTPPacketsReceived = 0;
memset(&info->mRemoteRTCPAddr, 0, sizeof(info->mRemoteRTCPAddr));
+ memset(&info->mRemoteRTCPAddr6, 0, sizeof(info->mRemoteRTCPAddr6));
+
+ sp<ASessionDescription> sessionDesc = info->mSessionDesc;
+ info->mCVOExtMap = 0;
+ for (size_t i = 1; i < sessionDesc->countTracks(); ++i) {
+ int32_t cvoExtMap;
+ if (sessionDesc->getCvoExtMap(i, &cvoExtMap)) {
+ info->mCVOExtMap = cvoExtMap;
+ ALOGI("urn:3gpp:video-orientation(cvo) found as extmap:%d", info->mCVOExtMap);
+ } else {
+ ALOGI("urn:3gpp:video-orientation(cvo) not found :%d", info->mCVOExtMap);
+ }
+ }
if (!injected) {
postPollEvent();
@@ -295,17 +429,43 @@
if (err == -ECONNRESET) {
// socket failure, this stream is dead, Jim.
+ sp<AMessage> notify = it->mNotifyMsg->dup();
+ notify->setInt32("rtcp-event", 1);
+ notify->setInt32("payload-type", 400);
+ notify->setInt32("feedback-type", 1);
+ notify->setInt32("sender", it->mSources.valueAt(0)->getSelfID());
+ notify->post();
ALOGW("failed to receive RTP/RTCP datagram.");
it = mStreams.erase(it);
continue;
}
+ // add NACK and FIR that needs to be sent immediately.
+ sp<ABuffer> buffer = new ABuffer(kMaxUDPSize);
+ for (size_t i = 0; i < it->mSources.size(); ++i) {
+ buffer->setRange(0, 0);
+ int cnt = it->mSources.valueAt(i)->addNACK(buffer);
+ if (cnt > 0) {
+ ALOGV("Send NACK for lost %d Packets", cnt);
+ send(&*it, buffer);
+ }
+
+ buffer->setRange(0, 0);
+ it->mSources.valueAt(i)->addFIR(buffer);
+ if (buffer->size() > 0) {
+ ALOGD("Send FIR immediately for lost Packets");
+ send(&*it, buffer);
+ }
+ }
+
++it;
}
}
int64_t nowUs = ALooper::GetNowUs();
+ checkRxBitrate(nowUs);
+
if (mLastReceiverReportTimeUs <= 0
|| mLastReceiverReportTimeUs + 5000000LL <= nowUs) {
sp<ABuffer> buffer = new ABuffer(kMaxUDPSize);
@@ -340,13 +500,7 @@
if (buffer->size() > 0) {
ALOGV("Sending RR...");
- ssize_t n;
- do {
- n = sendto(
- s->mRTCPSocket, buffer->data(), buffer->size(), 0,
- (const struct sockaddr *)&s->mRemoteRTCPAddr,
- sizeof(s->mRemoteRTCPAddr));
- } while (n < 0 && errno == EINTR);
+ ssize_t n = send(s, buffer);
if (n <= 0) {
ALOGW("failed to send RTCP receiver report (%s).",
@@ -377,9 +531,22 @@
sp<ABuffer> buffer = new ABuffer(65536);
+ struct sockaddr *pRemoteRTCPAddr;
+ int sizeSockSt;
+ if (s->isIPv6) {
+ pRemoteRTCPAddr = (struct sockaddr *)&s->mRemoteRTCPAddr6;
+ sizeSockSt = sizeof(struct sockaddr_in6);
+ } else {
+ pRemoteRTCPAddr = (struct sockaddr *)&s->mRemoteRTCPAddr;
+ sizeSockSt = sizeof(struct sockaddr_in);
+ }
socklen_t remoteAddrLen =
(!receiveRTP && s->mNumRTCPPacketsReceived == 0)
- ? sizeof(s->mRemoteRTCPAddr) : 0;
+ ? sizeSockSt : 0;
+
+ if (mFlags & kViLTEConnection) {
+ remoteAddrLen = 0;
+ }
ssize_t nbytes;
do {
@@ -388,8 +555,9 @@
buffer->data(),
buffer->capacity(),
0,
- remoteAddrLen > 0 ? (struct sockaddr *)&s->mRemoteRTCPAddr : NULL,
+ remoteAddrLen > 0 ? pRemoteRTCPAddr : NULL,
remoteAddrLen > 0 ? &remoteAddrLen : NULL);
+ mCumulativeBytes += nbytes;
} while (nbytes < 0 && errno == EINTR);
if (nbytes <= 0) {
@@ -410,6 +578,36 @@
return err;
}
+ssize_t ARTPConnection::send(const StreamInfo *info, const sp<ABuffer> buffer) {
+ struct sockaddr* pRemoteRTCPAddr;
+ int sizeSockSt;
+
+ /* NOTE(review): info->isIPv6 is never assigned in this change;
+ * confirm it is dead and remove it to prevent confusion */
+ if (info->isIPv6) {
+ pRemoteRTCPAddr = (struct sockaddr *)&info->mRemoteRTCPAddr6;
+ sizeSockSt = sizeof(struct sockaddr_in6);
+ } else {
+ pRemoteRTCPAddr = (struct sockaddr *)&info->mRemoteRTCPAddr;
+ sizeSockSt = sizeof(struct sockaddr_in);
+ }
+
+ if (mFlags & kViLTEConnection) {
+ ALOGV("ViLTE RTCP");
+ pRemoteRTCPAddr = NULL;
+ sizeSockSt = 0;
+ }
+
+ ssize_t n;
+ do {
+ n = sendto(
+ info->mRTCPSocket, buffer->data(), buffer->size(), 0,
+ pRemoteRTCPAddr, sizeSockSt);
+ } while (n < 0 && errno == EINTR);
+
+ return n;
+}
+
status_t ARTPConnection::parseRTP(StreamInfo *s, const sp<ABuffer> &buffer) {
if (s->mNumRTPPacketsReceived++ == 0) {
sp<AMessage> notify = s->mNotifyMsg->dup();
@@ -431,6 +629,11 @@
return -1;
}
+ if ((data[1] & 0x7f) == 20 /* decimal */) {
+ // Unassigned payload type
+ return -1;
+ }
+
if (data[0] & 0x20) {
// Padding present.
@@ -454,6 +657,7 @@
return -1;
}
+ int32_t cvoDegrees = -1;
if (data[0] & 0x10) {
// Header eXtension present.
@@ -473,6 +677,7 @@
return -1;
}
+ parseRTPExt(s, (const uint8_t *)extensionData, extensionLength, &cvoDegrees);
payloadOffset += 4 + extensionLength;
}
@@ -487,6 +692,8 @@
meta->setInt32("rtp-time", rtpTime);
meta->setInt32("PT", data[1] & 0x7f);
meta->setInt32("M", data[1] >> 7);
+ if (cvoDegrees >= 0)
+ meta->setInt32("cvo", cvoDegrees);
buffer->setInt32Data(u16at(&data[2]));
buffer->setRange(payloadOffset, size - payloadOffset);
@@ -496,11 +703,65 @@
return OK;
}
+status_t ARTPConnection::parseRTPExt(StreamInfo *s,
+ const uint8_t *extHeader, size_t extLen, int32_t *cvoDegrees) {
+ if (extLen < 4)
+ return -1;
+
+ uint16_t header = (extHeader[0] << 8) | (extHeader[1]);
+ bool isOnebyteHeader = false;
+
+ if (header == 0xBEDE) {
+ isOnebyteHeader = true;
+ } else if (header == 0x1000) {
+ ALOGW("parseRTPExt: two-byte header is not implemented yet");
+ return -1;
+ } else {
+ ALOGW("parseRTPExt: can not recognize header");
+ return -1;
+ }
+
+ const uint8_t *extPayload = extHeader + 4;
+ extLen -= 4;
+ size_t offset = 0; //start from first payload of rtp extension.
+ // one-byte header parser
+ while (isOnebyteHeader && offset < extLen) {
+ uint8_t extmapId = extPayload[offset] >> 4;
+ uint8_t length = (extPayload[offset] & 0xF) + 1;
+ offset++;
+
+ // padding case
+ if (extmapId == 0)
+ continue;
+
+ uint8_t data[16]; // maximum length value
+ for (uint8_t j = 0; offset + j <= extLen && j < length; j++) {
+ data[j] = extPayload[offset + j];
+ }
+
+ offset += length;
+
+ if (extmapId == s->mCVOExtMap) {
+ *cvoDegrees = (int32_t)data[0];
+ return OK;
+ }
+ }
+
+ return BAD_VALUE;
+}
+
status_t ARTPConnection::parseRTCP(StreamInfo *s, const sp<ABuffer> &buffer) {
if (s->mNumRTCPPacketsReceived++ == 0) {
sp<AMessage> notify = s->mNotifyMsg->dup();
notify->setInt32("first-rtcp", true);
notify->post();
+
+ ALOGI("send first-rtcp event to upper layer as ImsRxNotice");
+ sp<AMessage> imsNotify = s->mNotifyMsg->dup();
+ imsNotify->setInt32("rtcp-event", 1);
+ imsNotify->setInt32("payload-type", 101);
+ imsNotify->setInt32("feedback-type", 0);
+ imsNotify->post();
}
const uint8_t *data = buffer->data();
@@ -551,8 +812,12 @@
break;
case 205: // TSFB (transport layer specific feedback)
+ parseTSFB(s, data, headerLength);
+ break;
case 206: // PSFB (payload specific feedback)
// hexdump(data, headerLength);
+ parsePSFB(s, data, headerLength);
+ ALOGI("RTCP packet type %u of size %zu", (unsigned)data[1], headerLength);
break;
case 203:
@@ -621,6 +886,144 @@
return 0;
}
+status_t ARTPConnection::parseTSFB(
+ StreamInfo *s, const uint8_t *data, size_t size) {
+ if (size < 12) {
+ // broken packet
+ return -1;
+ }
+
+ uint8_t msgType = data[0] & 0x1f;
+ uint32_t id = u32at(&data[4]);
+
+ const uint8_t *ptr = &data[12];
+ size -= 12;
+
+ using namespace std;
+ size_t FCISize;
+ switch(msgType) {
+ case 1: // Generic NACK
+ {
+ FCISize = 4;
+ while (size >= FCISize) {
+ uint16_t PID = u16at(&ptr[0]); // lost packet RTP number
+ uint16_t BLP = u16at(&ptr[2]); // Bitmask of following Lost Packets
+
+ size -= FCISize;
+ ptr += FCISize;
+
+ AString list_of_losts;
+ list_of_losts.append(PID);
+ for (int i=0 ; i<16 ; i++) {
+ bool is_lost = BLP & (0x1 << i);
+ if (is_lost) {
+ list_of_losts.append(", ");
+ list_of_losts.append(PID + i);
+ }
+ }
+ ALOGI("Opponent losts packet of RTP %s", list_of_losts.c_str());
+ }
+ break;
+ }
+ case 3: // TMMBR
+ case 4: // TMMBN
+ {
+ FCISize = 8;
+ while (size >= FCISize) {
+ uint32_t MxTBR = u32at(&ptr[4]);
+ uint32_t MxTBRExp = MxTBR >> 26;
+ uint32_t MxTBRMantissa = (MxTBR >> 9) & 0x01FFFF;
+ uint32_t overhead = MxTBR & 0x01FF;
+
+ size -= FCISize;
+ ptr += FCISize;
+
+ uint32_t bitRate = (1 << MxTBRExp) * MxTBRMantissa;
+
+ if (msgType == 3)
+ ALOGI("Op -> UE Req Tx bitrate : %d X 2^%d = %d",
+ MxTBRMantissa, MxTBRExp, bitRate);
+ else if (msgType == 4)
+ ALOGI("OP -> UE Noti Rx bitrate : %d X 2^%d = %d",
+ MxTBRMantissa, MxTBRExp, bitRate);
+
+ sp<AMessage> notify = s->mNotifyMsg->dup();
+ notify->setInt32("rtcp-event", 1);
+ notify->setInt32("payload-type", 205);
+ notify->setInt32("feedback-type", msgType);
+ notify->setInt32("sender", id);
+ notify->setInt32("bit-rate", bitRate);
+ notify->post();
+ ALOGI("overhead : %d", overhead);
+ }
+ break;
+ }
+ default:
+ {
+ ALOGI("Not supported TSFB type %d", msgType);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+status_t ARTPConnection::parsePSFB(
+ StreamInfo *s, const uint8_t *data, size_t size) {
+ if (size < 12) {
+ // broken packet
+ return -1;
+ }
+
+ uint8_t msgType = data[0] & 0x1f;
+ uint32_t id = u32at(&data[4]);
+
+ const uint8_t *ptr = &data[12];
+ size -= 12;
+
+ using namespace std;
+ switch(msgType) {
+ case 1: // Picture Loss Indication (PLI)
+ {
+ if (size > 0) {
+ // PLI does not need parameters
+ break;
+ };
+ sp<AMessage> notify = s->mNotifyMsg->dup();
+ notify->setInt32("rtcp-event", 1);
+ notify->setInt32("payload-type", 206);
+ notify->setInt32("feedback-type", msgType);
+ notify->setInt32("sender", id);
+ notify->post();
+ ALOGI("PLI detected.");
+ break;
+ }
+ case 4: // Full Intra Request (FIR)
+ {
+ if (size < 4) {
+ break;
+ }
+ uint32_t requestedId = u32at(&ptr[0]);
+ if (requestedId == (uint32_t)mSelfID) {
+ sp<AMessage> notify = s->mNotifyMsg->dup();
+ notify->setInt32("rtcp-event", 1);
+ notify->setInt32("payload-type", 206);
+ notify->setInt32("feedback-type", msgType);
+ notify->setInt32("sender", id);
+ notify->post();
+ ALOGI("FIR detected.");
+ }
+ break;
+ }
+ default:
+ {
+ ALOGI("Not supported PSFB type %d", msgType);
+ break;
+ }
+ }
+
+ return 0;
+}
sp<ARTPSource> ARTPConnection::findSource(StreamInfo *info, uint32_t srcId) {
sp<ARTPSource> source;
ssize_t index = info->mSources.indexOfKey(srcId);
@@ -630,6 +1033,12 @@
source = new ARTPSource(
srcId, info->mSessionDesc, info->mIndex, info->mNotifyMsg);
+ if (mFlags & kViLTEConnection) {
+ source->setPeriodicFIR(false);
+ }
+
+ source->setSelfID(mSelfID);
+ source->setJbTime(mJbTimeMs > 0 ? mJbTimeMs : 300);
info->mSources.add(srcId, source);
} else {
source = info->mSources.valueAt(index);
@@ -645,6 +1054,72 @@
msg->post();
}
+void ARTPConnection::setSelfID(const uint32_t selfID) {
+ mSelfID = selfID;
+}
+
+void ARTPConnection::setJbTime(const uint32_t jbTimeMs) {
+ mJbTimeMs = jbTimeMs;
+}
+
+void ARTPConnection::setTargetBitrate(int32_t targetBitrate) {
+ mTargetBitrate = targetBitrate;
+}
+
+void ARTPConnection::checkRxBitrate(int64_t nowUs) {
+ if (mLastBitrateReportTimeUs <= 0) {
+ mCumulativeBytes = 0;
+ mLastBitrateReportTimeUs = nowUs;
+ }
+ else if (mLastBitrateReportTimeUs + 1000000ll <= nowUs) {
+ int32_t timeDiff = (nowUs - mLastBitrateReportTimeUs) / 1000000ll;
+ int32_t bitrate = mCumulativeBytes * 8 / timeDiff;
+ ALOGI("Actual Rx bitrate : %d bits/sec", bitrate);
+
+ sp<ABuffer> buffer = new ABuffer(kMaxUDPSize);
+ List<StreamInfo>::iterator it = mStreams.begin();
+ while (it != mStreams.end()) {
+ StreamInfo *s = &*it;
+ if (s->mIsInjected) {
+ ++it;
+ continue;
+ }
+
+ if (s->mNumRTCPPacketsReceived == 0) {
+ // We have never received any RTCP packets on this stream,
+ // we don't even know where to send a report.
+ ++it;
+ continue;
+ }
+
+ buffer->setRange(0, 0);
+
+ for (size_t i = 0; i < s->mSources.size(); ++i) {
+ sp<ARTPSource> source = s->mSources.valueAt(i);
+ source->notifyPktInfo(bitrate, nowUs);
+ source->addTMMBR(buffer, mTargetBitrate);
+ }
+ if (buffer->size() > 0) {
+ ALOGV("Sending TMMBR...");
+
+ ssize_t n = send(s, buffer);
+
+ if (n <= 0) {
+ ALOGW("failed to send RTCP TMMBR (%s).",
+ n == 0 ? "connection gone" : strerror(errno));
+
+ it = mStreams.erase(it);
+ continue;
+ }
+
+ CHECK_EQ(n, (ssize_t)buffer->size());
+ }
+ ++it;
+ }
+ mCumulativeBytes = 0;
+ mLastBitrateReportTimeUs = nowUs;
+ }
+}
void ARTPConnection::onInjectPacket(const sp<AMessage> &msg) {
int32_t index;
CHECK(msg->findInt32("index", &index));
@@ -672,4 +1147,3 @@
}
} // namespace android
-
diff --git a/media/libstagefright/rtsp/ARTPConnection.h b/media/libstagefright/rtsp/ARTPConnection.h
index d5f7c2e..7c8218f 100644
--- a/media/libstagefright/rtsp/ARTPConnection.h
+++ b/media/libstagefright/rtsp/ARTPConnection.h
@@ -30,6 +30,7 @@
struct ARTPConnection : public AHandler {
enum Flags {
kRegularlyRequestFIR = 2,
+ kViLTEConnection = 4,
};
explicit ARTPConnection(uint32_t flags = 0);
@@ -44,11 +45,22 @@
void injectPacket(int index, const sp<ABuffer> &buffer);
+ void setSelfID(const uint32_t selfID);
+ void setJbTime(const uint32_t jbTimeMs);
+ void setTargetBitrate(int32_t targetBitrate);
+
// Creates a pair of UDP datagram sockets bound to adjacent ports
// (the rtpSocket is bound to an even port, the rtcpSocket to the
// next higher port).
static void MakePortPair(
int *rtpSocket, int *rtcpSocket, unsigned *rtpPort);
+ // Creates a pair of UDP datagram sockets bound to assigned ip and
+ // ports (the rtpSocket is bound to an even port, the rtcpSocket
+ // to the next higher port).
+ static void MakeRTPSocketPair(
+ int *rtpSocket, int *rtcpSocket,
+ const char *localIp, const char *remoteIp,
+ unsigned localPort, unsigned remotePort, int64_t socketNetwork = 0);
protected:
virtual ~ARTPConnection();
@@ -71,18 +83,31 @@
bool mPollEventPending;
int64_t mLastReceiverReportTimeUs;
+ int64_t mLastBitrateReportTimeUs;
+
+ int32_t mSelfID;
+ int32_t mTargetBitrate;
+
+ uint32_t mJbTimeMs;
+
+ int32_t mCumulativeBytes;
void onAddStream(const sp<AMessage> &msg);
void onRemoveStream(const sp<AMessage> &msg);
void onPollStreams();
void onInjectPacket(const sp<AMessage> &msg);
void onSendReceiverReports();
+ void checkRxBitrate(int64_t nowUs);
status_t receive(StreamInfo *info, bool receiveRTP);
+ ssize_t send(const StreamInfo *info, const sp<ABuffer> buffer);
status_t parseRTP(StreamInfo *info, const sp<ABuffer> &buffer);
+ status_t parseRTPExt(StreamInfo *s, const uint8_t *extData, size_t extLen, int32_t *cvoDegrees);
status_t parseRTCP(StreamInfo *info, const sp<ABuffer> &buffer);
status_t parseSR(StreamInfo *info, const uint8_t *data, size_t size);
+ status_t parseTSFB(StreamInfo *info, const uint8_t *data, size_t size);
+ status_t parsePSFB(StreamInfo *info, const uint8_t *data, size_t size);
status_t parseBYE(StreamInfo *info, const uint8_t *data, size_t size);
sp<ARTPSource> findSource(StreamInfo *info, uint32_t id);
diff --git a/media/libstagefright/rtsp/ARTPSource.cpp b/media/libstagefright/rtsp/ARTPSource.cpp
index f5f8128..6303fc4 100644
--- a/media/libstagefright/rtsp/ARTPSource.cpp
+++ b/media/libstagefright/rtsp/ARTPSource.cpp
@@ -22,6 +22,7 @@
#include "AAMRAssembler.h"
#include "AAVCAssembler.h"
+#include "AHEVCAssembler.h"
#include "AH263Assembler.h"
#include "AMPEG2TSAssembler.h"
#include "AMPEG4AudioAssembler.h"
@@ -35,21 +36,31 @@
namespace android {
-static const uint32_t kSourceID = 0xdeadbeef;
+static uint32_t kSourceID = 0xdeadbeef;
ARTPSource::ARTPSource(
uint32_t id,
const sp<ASessionDescription> &sessionDesc, size_t index,
const sp<AMessage> ¬ify)
- : mID(id),
+ : mFirstSeqNumber(0),
+ mFirstRtpTime(0),
+ mFirstSysTime(0),
+ mClockRate(0),
+ mJbTimeMs(300), // default jitter buffer time is 300ms.
+ mFirstSsrc(0),
+ mHighestNackNumber(0),
+ mID(id),
mHighestSeqNumber(0),
mPrevExpected(0),
mBaseSeqNumber(0),
mNumBuffersReceived(0),
mPrevNumBuffersReceived(0),
+ mPrevExpectedForRR(0),
+ mPrevNumBuffersReceivedForRR(0),
mLastNTPTime(0),
mLastNTPTimeUpdateUs(0),
mIssueFIRRequests(false),
+ mIssueFIRByAssembler(false),
mLastFIRRequestUs(-1),
mNextFIRSeqNo((rand() * 256.0) / RAND_MAX),
mNotify(notify) {
@@ -61,6 +72,9 @@
if (!strncmp(desc.c_str(), "H264/", 5)) {
mAssembler = new AAVCAssembler(notify);
mIssueFIRRequests = true;
+ } else if (!strncmp(desc.c_str(), "H265/", 5)) {
+ mAssembler = new AHEVCAssembler(notify);
+ mIssueFIRRequests = true;
} else if (!strncmp(desc.c_str(), "MP4A-LATM/", 10)) {
mAssembler = new AMPEG4AudioAssembler(notify, params);
} else if (!strncmp(desc.c_str(), "H263-1998/", 10)
@@ -112,13 +126,29 @@
bool ARTPSource::queuePacket(const sp<ABuffer> &buffer) {
uint32_t seqNum = (uint32_t)buffer->int32Data();
- if (mNumBuffersReceived++ == 0) {
+ int32_t ssrc = 0;
+ buffer->meta()->findInt32("ssrc", &ssrc);
+
+ if (mNumBuffersReceived++ == 0 && mFirstSysTime == 0) {
+ uint32_t firstRtpTime;
+ CHECK(buffer->meta()->findInt32("rtp-time", (int32_t *)&firstRtpTime));
+ mFirstSysTime = ALooper::GetNowUs();
mHighestSeqNumber = seqNum;
mBaseSeqNumber = seqNum;
+ mFirstRtpTime = firstRtpTime;
+ mFirstSsrc = ssrc;
+ ALOGD("first-rtp arrived: first-rtp-time=%d, sys-time=%lld, seq-num=%u, ssrc=%d",
+ mFirstRtpTime, (long long)mFirstSysTime, mHighestSeqNumber, mFirstSsrc);
+ mClockRate = 90000;
mQueue.push_back(buffer);
return true;
}
+ if (mFirstSsrc != ssrc) {
+ ALOGW("Discarding a buffer due to unexpected ssrc");
+ return false;
+ }
+
// Only the lower 16-bit of the sequence numbers are transmitted,
// derive the high-order bits by choosing the candidate closest
// to the highest sequence number (extended to 32 bits) received so far.
@@ -181,20 +211,34 @@
}
void ARTPSource::addFIR(const sp<ABuffer> &buffer) {
- if (!mIssueFIRRequests) {
+ if (!mIssueFIRRequests && !mIssueFIRByAssembler) {
return;
}
+ bool send = false;
int64_t nowUs = ALooper::GetNowUs();
- if (mLastFIRRequestUs >= 0 && mLastFIRRequestUs + 5000000LL > nowUs) {
- // Send FIR requests at most every 5 secs.
+ int64_t usecsSinceLastFIR = nowUs - mLastFIRRequestUs;
+ if (mLastFIRRequestUs < 0) {
+ // A first FIR, just send it.
+ send = true;
+ } else if (mIssueFIRByAssembler && (usecsSinceLastFIR > 1000000)) {
+ // A FIR issued by Assembler.
+ // Send it if last FIR is not sent within a sec.
+ send = true;
+ } else if (mIssueFIRRequests && (usecsSinceLastFIR > 5000000)) {
+            // A FIR issued periodically, regardless of packet loss.
+ // Send it if last FIR is not sent within 5 secs.
+ send = true;
+ }
+
+ if (!send) {
return;
}
mLastFIRRequestUs = nowUs;
if (buffer->size() + 20 > buffer->capacity()) {
- ALOGW("RTCP buffer too small to accomodate FIR.");
+ ALOGW("RTCP buffer too small to accommodate FIR.");
return;
}
@@ -203,7 +247,7 @@
data[0] = 0x80 | 4;
data[1] = 206; // PSFB
data[2] = 0;
- data[3] = 4;
+ data[3] = 4; // total (4+1) * sizeof(int32_t) = 20 bytes
data[4] = kSourceID >> 24;
data[5] = (kSourceID >> 16) & 0xff;
data[6] = (kSourceID >> 8) & 0xff;
@@ -225,14 +269,16 @@
data[18] = 0x00;
data[19] = 0x00;
- buffer->setRange(buffer->offset(), buffer->size() + 20);
+ buffer->setRange(buffer->offset(), buffer->size() + (data[3] + 1) * sizeof(int32_t));
+
+ mIssueFIRByAssembler = false;
ALOGV("Added FIR request.");
}
void ARTPSource::addReceiverReport(const sp<ABuffer> &buffer) {
if (buffer->size() + 32 > buffer->capacity()) {
- ALOGW("RTCP buffer too small to accomodate RR.");
+ ALOGW("RTCP buffer too small to accommodate RR.");
return;
}
@@ -240,16 +286,16 @@
// According to appendix A.3 in RFC 3550
uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
- int64_t intervalExpected = expected - mPrevExpected;
- int64_t intervalReceived = mNumBuffersReceived - mPrevNumBuffersReceived;
+ int64_t intervalExpected = expected - mPrevExpectedForRR;
+ int64_t intervalReceived = mNumBuffersReceived - mPrevNumBuffersReceivedForRR;
int64_t intervalPacketLost = intervalExpected - intervalReceived;
if (intervalExpected > 0 && intervalPacketLost > 0) {
fraction = (intervalPacketLost << 8) / intervalExpected;
}
- mPrevExpected = expected;
- mPrevNumBuffersReceived = mNumBuffersReceived;
+ mPrevExpectedForRR = expected;
+ mPrevNumBuffersReceivedForRR = mNumBuffersReceived;
int32_t cumulativePacketLost = (int32_t)expected - mNumBuffersReceived;
uint8_t *data = buffer->data() + buffer->size();
@@ -257,7 +303,7 @@
data[0] = 0x80 | 1;
data[1] = 201; // RR
data[2] = 0;
- data[3] = 7;
+ data[3] = 7; // total (7+1) * sizeof(int32_t) = 32 bytes
data[4] = kSourceID >> 24;
data[5] = (kSourceID >> 16) & 0xff;
data[6] = (kSourceID >> 8) & 0xff;
@@ -303,9 +349,193 @@
data[30] = (DLSR >> 8) & 0xff;
data[31] = DLSR & 0xff;
- buffer->setRange(buffer->offset(), buffer->size() + 32);
+ buffer->setRange(buffer->offset(), buffer->size() + (data[3] + 1) * sizeof(int32_t));
}
+void ARTPSource::addTMMBR(const sp<ABuffer> &buffer, int32_t targetBitrate) {
+ if (buffer->size() + 20 > buffer->capacity()) {
+ ALOGW("RTCP buffer too small to accommodate RR.");
+ return;
+ }
+
+ if (targetBitrate <= 0) {
+ return;
+ }
+
+ uint8_t *data = buffer->data() + buffer->size();
+
+ data[0] = 0x80 | 3; // TMMBR
+ data[1] = 205; // TSFB
+ data[2] = 0;
+ data[3] = 4; // total (4+1) * sizeof(int32_t) = 20 bytes
+ data[4] = kSourceID >> 24;
+ data[5] = (kSourceID >> 16) & 0xff;
+ data[6] = (kSourceID >> 8) & 0xff;
+ data[7] = kSourceID & 0xff;
+
+ *(int32_t*)(&data[8]) = 0; // 4 bytes blank
+
+ data[12] = mID >> 24;
+ data[13] = (mID >> 16) & 0xff;
+ data[14] = (mID >> 8) & 0xff;
+ data[15] = mID & 0xff;
+
+ int32_t exp, mantissa;
+
+ // Round off to the nearest 2^4th
+ ALOGI("UE -> Op Req Rx bitrate : %d ", targetBitrate & 0xfffffff0);
+ for (exp=4 ; exp < 32 ; exp++)
+ if (((targetBitrate >> exp) & 0x01) != 0)
+ break;
+ mantissa = targetBitrate >> exp;
+
+ data[16] = ((exp << 2) & 0xfc) | ((mantissa & 0x18000) >> 15);
+ data[17] = (mantissa & 0x07f80) >> 7;
+ data[18] = (mantissa & 0x0007f) << 1;
+ data[19] = 40; // 40 bytes overhead;
+
+ buffer->setRange(buffer->offset(), buffer->size() + (data[3] + 1) * sizeof(int32_t));
+}
+
+int ARTPSource::addNACK(const sp<ABuffer> &buffer) {
+ constexpr size_t kMaxFCIs = 10; // max number of FCIs
+ if (buffer->size() + (3 + kMaxFCIs) * sizeof(int32_t) > buffer->capacity()) {
+ ALOGW("RTCP buffer too small to accommodate NACK.");
+ return -1;
+ }
+
+ uint8_t *data = buffer->data() + buffer->size();
+
+ data[0] = 0x80 | 1; // Generic NACK
+ data[1] = 205; // TSFB
+ data[2] = 0;
+ data[3] = 0; // will be decided later
+ data[4] = kSourceID >> 24;
+ data[5] = (kSourceID >> 16) & 0xff;
+ data[6] = (kSourceID >> 8) & 0xff;
+ data[7] = kSourceID & 0xff;
+
+ data[8] = mID >> 24;
+ data[9] = (mID >> 16) & 0xff;
+ data[10] = (mID >> 8) & 0xff;
+ data[11] = mID & 0xff;
+
+ List<int> list;
+ List<int>::iterator it;
+ getSeqNumToNACK(list, kMaxFCIs);
+ size_t cnt = 0;
+
+ int *FCI = (int *)(data + 12);
+ for (it = list.begin(); it != list.end() && cnt < kMaxFCIs; it++) {
+ *(FCI + cnt) = *it;
+ cnt++;
+ }
+
+    data[3] = (3 + cnt) - 1; // total (3 + #ofFCI) * sizeof(int32_t) bytes
+
+ buffer->setRange(buffer->offset(), buffer->size() + (data[3] + 1) * sizeof(int32_t));
+
+ return cnt;
+}
+
+int ARTPSource::getSeqNumToNACK(List<int>& list, int size) {
+ AutoMutex _l(mMapLock);
+ int cnt = 0;
+
+ std::map<uint16_t, infoNACK>::iterator it;
+ for(it = mNACKMap.begin(); it != mNACKMap.end() && cnt < size; it++) {
+ infoNACK &info_it = it->second;
+ if (info_it.needToNACK) {
+ info_it.needToNACK = false;
+ // switch LSB to MSB for sending N/W
+ uint32_t FCI;
+ uint8_t *temp = (uint8_t *)&FCI;
+ temp[0] = (info_it.seqNum >> 8) & 0xff;
+ temp[1] = (info_it.seqNum) & 0xff;
+ temp[2] = (info_it.mask >> 8) & 0xff;
+ temp[3] = (info_it.mask) & 0xff;
+
+ list.push_back(FCI);
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+void ARTPSource::setSeqNumToNACK(uint16_t seqNum, uint16_t mask, uint16_t nowJitterHeadSeqNum) {
+ AutoMutex _l(mMapLock);
+ infoNACK info = {seqNum, mask, nowJitterHeadSeqNum, true};
+ std::map<uint16_t, infoNACK>::iterator it;
+
+ it = mNACKMap.find(seqNum);
+ if (it != mNACKMap.end()) {
+ infoNACK &info_it = it->second;
+ // renew if (mask or head seq) is changed
+ if ((info_it.mask != mask) || (info_it.nowJitterHeadSeqNum != nowJitterHeadSeqNum)) {
+ info_it = info;
+ }
+ } else {
+ mNACKMap[seqNum] = info;
+ }
+
+    // Drop all NACK entries that are far behind the jitter buffer's current head sequence number.
+ it = mNACKMap.begin();
+ while (it != mNACKMap.end()) {
+ infoNACK &info_it = it->second;
+
+ int diff = nowJitterHeadSeqNum - info_it.nowJitterHeadSeqNum;
+ if (diff > 100) {
+ ALOGV("Delete %d pkt from NACK map ", info_it.seqNum);
+ it = mNACKMap.erase(it);
+ } else {
+ it++;
+ }
+ }
+
+}
+
+uint32_t ARTPSource::getSelfID() {
+ return kSourceID;
+}
+
+void ARTPSource::setSelfID(const uint32_t selfID) {
+ kSourceID = selfID;
+}
+
+void ARTPSource::setJbTime(const uint32_t jbTimeMs) {
+ mJbTimeMs = jbTimeMs;
+}
+
+void ARTPSource::setPeriodicFIR(bool enable) {
+ ALOGD("setPeriodicFIR %d", enable);
+ mIssueFIRRequests = enable;
+}
+
+void ARTPSource::notifyPktInfo(int32_t bitrate, int64_t /*time*/) {
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("rtcp-event", 1);
+ notify->setInt32("payload-type", 102);
+ notify->setInt32("feedback-type", 0);
+ // sending target bitrate up to application to share rtp quality.
+ notify->setInt32("bit-rate", bitrate);
+ notify->setInt32("highest-seq-num", mHighestSeqNumber);
+ notify->setInt32("base-seq-num", mBaseSeqNumber);
+ notify->setInt32("prev-expected", mPrevExpected);
+ notify->setInt32("num-buf-recv", mNumBuffersReceived);
+ notify->setInt32("prev-num-buf-recv", mPrevNumBuffersReceived);
+ notify->post();
+
+ uint32_t expected = mHighestSeqNumber - mBaseSeqNumber + 1;
+ mPrevExpected = expected;
+ mPrevNumBuffersReceived = mNumBuffersReceived;
+}
+
+void ARTPSource::onIssueFIRByAssembler() {
+ mIssueFIRByAssembler = true;
+}
+
+void ARTPSource::noticeAbandonBuffer(int cnt) {
+ mNumBuffersReceived -= cnt;
+}
} // namespace android
-
-
diff --git a/media/libstagefright/rtsp/ARTPSource.h b/media/libstagefright/rtsp/ARTPSource.h
index f44e83f..ea683a0 100644
--- a/media/libstagefright/rtsp/ARTPSource.h
+++ b/media/libstagefright/rtsp/ARTPSource.h
@@ -23,6 +23,9 @@
#include <media/stagefright/foundation/ABase.h>
#include <utils/List.h>
#include <utils/RefBase.h>
+#include <utils/Thread.h>
+
+#include <map>
namespace android {
@@ -45,22 +48,58 @@
void addReceiverReport(const sp<ABuffer> &buffer);
void addFIR(const sp<ABuffer> &buffer);
+ void addTMMBR(const sp<ABuffer> &buffer, int32_t targetBitrate);
+ int addNACK(const sp<ABuffer> &buffer);
+ void setSeqNumToNACK(uint16_t seqNum, uint16_t mask, uint16_t nowJitterHeadSeqNum);
+ uint32_t getSelfID();
+ void setSelfID(const uint32_t selfID);
+ void setJbTime(const uint32_t jbTimeMs);
+ void setPeriodicFIR(bool enable);
+ void notifyPktInfo(int32_t bitrate, int64_t time);
+ // FIR needs to be sent by missing packet or broken video image.
+ void onIssueFIRByAssembler();
+
+ void noticeAbandonBuffer(int cnt=1);
+
+ int32_t mFirstSeqNumber;
+ uint32_t mFirstRtpTime;
+ int64_t mFirstSysTime;
+ int32_t mClockRate;
+
+ uint32_t mJbTimeMs;
+ int32_t mFirstSsrc;
+ int32_t mHighestNackNumber;
private:
+
uint32_t mID;
uint32_t mHighestSeqNumber;
uint32_t mPrevExpected;
uint32_t mBaseSeqNumber;
int32_t mNumBuffersReceived;
int32_t mPrevNumBuffersReceived;
+ uint32_t mPrevExpectedForRR;
+ int32_t mPrevNumBuffersReceivedForRR;
List<sp<ABuffer> > mQueue;
sp<ARTPAssembler> mAssembler;
+ typedef struct infoNACK {
+ uint16_t seqNum;
+ uint16_t mask;
+ uint16_t nowJitterHeadSeqNum;
+ bool needToNACK;
+ } infoNACK;
+
+ Mutex mMapLock;
+ std::map<uint16_t, infoNACK> mNACKMap;
+ int getSeqNumToNACK(List<int>& list, int size);
+
uint64_t mLastNTPTime;
int64_t mLastNTPTimeUpdateUs;
bool mIssueFIRRequests;
+ bool mIssueFIRByAssembler;
int64_t mLastFIRRequestUs;
uint8_t mNextFIRSeqNo;
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index 58d6086..76afb04 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -35,10 +35,32 @@
#define PT 97
#define PT_STR "97"
+#define H264_NALU_MASK 0x1F
+#define H264_NALU_SPS 0x7
+#define H264_NALU_PPS 0x8
+#define H264_NALU_IFRAME 0x5
+#define H264_NALU_PFRAME 0x1
+
+#define H265_NALU_MASK 0x3F
+#define H265_NALU_VPS 0x20
+#define H265_NALU_SPS 0x21
+#define H265_NALU_PPS 0x22
+
+#define LINK_HEADER_SIZE 14
+#define IP_HEADER_SIZE 20
+#define UDP_HEADER_SIZE 8
+#define TCPIP_HEADER_SIZE (LINK_HEADER_SIZE + IP_HEADER_SIZE + UDP_HEADER_SIZE)
+#define RTP_HEADER_SIZE 12
+#define RTP_HEADER_EXT_SIZE 8
+#define RTP_FU_HEADER_SIZE 2
+#define RTP_PAYLOAD_ROOM_SIZE 100 // ROOM size for IPv6 header, ESP and etc.
+
+
namespace android {
// static const size_t kMaxPacketSize = 65507; // maximum payload in UDP over IP
-static const size_t kMaxPacketSize = 1500;
+static const size_t kMaxPacketSize = 1280;
+static char kCNAME[255] = "someone@somewhere";
static int UniformRand(int limit) {
return ((double)rand() * limit) / RAND_MAX;
@@ -48,15 +70,19 @@
: mFlags(0),
mFd(dup(fd)),
mLooper(new ALooper),
- mReflector(new AHandlerReflector<ARTPWriter>(this)) {
+ mReflector(new AHandlerReflector<ARTPWriter>(this)),
+ mTrafficRec(new TrafficRecorder<uint32_t, size_t>(128)) {
CHECK_GE(fd, 0);
+ mIsIPv6 = false;
mLooper->setName("rtp writer");
mLooper->registerHandler(mReflector);
mLooper->start();
- mSocket = socket(AF_INET, SOCK_DGRAM, 0);
- CHECK_GE(mSocket, 0);
+ mRTPSocket = socket(AF_INET, SOCK_DGRAM, 0);
+ CHECK_GE(mRTPSocket, 0);
+ mRTCPSocket = socket(AF_INET, SOCK_DGRAM, 0);
+ CHECK_GE(mRTCPSocket, 0);
memset(mRTPAddr.sin_zero, 0, sizeof(mRTPAddr.sin_zero));
mRTPAddr.sin_family = AF_INET;
@@ -72,6 +98,44 @@
mRTCPAddr = mRTPAddr;
mRTCPAddr.sin_port = htons(ntohs(mRTPAddr.sin_port) | 1);
+ mSPSBuf = NULL;
+ mPPSBuf = NULL;
+
+#if LOG_TO_FILES
+ mRTPFd = open(
+ "/data/misc/rtpout.bin",
+ O_WRONLY | O_CREAT | O_TRUNC,
+ 0644);
+ CHECK_GE(mRTPFd, 0);
+
+ mRTCPFd = open(
+ "/data/misc/rtcpout.bin",
+ O_WRONLY | O_CREAT | O_TRUNC,
+ 0644);
+ CHECK_GE(mRTCPFd, 0);
+#endif
+}
+
+ARTPWriter::ARTPWriter(int fd, String8& localIp, int localPort, String8& remoteIp,
+ int remotePort, uint32_t seqNo)
+ : mFlags(0),
+ mFd(dup(fd)),
+ mLooper(new ALooper),
+ mReflector(new AHandlerReflector<ARTPWriter>(this)),
+ mTrafficRec(new TrafficRecorder<uint32_t, size_t>(128)) {
+ CHECK_GE(fd, 0);
+ mIsIPv6 = false;
+
+ mLooper->setName("rtp writer");
+ mLooper->registerHandler(mReflector);
+ mLooper->start();
+
+ makeSocketPairAndBind(localIp, localPort, remoteIp , remotePort);
+ mVPSBuf = NULL;
+ mSPSBuf = NULL;
+ mPPSBuf = NULL;
+
+ mSeqNo = seqNo;
#if LOG_TO_FILES
mRTPFd = open(
@@ -89,6 +153,21 @@
}
ARTPWriter::~ARTPWriter() {
+ if (mVPSBuf != NULL) {
+ mVPSBuf->release();
+ mVPSBuf = NULL;
+ }
+
+ if (mSPSBuf != NULL) {
+ mSPSBuf->release();
+ mSPSBuf = NULL;
+ }
+
+ if (mPPSBuf != NULL) {
+ mPPSBuf->release();
+ mPPSBuf = NULL;
+ }
+
#if LOG_TO_FILES
close(mRTCPFd);
mRTCPFd = -1;
@@ -97,8 +176,11 @@
mRTPFd = -1;
#endif
- close(mSocket);
- mSocket = -1;
+ close(mRTPSocket);
+ mRTPSocket = -1;
+
+ close(mRTCPSocket);
+ mRTCPSocket = -1;
close(mFd);
mFd = -1;
@@ -114,28 +196,61 @@
return (mFlags & kFlagEOS) != 0;
}
-status_t ARTPWriter::start(MetaData * /* params */) {
+status_t ARTPWriter::start(MetaData * params) {
Mutex::Autolock autoLock(mLock);
if (mFlags & kFlagStarted) {
return INVALID_OPERATION;
}
mFlags &= ~kFlagEOS;
- mSourceID = rand();
- mSeqNo = UniformRand(65536);
- mRTPTimeBase = rand();
+ if (mSourceID == 0)
+ mSourceID = rand();
+ if (mSeqNo == 0)
+ mSeqNo = UniformRand(65536);
+ mRTPTimeBase = 0;
mNumRTPSent = 0;
mNumRTPOctetsSent = 0;
mLastRTPTime = 0;
mLastNTPTime = 0;
+ mOpponentID = 0;
+ mBitrate = 192000;
mNumSRsSent = 0;
+ mRTPCVOExtMap = -1;
+ mRTPCVODegrees = 0;
+ mRTPSockNetwork = 0;
const char *mime;
CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime));
+ int32_t selfID = 0;
+ if (params->findInt32(kKeySelfID, &selfID))
+ mSourceID = selfID;
+
+ int32_t payloadType = 0;
+ if (params->findInt32(kKeyPayloadType, &payloadType))
+ mPayloadType = payloadType;
+
+ int32_t rtpExtMap = 0;
+ if (params->findInt32(kKeyRtpExtMap, &rtpExtMap))
+ mRTPCVOExtMap = rtpExtMap;
+
+ int32_t rtpCVODegrees = 0;
+ if (params->findInt32(kKeyRtpCvoDegrees, &rtpCVODegrees))
+ mRTPCVODegrees = rtpCVODegrees;
+
+ int32_t dscp = 0;
+ if (params->findInt32(kKeyRtpDscp, &dscp))
+ updateSocketDscp(dscp);
+
+ int64_t sockNetwork = 0;
+ if (params->findInt64(kKeySocketNetwork, &sockNetwork))
+ updateSocketNetwork(sockNetwork);
+
mMode = INVALID;
if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
mMode = H264;
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ mMode = H265;
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_H263)) {
mMode = H263;
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
@@ -187,11 +302,137 @@
}
}
+static const uint8_t SPCSize = 4; // Start Prefix Code Size
+static const uint8_t startPrefixCode[SPCSize] = {0, 0, 0, 1};
+static const uint8_t spcKMPidx[SPCSize] = {0, 0, 2, 0};
+static void SpsPpsParser(MediaBufferBase *buffer,
+ MediaBufferBase **spsBuffer, MediaBufferBase **ppsBuffer) {
+
+ while (buffer->range_length() > 0) {
+ const uint8_t *NALPtr = (const uint8_t *)buffer->data() + buffer->range_offset();
+
+ MediaBufferBase **targetPtr = NULL;
+ if ((*NALPtr & H264_NALU_MASK) == H264_NALU_SPS) {
+ targetPtr = spsBuffer;
+ } else if ((*NALPtr & H264_NALU_MASK) == H264_NALU_PPS) {
+ targetPtr = ppsBuffer;
+ } else {
+ return;
+ }
+ ALOGV("SPS(7) or PPS(8) found. Type %d", *NALPtr & H264_NALU_MASK);
+
+ uint32_t bufferSize = buffer->range_length();
+ MediaBufferBase *&target = *targetPtr;
+ uint32_t i = 0, j = 0;
+ bool isBoundFound = false;
+ for (i = 0; i < bufferSize; i++) {
+ while (j > 0 && NALPtr[i] != startPrefixCode[j]) {
+ j = spcKMPidx[j - 1];
+ }
+ if (NALPtr[i] == startPrefixCode[j]) {
+ j++;
+ if (j == SPCSize) {
+ isBoundFound = true;
+ break;
+ }
+ }
+ }
+
+ uint32_t targetSize;
+ if (target != NULL) {
+ target->release();
+ }
+ // note that targetSize is never 0 as the first byte is never part
+ // of a start prefix
+ if (isBoundFound) {
+ targetSize = i - SPCSize + 1;
+ target = MediaBufferBase::Create(targetSize);
+ memcpy(target->data(),
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ targetSize);
+ buffer->set_range(buffer->range_offset() + targetSize + SPCSize,
+ buffer->range_length() - targetSize - SPCSize);
+ } else {
+ targetSize = bufferSize;
+ target = MediaBufferBase::Create(targetSize);
+ memcpy(target->data(),
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ targetSize);
+ buffer->set_range(buffer->range_offset() + bufferSize, 0);
+ return;
+ }
+ }
+}
+
+static void VpsSpsPpsParser(MediaBufferBase *buffer,
+ MediaBufferBase **vpsBuffer, MediaBufferBase **spsBuffer, MediaBufferBase **ppsBuffer) {
+
+ while (buffer->range_length() > 0) {
+ const uint8_t *NALPtr = (const uint8_t *)buffer->data() + buffer->range_offset();
+ uint8_t nalType = ((*NALPtr) >> 1) & H265_NALU_MASK;
+
+ MediaBufferBase **targetPtr = NULL;
+ if (nalType == H265_NALU_VPS) {
+ targetPtr = vpsBuffer;
+ } else if (nalType == H265_NALU_SPS) {
+ targetPtr = spsBuffer;
+ } else if (nalType == H265_NALU_PPS) {
+ targetPtr = ppsBuffer;
+ } else {
+ return;
+ }
+ ALOGV("VPS(32) SPS(33) or PPS(34) found. Type %d", nalType);
+
+ uint32_t bufferSize = buffer->range_length();
+ MediaBufferBase *&target = *targetPtr;
+ uint32_t i = 0, j = 0;
+ bool isBoundFound = false;
+ for (i = 0; i < bufferSize; i++) {
+ while (j > 0 && NALPtr[i] != startPrefixCode[j]) {
+ j = spcKMPidx[j - 1];
+ }
+ if (NALPtr[i] == startPrefixCode[j]) {
+ j++;
+ if (j == SPCSize) {
+ isBoundFound = true;
+ break;
+ }
+ }
+ }
+
+ if (target != NULL) {
+ target->release();
+ }
+ uint32_t targetSize;
+ // note that targetSize is never 0 as the first byte is never part
+ // of a start prefix
+ if (isBoundFound) {
+ targetSize = i - SPCSize + 1;
+ target = MediaBufferBase::Create(j);
+ memcpy(target->data(),
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ j);
+ buffer->set_range(buffer->range_offset() + targetSize + SPCSize,
+ buffer->range_length() - targetSize - SPCSize);
+ } else {
+ targetSize = bufferSize;
+ target = MediaBufferBase::Create(targetSize);
+ memcpy(target->data(),
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ targetSize);
+ buffer->set_range(buffer->range_offset() + bufferSize, 0);
+ return;
+ }
+ }
+}
+
void ARTPWriter::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatStart:
{
- CHECK_EQ(mSource->start(), (status_t)OK);
+ sp<MetaData> meta = new MetaData();
+ meta->setInt64(kKeyTime, 10ll);
+ CHECK_EQ(mSource->start(meta.get()), (status_t)OK);
#if 0
if (mMode == H264) {
@@ -264,6 +505,18 @@
}
}
+void ARTPWriter::setTMMBNInfo(uint32_t opponentID, uint32_t bitrate) {
+ mOpponentID = opponentID;
+ mBitrate = bitrate;
+
+ sp<ABuffer> buffer = new ABuffer(65536);
+ buffer->setRange(0, 0);
+
+ addTMMBN(buffer);
+
+ send(buffer, true /* isRTCP */);
+}
+
void ARTPWriter::onRead(const sp<AMessage> &msg) {
MediaBufferBase *mediaBuf;
status_t err = mSource->read(&mediaBuf);
@@ -281,7 +534,16 @@
if (mMode == H264) {
StripStartcode(mediaBuf);
- sendAVCData(mediaBuf);
+ SpsPpsParser(mediaBuf, &mSPSBuf, &mPPSBuf);
+ if (mediaBuf->range_length() > 0) {
+ sendAVCData(mediaBuf);
+ }
+ } else if (mMode == H265) {
+ StripStartcode(mediaBuf);
+ VpsSpsPpsParser(mediaBuf, &mVPSBuf, &mSPSBuf, &mPPSBuf);
+ if (mediaBuf->range_length() > 0) {
+ sendHEVCData(mediaBuf);
+ }
} else if (mMode == H263) {
sendH263Data(mediaBuf);
} else if (mMode == AMR_NB || mMode == AMR_WB) {
@@ -309,12 +571,38 @@
}
void ARTPWriter::send(const sp<ABuffer> &buffer, bool isRTCP) {
- ssize_t n = sendto(
- mSocket, buffer->data(), buffer->size(), 0,
- (const struct sockaddr *)(isRTCP ? &mRTCPAddr : &mRTPAddr),
- sizeof(mRTCPAddr));
+ int sizeSockSt;
+ struct sockaddr *remAddr;
- CHECK_EQ(n, (ssize_t)buffer->size());
+ if (mIsIPv6) {
+ sizeSockSt = sizeof(struct sockaddr_in6);
+ if (isRTCP)
+ remAddr = (struct sockaddr *)&mRTCPAddr6;
+ else
+ remAddr = (struct sockaddr *)&mRTPAddr6;
+ } else {
+ sizeSockSt = sizeof(struct sockaddr_in);
+ if (isRTCP)
+ remAddr = (struct sockaddr *)&mRTCPAddr;
+ else
+ remAddr = (struct sockaddr *)&mRTPAddr;
+ }
+
+ // Unseal code if moderator is needed (prevent overflow of instant bandwidth)
+ // Set limit bits per period through the moderator.
+ // ex) 6KByte/10ms = 48KBit/10ms = 4.8MBit/s instant limit
+ // ModerateInstantTraffic(10, 6 * 1024);
+
+ ssize_t n = sendto(isRTCP ? mRTCPSocket : mRTPSocket,
+ buffer->data(), buffer->size(), 0, remAddr, sizeSockSt);
+
+ if (n != (ssize_t)buffer->size()) {
+ ALOGW("packets can not be sent. ret=%d, buf=%d", (int)n, (int)buffer->size());
+ } else {
+ // Record current traffic & Print bits while last 1sec (1000ms)
+ mTrafficRec->writeBytes(buffer->size());
+ mTrafficRec->printAccuBitsForLastPeriod(1000, 1000);
+ }
#if LOG_TO_FILES
int fd = isRTCP ? mRTCPFd : mRTPFd;
@@ -379,7 +667,6 @@
data[offset++] = 1; // CNAME
- static const char *kCNAME = "someone@somewhere";
data[offset++] = strlen(kCNAME);
memcpy(&data[offset], kCNAME, strlen(kCNAME));
@@ -416,9 +703,52 @@
buffer->setRange(buffer->offset(), buffer->size() + offset);
}
+void ARTPWriter::addTMMBN(const sp<ABuffer> &buffer) {
+ if (buffer->size() + 20 > buffer->capacity()) {
+ ALOGW("RTCP buffer too small to accommodate SR.");
+ return;
+ }
+ if (mOpponentID == 0)
+ return;
+
+ uint8_t *data = buffer->data() + buffer->size();
+
+ data[0] = 0x80 | 4; // TMMBN
+ data[1] = 205; // TSFB
+ data[2] = 0;
+ data[3] = 4; // total (4+1) * sizeof(int32_t) = 20 bytes
+ data[4] = mSourceID >> 24;
+ data[5] = (mSourceID >> 16) & 0xff;
+ data[6] = (mSourceID >> 8) & 0xff;
+ data[7] = mSourceID & 0xff;
+
+ *(int32_t*)(&data[8]) = 0; // 4 bytes blank
+
+ data[12] = mOpponentID >> 24;
+ data[13] = (mOpponentID >> 16) & 0xff;
+ data[14] = (mOpponentID >> 8) & 0xff;
+ data[15] = mOpponentID & 0xff;
+
+ int32_t exp, mantissa;
+
+ // Round off to the nearest 2^4th
+ ALOGI("UE -> Op Noti Tx bitrate : %d ", mBitrate & 0xfffffff0);
+ for (exp=4 ; exp < 32 ; exp++)
+ if (((mBitrate >> exp) & 0x01) != 0)
+ break;
+ mantissa = mBitrate >> exp;
+
+ data[16] = ((exp << 2) & 0xfc) | ((mantissa & 0x18000) >> 15);
+ data[17] = (mantissa & 0x07f80) >> 7;
+ data[18] = (mantissa & 0x0007f) << 1;
+ data[19] = 40; // 40 bytes overhead;
+
+ buffer->setRange(buffer->offset(), buffer->size() + 20);
+}
+
// static
uint64_t ARTPWriter::GetNowNTP() {
- uint64_t nowUs = ALooper::GetNowUs();
+ uint64_t nowUs = systemTime(SYSTEM_TIME_REALTIME) / 1000ll;
nowUs += ((70LL * 365 + 17) * 24) * 60 * 60 * 1000000LL;
@@ -463,7 +793,7 @@
sdp.append("m=audio ");
}
- sdp.append(AStringPrintf("%d", ntohs(mRTPAddr.sin_port)));
+ sdp.append(AStringPrintf("%d", mIsIPv6 ? ntohs(mRTPAddr6.sin6_port) : ntohs(mRTPAddr.sin_port)));
sdp.append(
" RTP/AVP " PT_STR "\r\n"
"b=AS 320000\r\n"
@@ -569,24 +899,91 @@
send(buffer, true /* isRTCP */);
}
-void ARTPWriter::sendAVCData(MediaBufferBase *mediaBuf) {
+void ARTPWriter::sendSPSPPSIfIFrame(MediaBufferBase *mediaBuf, int64_t timeUs) {
+ CHECK(mediaBuf->range_length() > 0);
+ const uint8_t *mediaData =
+ (const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
+
+ if ((mediaData[0] & H264_NALU_MASK) != H264_NALU_IFRAME) {
+ return;
+ }
+
+ if (mSPSBuf != NULL) {
+ mSPSBuf->meta_data().setInt64(kKeyTime, timeUs);
+ mSPSBuf->meta_data().setInt32(kKeySps, 1);
+ sendAVCData(mSPSBuf);
+ }
+
+ if (mPPSBuf != NULL) {
+ mPPSBuf->meta_data().setInt64(kKeyTime, timeUs);
+ mPPSBuf->meta_data().setInt32(kKeyPps, 1);
+ sendAVCData(mPPSBuf);
+ }
+}
+
+void ARTPWriter::sendVPSSPSPPSIfIFrame(MediaBufferBase *mediaBuf, int64_t timeUs) {
+ CHECK(mediaBuf->range_length() > 0);
+ const uint8_t *mediaData =
+ (const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
+
+ int nalType = ((mediaData[0] >> 1) & H265_NALU_MASK);
+ if (!(nalType >= 16 && nalType <= 21) /*H265_NALU_IFRAME*/) {
+ return;
+ }
+
+ if (mVPSBuf != NULL) {
+ mVPSBuf->meta_data().setInt64(kKeyTime, timeUs);
+ mVPSBuf->meta_data().setInt32(kKeyVps, 1);
+ sendHEVCData(mVPSBuf);
+ }
+
+ if (mSPSBuf != NULL) {
+ mSPSBuf->meta_data().setInt64(kKeyTime, timeUs);
+ mSPSBuf->meta_data().setInt32(kKeySps, 1);
+ sendHEVCData(mSPSBuf);
+ }
+
+ if (mPPSBuf != NULL) {
+ mPPSBuf->meta_data().setInt64(kKeyTime, timeUs);
+ mPPSBuf->meta_data().setInt32(kKeyPps, 1);
+ sendHEVCData(mPPSBuf);
+ }
+}
+
+void ARTPWriter::sendHEVCData(MediaBufferBase *mediaBuf) {
// 12 bytes RTP header + 2 bytes for the FU-indicator and FU-header.
CHECK_GE(kMaxPacketSize, 12u + 2u);
int64_t timeUs;
CHECK(mediaBuf->meta_data().findInt64(kKeyTime, &timeUs));
- uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100LL);
+ sendVPSSPSPPSIfIFrame(mediaBuf, timeUs);
+ uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100ll);
+
+ CHECK(mediaBuf->range_length() > 0);
const uint8_t *mediaData =
(const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
+ int32_t isNonVCL = 0;
+ if (mediaBuf->meta_data().findInt32(kKeyVps, &isNonVCL) ||
+ mediaBuf->meta_data().findInt32(kKeySps, &isNonVCL) ||
+ mediaBuf->meta_data().findInt32(kKeyPps, &isNonVCL)) {
+ isNonVCL = 1;
+ }
+
sp<ABuffer> buffer = new ABuffer(kMaxPacketSize);
- if (mediaBuf->range_length() + 12 <= buffer->capacity()) {
+
+ if (mediaBuf->range_length() + TCPIP_HEADER_SIZE + RTP_HEADER_SIZE + RTP_HEADER_EXT_SIZE
+ + RTP_PAYLOAD_ROOM_SIZE <= buffer->capacity()) {
// The data fits into a single packet
uint8_t *data = buffer->data();
data[0] = 0x80;
- data[1] = (1 << 7) | PT; // M-bit
+ if (isNonVCL) {
+ data[1] = mPayloadType; // Marker bit should not be set in case of Non-VCL
+ } else {
+ data[1] = (1 << 7) | mPayloadType; // M-bit
+ }
data[2] = (mSeqNo >> 8) & 0xff;
data[3] = mSeqNo & 0xff;
data[4] = rtpTime >> 24;
@@ -611,21 +1008,24 @@
} else {
// FU-A
- unsigned nalType = mediaData[0];
- size_t offset = 1;
+ unsigned nalType = (mediaData[0] >> 1) & H265_NALU_MASK;
+ ALOGV("H265 nalType 0x%x, data[0]=0x%x", nalType, mediaData[0]);
+ size_t offset = 2; //H265 payload header is 16 bit.
bool firstPacket = true;
while (offset < mediaBuf->range_length()) {
size_t size = mediaBuf->range_length() - offset;
bool lastPacket = true;
- if (size + 12 + 2 > buffer->capacity()) {
+ if (size + TCPIP_HEADER_SIZE + RTP_HEADER_SIZE + RTP_HEADER_EXT_SIZE +
+ RTP_FU_HEADER_SIZE + RTP_PAYLOAD_ROOM_SIZE > buffer->capacity()) {
lastPacket = false;
- size = buffer->capacity() - 12 - 2;
+ size = buffer->capacity() - TCPIP_HEADER_SIZE - RTP_HEADER_SIZE -
+ RTP_HEADER_EXT_SIZE - RTP_FU_HEADER_SIZE - RTP_PAYLOAD_ROOM_SIZE;
}
uint8_t *data = buffer->data();
data[0] = 0x80;
- data[1] = (lastPacket ? (1 << 7) : 0x00) | PT; // M-bit
+ data[1] = (lastPacket ? (1 << 7) : 0x00) | mPayloadType; // M-bit
data[2] = (mSeqNo >> 8) & 0xff;
data[3] = mSeqNo & 0xff;
data[4] = rtpTime >> 24;
@@ -637,18 +1037,39 @@
data[10] = (mSourceID >> 8) & 0xff;
data[11] = mSourceID & 0xff;
- data[12] = 28 | (nalType & 0xe0);
+ /* H265 payload header is 16 bit
+ 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |F| Type | Layer ID | TID |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ ALOGV("H265 payload header 0x%x %x", mediaData[0], mediaData[1]);
+ // excludes Type from 1st byte of H265 payload header.
+ data[12] = mediaData[0] & 0x81;
+ // fills Type as FU (49 == 0x31)
+ data[12] = data[12] | (0x31 << 1);
+ data[13] = mediaData[1];
+
+ ALOGV("H265 FU header 0x%x %x", data[12], data[13]);
CHECK(!firstPacket || !lastPacket);
+ /*
+ FU INDICATOR HDR
+ 0 1 2 3 4 5 6 7
+ +-+-+-+-+-+-+-+
+ |S|E| Type |
+ +-+-+-+-+-+-+-+
+ */
- data[13] =
+ data[14] =
(firstPacket ? 0x80 : 0x00)
| (lastPacket ? 0x40 : 0x00)
- | (nalType & 0x1f);
+ | (nalType & H265_NALU_MASK);
+ ALOGV("H265 FU indicator 0x%x", data[14]);
- memcpy(&data[14], &mediaData[offset], size);
+ memcpy(&data[15], &mediaData[offset], size);
- buffer->setRange(0, 14 + size);
+ buffer->setRange(0, 15 + size);
send(buffer, false /* isRTCP */);
@@ -663,6 +1084,172 @@
mLastRTPTime = rtpTime;
mLastNTPTime = GetNowNTP();
+
+}
+
+void ARTPWriter::sendAVCData(MediaBufferBase *mediaBuf) {
+ // 12 bytes RTP header + 2 bytes for the FU-indicator and FU-header.
+ CHECK_GE(kMaxPacketSize, 12u + 2u);
+
+ int64_t timeUs;
+ CHECK(mediaBuf->meta_data().findInt64(kKeyTime, &timeUs));
+
+ sendSPSPPSIfIFrame(mediaBuf, timeUs);
+
+ uint32_t rtpTime = mRTPTimeBase + (timeUs * 9 / 100LL);
+
+ CHECK(mediaBuf->range_length() > 0);
+ const uint8_t *mediaData =
+ (const uint8_t *)mediaBuf->data() + mediaBuf->range_offset();
+
+ int32_t sps, pps;
+ bool isSpsPps = false;
+ if (mediaBuf->meta_data().findInt32(kKeySps, &sps) ||
+ mediaBuf->meta_data().findInt32(kKeyPps, &pps)) {
+ isSpsPps = true;
+ }
+
+ mTrafficRec->updateClock(ALooper::GetNowUs() / 1000);
+ sp<ABuffer> buffer = new ABuffer(kMaxPacketSize);
+ if (mediaBuf->range_length() + TCPIP_HEADER_SIZE + RTP_HEADER_SIZE + RTP_HEADER_EXT_SIZE
+ + RTP_PAYLOAD_ROOM_SIZE <= buffer->capacity()) {
+ // The data fits into a single packet
+ uint8_t *data = buffer->data();
+ data[0] = 0x80;
+ if (mRTPCVOExtMap > 0)
+ data[0] |= 0x10;
+ if (isSpsPps)
+ data[1] = mPayloadType; // Marker bit should not be set in case of sps/pps
+ else
+ data[1] = (1 << 7) | mPayloadType;
+ data[2] = (mSeqNo >> 8) & 0xff;
+ data[3] = mSeqNo & 0xff;
+ data[4] = rtpTime >> 24;
+ data[5] = (rtpTime >> 16) & 0xff;
+ data[6] = (rtpTime >> 8) & 0xff;
+ data[7] = rtpTime & 0xff;
+ data[8] = mSourceID >> 24;
+ data[9] = (mSourceID >> 16) & 0xff;
+ data[10] = (mSourceID >> 8) & 0xff;
+ data[11] = mSourceID & 0xff;
+
+ int rtpExtIndex = 0;
+ if (mRTPCVOExtMap > 0) {
+ /*
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | 0xBE | 0xDE | length=3 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ID | L=0 | data | ID | L=1 | data...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ ...data | 0 (pad) | 0 (pad) | ID | L=3 |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | data |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+ In the one-byte header form of extensions, the 16-bit value required
+ by the RTP specification for a header extension, labeled in the RTP
+ specification as "defined by profile", takes the fixed bit pattern
+ 0xBEDE (the first version of this specification was written on the
+ feast day of the Venerable Bede).
+ */
+ data[12] = 0xBE;
+ data[13] = 0xDE;
+ // put a length of RTP Extension.
+ data[14] = 0x00;
+ data[15] = 0x01;
+ // put extmap of RTP assigned for CVO.
+ data[16] = (mRTPCVOExtMap << 4) | 0x0;
+ // put image degrees as per CVO specification.
+ data[17] = mRTPCVODegrees;
+ data[18] = 0x0;
+ data[19] = 0x0;
+ rtpExtIndex = 8;
+ }
+
+ memcpy(&data[12 + rtpExtIndex],
+ mediaData, mediaBuf->range_length());
+
+ buffer->setRange(0, mediaBuf->range_length() + (12 + rtpExtIndex));
+
+ send(buffer, false /* isRTCP */);
+
+ ++mSeqNo;
+ ++mNumRTPSent;
+ mNumRTPOctetsSent += buffer->size() - (12 + rtpExtIndex);
+ } else {
+ // FU-A
+
+ unsigned nalType = mediaData[0];
+ size_t offset = 1;
+
+ bool firstPacket = true;
+ while (offset < mediaBuf->range_length()) {
+ size_t size = mediaBuf->range_length() - offset;
+ bool lastPacket = true;
+ if (size + TCPIP_HEADER_SIZE + RTP_HEADER_SIZE + RTP_HEADER_EXT_SIZE +
+ RTP_FU_HEADER_SIZE + RTP_PAYLOAD_ROOM_SIZE > buffer->capacity()) {
+ lastPacket = false;
+ size = buffer->capacity() - TCPIP_HEADER_SIZE - RTP_HEADER_SIZE -
+ RTP_HEADER_EXT_SIZE - RTP_FU_HEADER_SIZE - RTP_PAYLOAD_ROOM_SIZE;
+ }
+
+ uint8_t *data = buffer->data();
+ data[0] = 0x80;
+ if (lastPacket && mRTPCVOExtMap > 0)
+ data[0] |= 0x10;
+ data[1] = (lastPacket ? (1 << 7) : 0x00) | mPayloadType; // M-bit
+ data[2] = (mSeqNo >> 8) & 0xff;
+ data[3] = mSeqNo & 0xff;
+ data[4] = rtpTime >> 24;
+ data[5] = (rtpTime >> 16) & 0xff;
+ data[6] = (rtpTime >> 8) & 0xff;
+ data[7] = rtpTime & 0xff;
+ data[8] = mSourceID >> 24;
+ data[9] = (mSourceID >> 16) & 0xff;
+ data[10] = (mSourceID >> 8) & 0xff;
+ data[11] = mSourceID & 0xff;
+
+ int rtpExtIndex = 0;
+ if (lastPacket && mRTPCVOExtMap > 0) {
+ data[12] = 0xBE;
+ data[13] = 0xDE;
+ data[14] = 0x00;
+ data[15] = 0x01;
+ data[16] = (mRTPCVOExtMap << 4) | 0x0;
+ data[17] = mRTPCVODegrees;
+ data[18] = 0x0;
+ data[19] = 0x0;
+ rtpExtIndex = 8;
+ }
+
+ data[12 + rtpExtIndex] = 28 | (nalType & 0xe0);
+
+ CHECK(!firstPacket || !lastPacket);
+
+ data[13 + rtpExtIndex] =
+ (firstPacket ? 0x80 : 0x00)
+ | (lastPacket ? 0x40 : 0x00)
+ | (nalType & 0x1f);
+
+ memcpy(&data[14 + rtpExtIndex], &mediaData[offset], size);
+
+ buffer->setRange(0, 14 + rtpExtIndex + size);
+
+ send(buffer, false /* isRTCP */);
+
+ ++mSeqNo;
+ ++mNumRTPSent;
+ mNumRTPOctetsSent += buffer->size() - (12 + rtpExtIndex);
+
+ firstPacket = false;
+ offset += size;
+ }
+ }
+
+ mLastRTPTime = rtpTime;
+ mLastNTPTime = GetNowNTP();
}
void ARTPWriter::sendH263Data(MediaBufferBase *mediaBuf) {
@@ -696,7 +1283,7 @@
uint8_t *data = buffer->data();
data[0] = 0x80;
- data[1] = (lastPacket ? 0x80 : 0x00) | PT; // M-bit
+ data[1] = (lastPacket ? 0x80 : 0x00) | mPayloadType; // M-bit
data[2] = (mSeqNo >> 8) & 0xff;
data[3] = mSeqNo & 0xff;
data[4] = rtpTime >> 24;
@@ -727,6 +1314,54 @@
mLastNTPTime = GetNowNTP();
}
+void ARTPWriter::updateCVODegrees(int32_t cvoDegrees) {
+ Mutex::Autolock autoLock(mLock);
+ mRTPCVODegrees = cvoDegrees;
+}
+
+void ARTPWriter::updatePayloadType(int32_t payloadType) {
+ Mutex::Autolock autoLock(mLock);
+ mPayloadType = payloadType;
+}
+
+void ARTPWriter::updateSocketDscp(int32_t dscp) {
+ mRtpLayer3Dscp = dscp << 2;
+
+ /* mRtpLayer3Dscp will be mapped to WMM(Wifi) as per operator's requirement */
+ if (setsockopt(mRTPSocket, IPPROTO_IP, IP_TOS,
+ (int *)&mRtpLayer3Dscp, sizeof(mRtpLayer3Dscp)) < 0) {
+ ALOGE("failed to set dscp on rtpsock. err=%s", strerror(errno));
+ } else {
+ ALOGD("successfully set dscp on rtpsock. opt=%d", mRtpLayer3Dscp);
+ setsockopt(mRTCPSocket, IPPROTO_IP, IP_TOS,
+ (int *)&mRtpLayer3Dscp, sizeof(mRtpLayer3Dscp));
+ ALOGD("successfully set dscp on rtcpsock. opt=%d", mRtpLayer3Dscp);
+ }
+}
+
+void ARTPWriter::updateSocketNetwork(int64_t socketNetwork) {
+ mRTPSockNetwork = (net_handle_t)socketNetwork;
+ ALOGI("trying to bind rtp socket(%d) to network(%llu).",
+ mRTPSocket, (unsigned long long)mRTPSockNetwork);
+
+ int result = android_setsocknetwork(mRTPSockNetwork, mRTPSocket);
+ if (result != 0) {
+ ALOGW("failed(%d) to bind rtp socket(%d) to network(%llu)",
+ result, mRTPSocket, (unsigned long long)mRTPSockNetwork);
+ }
+ result = android_setsocknetwork(mRTPSockNetwork, mRTCPSocket);
+ if (result != 0) {
+ ALOGW("failed(%d) to bind rtcp socket(%d) to network(%llu)",
+ result, mRTCPSocket, (unsigned long long)mRTPSockNetwork);
+ }
+ ALOGI("done. bind rtp socket(%d) to network(%llu)",
+ mRTPSocket, (unsigned long long)mRTPSockNetwork);
+}
+
+uint32_t ARTPWriter::getSequenceNum() {
+ return mSeqNo;
+}
+
static size_t getFrameSize(bool isWide, unsigned FT) {
static const size_t kFrameSizeNB[8] = {
95, 103, 118, 134, 148, 159, 204, 244
@@ -778,7 +1413,7 @@
// The data fits into a single packet
uint8_t *data = buffer->data();
data[0] = 0x80;
- data[1] = PT;
+ data[1] = mPayloadType;
if (mNumRTPSent == 0) {
// Signal start of talk-spurt.
data[1] |= 0x80; // M-bit
@@ -834,5 +1469,91 @@
mLastNTPTime = GetNowNTP();
}
-} // namespace android
+void ARTPWriter::makeSocketPairAndBind(String8& localIp, int localPort,
+ String8& remoteIp, int remotePort) {
+ static char kSomeone[16] = "someone@";
+ int nameLength = strlen(kSomeone);
+ memcpy(kCNAME, kSomeone, nameLength);
+ memcpy(kCNAME + nameLength, localIp.c_str(), localIp.length() + 1);
+ if (localIp.contains(":"))
+ mIsIPv6 = true;
+ else
+ mIsIPv6 = false;
+
+ mRTPSocket = socket(mIsIPv6 ? AF_INET6 : AF_INET, SOCK_DGRAM, 0);
+ CHECK_GE(mRTPSocket, 0);
+ mRTCPSocket = socket(mIsIPv6 ? AF_INET6 : AF_INET, SOCK_DGRAM, 0);
+ CHECK_GE(mRTCPSocket, 0);
+
+ int sockopt = 1;
+ setsockopt(mRTPSocket, SOL_SOCKET, SO_REUSEADDR, (int *)&sockopt, sizeof(sockopt));
+ setsockopt(mRTCPSocket, SOL_SOCKET, SO_REUSEADDR, (int *)&sockopt, sizeof(sockopt));
+
+ if (mIsIPv6) {
+ memset(&mLocalAddr6, 0, sizeof(mLocalAddr6));
+ memset(&mRTPAddr6, 0, sizeof(mRTPAddr6));
+ memset(&mRTCPAddr6, 0, sizeof(mRTCPAddr6));
+
+ mLocalAddr6.sin6_family = AF_INET6;
+ inet_pton(AF_INET6, localIp.string(), &mLocalAddr6.sin6_addr);
+ mLocalAddr6.sin6_port = htons((uint16_t)localPort);
+
+ mRTPAddr6.sin6_family = AF_INET6;
+ inet_pton(AF_INET6, remoteIp.string(), &mRTPAddr6.sin6_addr);
+ mRTPAddr6.sin6_port = htons((uint16_t)remotePort);
+
+ mRTCPAddr6 = mRTPAddr6;
+ mRTCPAddr6.sin6_port = htons((uint16_t)(remotePort + 1));
+ } else {
+ memset(&mLocalAddr, 0, sizeof(mLocalAddr));
+ memset(&mRTPAddr, 0, sizeof(mRTPAddr));
+ memset(&mRTCPAddr, 0, sizeof(mRTCPAddr));
+
+ mLocalAddr.sin_family = AF_INET;
+ mLocalAddr.sin_addr.s_addr = inet_addr(localIp.string());
+ mLocalAddr.sin_port = htons((uint16_t)localPort);
+
+ mRTPAddr.sin_family = AF_INET;
+ mRTPAddr.sin_addr.s_addr = inet_addr(remoteIp.string());
+ mRTPAddr.sin_port = htons((uint16_t)remotePort);
+
+ mRTCPAddr = mRTPAddr;
+ mRTCPAddr.sin_port = htons((uint16_t)(remotePort + 1));
+ }
+
+ struct sockaddr *localAddr = mIsIPv6 ?
+ (struct sockaddr*)&mLocalAddr6 : (struct sockaddr*)&mLocalAddr;
+
+ int sizeSockSt = mIsIPv6 ? sizeof(mLocalAddr6) : sizeof(mLocalAddr);
+
+ if (bind(mRTPSocket, localAddr, sizeSockSt) == -1) {
+ ALOGE("failed to bind rtp %s:%d err=%s", localIp.string(), localPort, strerror(errno));
+ } else {
+ ALOGD("succeed to bind rtp %s:%d", localIp.string(), localPort);
+ }
+
+ if (mIsIPv6)
+ mLocalAddr6.sin6_port = htons((uint16_t)(localPort + 1));
+ else
+ mLocalAddr.sin_port = htons((uint16_t)(localPort + 1));
+
+ if (bind(mRTCPSocket, localAddr, sizeSockSt) == -1) {
+ ALOGE("failed to bind rtcp %s:%d err=%s", localIp.string(), localPort + 1, strerror(errno));
+ } else {
+ ALOGD("succeed to bind rtcp %s:%d", localIp.string(), localPort + 1);
+ }
+}
+
+// TODO : Develop more advanced moderator based on AS & TMMBR value
+void ARTPWriter::ModerateInstantTraffic(uint32_t samplePeriod, uint32_t limitBytes) {
+ unsigned int bytes = mTrafficRec->readBytesForLastPeriod(samplePeriod);
+ if (bytes > limitBytes) {
+ ALOGI("Nuclear moderator. #seq = %d \t\t %d bits / 10ms",
+ mSeqNo, bytes * 8);
+ usleep(4000);
+ mTrafficRec->updateClock(ALooper::GetNowUs() / 1000);
+ }
+}
+
+} // namespace android
diff --git a/media/libstagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/ARTPWriter.h
index 2f13486..6f25a66 100644
--- a/media/libstagefright/rtsp/ARTPWriter.h
+++ b/media/libstagefright/rtsp/ARTPWriter.h
@@ -27,6 +27,9 @@
#include <arpa/inet.h>
#include <sys/socket.h>
+#include <android/multinetwork.h>
+#include "TrafficRecorder.h"
+
#define LOG_TO_FILES 0
namespace android {
@@ -36,14 +39,23 @@
struct ARTPWriter : public MediaWriter {
explicit ARTPWriter(int fd);
+ explicit ARTPWriter(int fd, String8& localIp, int localPort,
+ String8& remoteIp, int remotePort,
+ uint32_t seqNo);
virtual status_t addSource(const sp<MediaSource> &source);
virtual bool reachedEOS();
virtual status_t start(MetaData *params);
virtual status_t stop();
virtual status_t pause();
+ void updateCVODegrees(int32_t cvoDegrees);
+ void updatePayloadType(int32_t payloadType);
+ void updateSocketDscp(int32_t dscp);
+ void updateSocketNetwork(int64_t socketNetwork);
+ uint32_t getSequenceNum();
virtual void onMessageReceived(const sp<AMessage> &msg);
+ virtual void setTMMBNInfo(uint32_t opponentID, uint32_t bitrate);
protected:
virtual ~ARTPWriter();
@@ -76,15 +88,27 @@
sp<ALooper> mLooper;
sp<AHandlerReflector<ARTPWriter> > mReflector;
- int mSocket;
+ bool mIsIPv6;
+ int mRTPSocket, mRTCPSocket;
+ struct sockaddr_in mLocalAddr;
struct sockaddr_in mRTPAddr;
struct sockaddr_in mRTCPAddr;
+ struct sockaddr_in6 mLocalAddr6;
+ struct sockaddr_in6 mRTPAddr6;
+ struct sockaddr_in6 mRTCPAddr6;
+ int32_t mRtpLayer3Dscp;
+ net_handle_t mRTPSockNetwork;
AString mProfileLevel;
AString mSeqParamSet;
AString mPicParamSet;
+ MediaBufferBase *mVPSBuf;
+ MediaBufferBase *mSPSBuf;
+ MediaBufferBase *mPPSBuf;
+
uint32_t mSourceID;
+ uint32_t mPayloadType;
uint32_t mSeqNo;
uint32_t mRTPTimeBase;
uint32_t mNumRTPSent;
@@ -92,10 +116,17 @@
uint32_t mLastRTPTime;
uint64_t mLastNTPTime;
+ uint32_t mOpponentID;
+ uint32_t mBitrate;
+ sp<TrafficRecorder<uint32_t, size_t> > mTrafficRec;
+
int32_t mNumSRsSent;
+ int32_t mRTPCVOExtMap;
+ int32_t mRTPCVODegrees;
enum {
INVALID,
+ H265,
H264,
H263,
AMR_NB,
@@ -109,17 +140,23 @@
void addSR(const sp<ABuffer> &buffer);
void addSDES(const sp<ABuffer> &buffer);
+ void addTMMBN(const sp<ABuffer> &buffer);
void makeH264SPropParamSets(MediaBufferBase *buffer);
void dumpSessionDesc();
void sendBye();
+ void sendVPSSPSPPSIfIFrame(MediaBufferBase *mediaBuf, int64_t timeUs);
+ void sendSPSPPSIfIFrame(MediaBufferBase *mediaBuf, int64_t timeUs);
+ void sendHEVCData(MediaBufferBase *mediaBuf);
void sendAVCData(MediaBufferBase *mediaBuf);
void sendH263Data(MediaBufferBase *mediaBuf);
void sendAMRData(MediaBufferBase *mediaBuf);
void send(const sp<ABuffer> &buffer, bool isRTCP);
+ void makeSocketPairAndBind(String8& localIp, int localPort, String8& remoteIp, int remotePort);
+ void ModerateInstantTraffic(uint32_t samplePeriod, uint32_t limitBytes);
DISALLOW_EVIL_CONSTRUCTORS(ARTPWriter);
};
diff --git a/media/libstagefright/rtsp/ASessionDescription.cpp b/media/libstagefright/rtsp/ASessionDescription.cpp
index 2b42040..5b5b4b1 100644
--- a/media/libstagefright/rtsp/ASessionDescription.cpp
+++ b/media/libstagefright/rtsp/ASessionDescription.cpp
@@ -27,6 +27,8 @@
namespace android {
+constexpr unsigned kDefaultAs = 960; // kbps?
+
ASessionDescription::ASessionDescription()
: mIsValid(false) {
}
@@ -103,7 +105,7 @@
key.setTo(line, 0, colonPos);
if (key == "a=fmtp" || key == "a=rtpmap"
- || key == "a=framesize") {
+ || key == "a=framesize" || key == "a=extmap") {
ssize_t spacePos = line.find(" ", colonPos + 1);
if (spacePos < 0) {
return false;
@@ -201,6 +203,33 @@
return true;
}
+bool ASessionDescription::getCvoExtMap(
+ size_t index, int32_t *cvoExtMap) const {
+ CHECK_GE(index, 0u);
+ CHECK_LT(index, mTracks.size());
+
+ AString key, value;
+ *cvoExtMap = 0;
+
+ const Attribs &track = mTracks.itemAt(index);
+ for (size_t i = 0; i < track.size(); i++) {
+ value = track.valueAt(i);
+ if (value.size() > 0 && strcmp(value.c_str(), "urn:3gpp:video-orientation") == 0) {
+ key = track.keyAt(i);
+ break;
+ }
+ }
+
+ if (key.size() > 0) {
+ const char *colonPos = strrchr(key.c_str(), ':');
+ colonPos++;
+ *cvoExtMap = atoi(colonPos);
+ return true;
+ }
+
+ return false;
+}
+
void ASessionDescription::getFormatType(
size_t index, unsigned long *PT,
AString *desc, AString *params) const {
@@ -345,5 +374,74 @@
return *npt2 > *npt1;
}
+// static
+void ASessionDescription::SDPStringFactory(AString &sdp,
+ const char *ip, bool isAudio, unsigned port, unsigned payloadType,
+ unsigned as, const char *codec, const char *fmtp,
+ int32_t width, int32_t height, int32_t cvoExtMap)
+{
+ bool isIPv4 = (AString(ip).find("::") == -1) ? true : false;
+ sdp.clear();
+ sdp.append("v=0\r\n");
+
+ sdp.append("a=range:npt=now-\r\n");
+
+ sdp.append("m=");
+ sdp.append(isAudio ? "audio " : "video ");
+ sdp.append(port);
+ sdp.append(" RTP/AVP ");
+ sdp.append(payloadType);
+ sdp.append("\r\n");
+
+ sdp.append("c= IN IP");
+ if (isIPv4) {
+ sdp.append("4 ");
+ } else {
+ sdp.append("6 ");
+ }
+ sdp.append(ip);
+ sdp.append("\r\n");
+
+ sdp.append("b=AS:");
+ sdp.append(as > 0 ? as : kDefaultAs);
+ sdp.append("\r\n");
+
+ sdp.append("a=rtpmap:");
+ sdp.append(payloadType);
+ sdp.append(" ");
+ sdp.append(codec);
+ sdp.append("/");
+ sdp.append(isAudio ? "8000" : "90000");
+ sdp.append("\r\n");
+
+ if (fmtp != NULL) {
+ sdp.append("a=fmtp:");
+ sdp.append(payloadType);
+ sdp.append(" ");
+ sdp.append(fmtp);
+ sdp.append("\r\n");
+ }
+
+ if (!isAudio && width > 0 && height > 0) {
+ sdp.append("a=framesize:");
+ sdp.append(payloadType);
+ sdp.append(" ");
+ sdp.append(width);
+ sdp.append("-");
+ sdp.append(height);
+ sdp.append("\r\n");
+ }
+
+ if (cvoExtMap > 0) {
+ sdp.append("a=extmap:");
+ sdp.append(cvoExtMap);
+ sdp.append(" ");
+ sdp.append("urn:3gpp:video-orientation");
+ sdp.append("\r\n");
+ }
+
+ ALOGV("SDPStringFactory => %s", sdp.c_str());
+}
+
} // namespace android
diff --git a/media/libstagefright/rtsp/ASessionDescription.h b/media/libstagefright/rtsp/ASessionDescription.h
index b462983..91f5442 100644
--- a/media/libstagefright/rtsp/ASessionDescription.h
+++ b/media/libstagefright/rtsp/ASessionDescription.h
@@ -40,6 +40,8 @@
size_t countTracks() const;
void getFormat(size_t index, AString *value) const;
+ bool getCvoExtMap(size_t index, int32_t *cvoExtMap) const;
+
void getFormatType(
size_t index, unsigned long *PT,
AString *desc, AString *params) const;
@@ -63,6 +65,9 @@
// i.e. we have a fixed duration, otherwise this is live streaming.
static bool parseNTPRange(const char *s, float *npt1, float *npt2);
+ static void SDPStringFactory(AString &sdp, const char *ip, bool isAudio, unsigned port,
+ unsigned payloadType, unsigned as, const char *codec, const char *fmtp = NULL,
+ int32_t width = 0, int32_t height = 0, int32_t cvoExtMap = 0);
protected:
virtual ~ASessionDescription();
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index a5a895e..f990ecf 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -4,6 +4,7 @@
srcs: [
"AAMRAssembler.cpp",
"AAVCAssembler.cpp",
+ "AHEVCAssembler.cpp",
"AH263Assembler.cpp",
"AMPEG2TSAssembler.cpp",
"AMPEG4AudioAssembler.cpp",
@@ -20,6 +21,7 @@
],
shared_libs: [
+ "libandroid_net",
"libcrypto",
"libdatasource",
"libmedia",
@@ -28,6 +30,7 @@
include_dirs: [
"frameworks/av/media/libstagefright",
"frameworks/native/include/media/openmax",
+ "frameworks/native/include/android",
],
arch: {
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 7f025a5..0fdf431 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -1032,6 +1032,11 @@
break;
}
+ int32_t rtcpEvent;
+ if (msg->findInt32("rtcp-event", &rtcpEvent)) {
+ break;
+ }
+
++mNumAccessUnitsReceived;
postAccessUnitTimeoutCheck();
diff --git a/media/libstagefright/rtsp/QualManager.cpp b/media/libstagefright/rtsp/QualManager.cpp
new file mode 100644
index 0000000..37aa326
--- /dev/null
+++ b/media/libstagefright/rtsp/QualManager.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "QualManager"
+
+#include <algorithm>
+
+#include <sys/prctl.h>
+#include <utils/Log.h>
+
+#include "QualManager.h"
+
+namespace android {
+
+QualManager::Watcher::Watcher(int32_t timeLimit)
+ : Thread(false), mWatching(false), mSwitch(false),
+ mTimeLimit(timeLimit * 1000000LL) // timeLimit ms
+{
+}
+
+bool QualManager::Watcher::isExpired() const
+{
+ return mSwitch;
+}
+
+void QualManager::Watcher::setup() {
+ AutoMutex _l(mMyLock);
+ if (mWatching == false) {
+ mWatching = true;
+ mMyCond.signal();
+ }
+}
+
+void QualManager::Watcher::release() {
+ AutoMutex _l(mMyLock);
+ if (mSwitch) {
+ ALOGW("%s DISARMED", name);
+ mSwitch = false;
+ }
+ if (mWatching == true) {
+ ALOGW("%s DISARMED", name);
+ mWatching = false;
+ mMyCond.signal();
+ }
+}
+
+void QualManager::Watcher::exit() {
+ AutoMutex _l(mMyLock);
+ // The order is important to avoid dead lock.
+ Thread::requestExit();
+ mMyCond.signal();
+}
+
+QualManager::Watcher::~Watcher() {
+ ALOGI("%s thread dead", name);
+}
+
+bool QualManager::Watcher::threadLoop() {
+ AutoMutex _l(mMyLock);
+#if defined(__linux__)
+ prctl(PR_GET_NAME, name, 0, 0, 0);
+#endif
+ while (!exitPending()) {
+ ALOGW("%s Timer init", name);
+ mMyCond.wait(mMyLock); // waits as non-watching state
+ if (exitPending())
+ return false;
+ ALOGW("%s timer BOOM after %d msec", name, (int)(mTimeLimit / 1000000LL));
+ mMyCond.waitRelative(mMyLock, mTimeLimit); // waits as watching state
+ if (mWatching == true) {
+ mSwitch = true;
+ ALOGW("%s BOOM!!!!", name);
+ }
+ mWatching = false;
+ }
+ return false;
+}
+
+
+QualManager::QualManager()
+ : mMinBitrate(-1), mMaxBitrate(-1),
+ mTargetBitrate(512000), mLastTargetBitrate(-1),
+ mLastSetBitrateTime(0), mIsNewTargetBitrate(false)
+{
+ VFPWatcher = new Watcher(3000); //Very Few Packet Watcher
+ VFPWatcher->run("VeryFewPtk");
+ LBRWatcher = new Watcher(10000); //Low Bit Rate Watcher
+ LBRWatcher->run("LowBitRate");
+}
+
+QualManager::~QualManager() {
+ VFPWatcher->exit();
+ LBRWatcher->exit();
+}
+
+int32_t QualManager::getTargetBitrate() {
+ if (mIsNewTargetBitrate) {
+ mIsNewTargetBitrate = false;
+ mLastTargetBitrate = clampingBitrate(mTargetBitrate);
+ mTargetBitrate = mLastTargetBitrate;
+ return mTargetBitrate;
+ } else {
+ return -1;
+ }
+}
+
+bool QualManager::isNeedToDowngrade() {
+ return LBRWatcher->isExpired();
+}
+
+void QualManager::setTargetBitrate(uint8_t fraction, int64_t nowUs, bool isTooLowPkts) {
+ /* Too Low Packet. Maybe opponent is switching camera.
+ * If this condition goes longer, we should down bitrate.
+ */
+ if (isTooLowPkts) {
+ VFPWatcher->setup();
+ } else {
+ VFPWatcher->release();
+ }
+
+ if ((fraction > (256 * 5 / 100) && !isTooLowPkts) || VFPWatcher->isExpired()) {
+ // loss more than 5% or VFPWatcher BOOMED
+ mTargetBitrate -= mBitrateStep * 3;
+ } else if (fraction <= (256 * 2 /100)) {
+ // loss less than 2%
+ mTargetBitrate += mBitrateStep;
+ }
+
+ if (mTargetBitrate > mMaxBitrate) {
+ mTargetBitrate = mMaxBitrate + mBitrateStep;
+ } else if (mTargetBitrate < mMinBitrate) {
+ LBRWatcher->setup();
+ mTargetBitrate = mMinBitrate - mBitrateStep;
+ }
+
+ if (mLastTargetBitrate != clampingBitrate(mTargetBitrate) ||
+ nowUs - mLastSetBitrateTime > 5000000ll) {
+ mIsNewTargetBitrate = true;
+ mLastSetBitrateTime = nowUs;
+ }
+}
+
+void QualManager::setMinMaxBitrate(int32_t min, int32_t max) {
+ mMinBitrate = min;
+ mMaxBitrate = max;
+ mBitrateStep = (max - min) / 8;
+}
+
+void QualManager::setBitrateData(int32_t bitrate, int64_t /*now*/) {
+ // A bitrate that is considered packetloss also should be good.
+ if (bitrate >= mMinBitrate && mTargetBitrate >= mMinBitrate) {
+ LBRWatcher->release();
+ } else if (bitrate < mMinBitrate){
+ LBRWatcher->setup();
+ }
+}
+
+int32_t QualManager::clampingBitrate(int32_t bitrate) {
+ return std::min(std::max(mMinBitrate, bitrate), mMaxBitrate);
+}
+} // namespace android
diff --git a/media/libstagefright/rtsp/QualManager.h b/media/libstagefright/rtsp/QualManager.h
new file mode 100644
index 0000000..a7dc921
--- /dev/null
+++ b/media/libstagefright/rtsp/QualManager.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef QUAL_MANAGER_H_
+
+#define QUAL_MANAGER_H_
+
+#include <stdint.h>
+#include <utils/Thread.h>
+
+namespace android {
+class QualManager {
+public:
+ QualManager();
+ ~QualManager();
+
+ int32_t getTargetBitrate();
+ bool isNeedToDowngrade();
+
+ void setTargetBitrate(uint8_t fraction, int64_t nowUs, bool isTooLowPkts);
+ void setMinMaxBitrate(int32_t min, int32_t max);
+ void setBitrateData(int32_t bitrate, int64_t now);
+private:
+ class Watcher : public Thread
+ {
+ public:
+ Watcher(int32_t timeLimit);
+
+ void setup();
+ void release();
+ void exit();
+ bool isExpired() const;
+ private:
+ virtual ~Watcher();
+ virtual bool threadLoop();
+
+ char name[32] = {0,};
+
+ Condition mMyCond;
+ Mutex mMyLock;
+
+ bool mWatching;
+ bool mSwitch;
+ const nsecs_t mTimeLimit;
+ };
+ sp<Watcher> VFPWatcher;
+ sp<Watcher> LBRWatcher;
+ int32_t mMinBitrate;
+ int32_t mMaxBitrate;
+ int32_t mBitrateStep;
+
+ int32_t mTargetBitrate;
+ int32_t mLastTargetBitrate;
+ int64_t mLastSetBitrateTime;
+
+ bool mIsNewTargetBitrate;
+
+ int32_t clampingBitrate(int32_t bitrate);
+};
+} //namespace android
+
+#endif // QUAL_MANAGER_H_
diff --git a/media/libstagefright/rtsp/TrafficRecorder.h b/media/libstagefright/rtsp/TrafficRecorder.h
new file mode 100644
index 0000000..f8e7c03
--- /dev/null
+++ b/media/libstagefright/rtsp/TrafficRecorder.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_TRAFFIC_RECORDER_H_
+
+#define A_TRAFFIC_RECORDER_H_
+
+#include <android-base/logging.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+// Circular array to save recent amount of bytes
+template <class Time, class Bytes>
+class TrafficRecorder : public RefBase {
+private:
+ size_t mSize;
+ size_t mSizeMask;
+ Time *mTimeArray = NULL;
+ Bytes *mBytesArray = NULL;
+ size_t mHeadIdx = 0;
+ size_t mTailIdx = 0;
+
+ Time mClock = 0;
+ Time mLastTimeOfPrint = 0;
+ Bytes mAccuBytesOfPrint = 0;
+public:
+ TrafficRecorder();
+ TrafficRecorder(size_t size);
+ virtual ~TrafficRecorder();
+
+ void init();
+
+ void updateClock(Time now);
+
+ Bytes readBytesForLastPeriod(Time period);
+ void writeBytes(Bytes bytes);
+
+ void printAccuBitsForLastPeriod(Time period, Time unit);
+};
+
+template <class Time, class Bytes>
+TrafficRecorder<Time, Bytes>::TrafficRecorder() {
+ TrafficRecorder(128);
+}
+
+template <class Time, class Bytes>
+TrafficRecorder<Time, Bytes>::TrafficRecorder(size_t size) {
+ size_t exp;
+ for (exp = 0; exp < 32; exp++) {
+ if (size <= (1ul << exp)) {
+ break;
+ }
+ }
+ mSize = (1ul << exp); // size = 2^exp
+ mSizeMask = mSize - 1;
+
+ LOG(VERBOSE) << "TrafficRecorder Init size " << mSize;
+ mTimeArray = new Time[mSize];
+ mBytesArray = new Bytes[mSize];
+
+ init();
+}
+
+template <class Time, class Bytes>
+TrafficRecorder<Time, Bytes>::~TrafficRecorder() {
+ delete[] mTimeArray;
+ delete[] mBytesArray;
+}
+
+template <class Time, class Bytes>
+void TrafficRecorder<Time, Bytes>::init() {
+ mHeadIdx = 0;
+ mTailIdx = 0;
+ mTimeArray[0] = 0;
+ mBytesArray[0] = 0;
+}
+
+template <class Time, class Bytes>
+void TrafficRecorder<Time, Bytes>::updateClock(Time now) {
+ mClock = now;
+}
+
+template <class Time, class Bytes>
+Bytes TrafficRecorder<Time, Bytes>::readBytesForLastPeriod(Time period) {
+ Bytes bytes = 0;
+
+ size_t i = mTailIdx;
+ while (i != mHeadIdx) {
+ LOG(VERBOSE) << "READ " << i << " time " << mTimeArray[i] << " \t EndOfPeriod " << mClock - period;
+ if (mTimeArray[i] < mClock - period) {
+ break;
+ }
+ bytes += mBytesArray[i];
+ i = (i + mSize - 1) & mSizeMask;
+ }
+ mHeadIdx = i;
+ return bytes;
+}
+
+template <class Time, class Bytes>
+void TrafficRecorder<Time, Bytes>::writeBytes(Bytes bytes) {
+ size_t writeIdx;
+ if (mClock == mTimeArray[mTailIdx]) {
+ writeIdx = mTailIdx;
+ mBytesArray[writeIdx] += bytes;
+ } else {
+ writeIdx = (mTailIdx + 1) % mSize;
+ mTimeArray[writeIdx] = mClock;
+ mBytesArray[writeIdx] = bytes;
+ }
+
+ LOG(VERBOSE) << "WRITE " << writeIdx << " time " << mClock;
+ if (writeIdx == mHeadIdx) {
+ LOG(WARNING) << "Traffic recorder size exceeded at " << mHeadIdx;
+ mHeadIdx = (mHeadIdx + 1) & mSizeMask;
+ }
+
+ mTailIdx = writeIdx;
+ mAccuBytesOfPrint += bytes;
+}
+
+template <class Time, class Bytes>
+void TrafficRecorder<Time, Bytes>::printAccuBitsForLastPeriod(Time period, Time unit) {
+ Time duration = mClock - mLastTimeOfPrint;
+ float numOfUnit = (float)duration / unit;
+ if (duration > period) {
+ ALOGD("Actual Tx period %.0f ms \t %.0f Bits/Unit",
+ numOfUnit * 1000.f, mAccuBytesOfPrint * 8.f / numOfUnit);
+ mLastTimeOfPrint = mClock;
+ mAccuBytesOfPrint = 0;
+ init();
+ }
+}
+
+} // namespace android
+
+#endif // A_TRAFFIC_RECORDER_H_
diff --git a/media/libstagefright/tests/HEVC/Android.bp b/media/libstagefright/tests/HEVC/Android.bp
index 7a6b959..3762553 100644
--- a/media/libstagefright/tests/HEVC/Android.bp
+++ b/media/libstagefright/tests/HEVC/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "HEVCUtilsUnitTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/tests/extractorFactory/Android.bp b/media/libstagefright/tests/extractorFactory/Android.bp
index e3e61d7..26ec507 100644
--- a/media/libstagefright/tests/extractorFactory/Android.bp
+++ b/media/libstagefright/tests/extractorFactory/Android.bp
@@ -17,6 +17,7 @@
cc_test {
name: "ExtractorFactoryTest",
gtest: true,
+ test_suites: ["device-tests"],
srcs: [
"ExtractorFactoryTest.cpp",
diff --git a/media/libstagefright/timedtext/TEST_MAPPING b/media/libstagefright/timedtext/TEST_MAPPING
new file mode 100644
index 0000000..35a5b11
--- /dev/null
+++ b/media/libstagefright/timedtext/TEST_MAPPING
@@ -0,0 +1,9 @@
+// mappings for frameworks/av/media/libstagefright/timedtext
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "TimedTextUnitTest" }
+ ]
+}
diff --git a/media/libstagefright/timedtext/test/Android.bp b/media/libstagefright/timedtext/test/Android.bp
index 36f8891..11e5077 100644
--- a/media/libstagefright/timedtext/test/Android.bp
+++ b/media/libstagefright/timedtext/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "TimedTextUnitTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libstagefright/xmlparser/TEST_MAPPING b/media/libstagefright/xmlparser/TEST_MAPPING
new file mode 100644
index 0000000..8626d72
--- /dev/null
+++ b/media/libstagefright/xmlparser/TEST_MAPPING
@@ -0,0 +1,6 @@
+// test mapping for frameworks/av/media/libstagefright/xmlparser
+{
+ "presubmit": [
+ { "name": "XMLParserTest" }
+ ]
+}
diff --git a/media/libstagefright/xmlparser/test/Android.bp b/media/libstagefright/xmlparser/test/Android.bp
index 6d97c96..ba02f84 100644
--- a/media/libstagefright/xmlparser/test/Android.bp
+++ b/media/libstagefright/xmlparser/test/Android.bp
@@ -16,6 +16,7 @@
cc_test {
name: "XMLParserTest",
+ test_suites: ["device-tests"],
gtest: true,
srcs: [
diff --git a/media/libwatchdog/Android.bp b/media/libwatchdog/Android.bp
index 1a87824..f7f0db7 100644
--- a/media/libwatchdog/Android.bp
+++ b/media/libwatchdog/Android.bp
@@ -14,6 +14,7 @@
cc_library {
name: "libwatchdog",
+ host_supported: true,
srcs: [
"Watchdog.cpp",
],
@@ -29,6 +30,11 @@
darwin: {
enabled: false,
},
+ linux_glibc: {
+ cflags: [
+ "-Dsigev_notify_thread_id=_sigev_un._tid",
+ ],
+ },
},
apex_available: ["com.android.media"],
min_sdk_version: "29",
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 37598f8..d0e0cc7 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -181,6 +181,7 @@
cc_test {
name: "AImageReaderWindowHandleTest",
+ test_suites: ["device-tests"],
srcs: ["tests/AImageReaderWindowHandleTest.cpp"],
shared_libs: [
"libbinder",
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index 0e8cbcb..b019448 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -30,6 +30,18 @@
using namespace android;
+// Formats not listed in the public API, but still available to AImageReader
+enum AIMAGE_PRIVATE_FORMATS {
+ /**
+ * Unprocessed implementation-dependent raw
+ * depth measurements, opaque with 16 bit
+ * samples.
+ *
+ */
+
+ AIMAGE_FORMAT_RAW_DEPTH = 0x1002,
+};
+
// TODO: this only supports ImageReader
struct AImage {
AImage(AImageReader* reader, int32_t format, uint64_t usage, BufferItem* buffer,
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index c0ceb3d..5d8f0b8 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -21,7 +21,6 @@
#include "NdkImagePriv.h"
#include "NdkImageReaderPriv.h"
-#include <private/media/NdkImage.h>
#include <cutils/atomic.h>
#include <utils/Log.h>
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index af21a99..d771095 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -45,6 +45,10 @@
return AMEDIA_OK;
} else if (err == -EAGAIN) {
return (media_status_t) AMEDIACODEC_INFO_TRY_AGAIN_LATER;
+ } else if (err == NO_MEMORY) {
+ return AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE;
+ } else if (err == DEAD_OBJECT) {
+ return AMEDIACODEC_ERROR_RECLAIMED;
}
ALOGE("sf error code: %d", err);
return AMEDIA_ERROR_UNKNOWN;
@@ -255,7 +259,7 @@
break;
}
msg->findString("detail", &detail);
- ALOGE("Decoder reported error(0x%x), actionCode(%d), detail(%s)",
+ ALOGE("Codec reported error(0x%x), actionCode(%d), detail(%s)",
err, actionCode, detail.c_str());
Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 8680641..73c52a9 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -364,6 +364,7 @@
EXPORT const char* AMEDIAFORMAT_KEY_SAR_WIDTH = "sar-width";
EXPORT const char* AMEDIAFORMAT_KEY_SEI = "sei";
EXPORT const char* AMEDIAFORMAT_KEY_SLICE_HEIGHT = "slice-height";
+EXPORT const char* AMEDIAFORMAT_KEY_SLOW_MOTION_MARKERS = "slow-motion-markers";
EXPORT const char* AMEDIAFORMAT_KEY_STRIDE = "stride";
EXPORT const char* AMEDIAFORMAT_KEY_TARGET_TIME = "target-time";
EXPORT const char* AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT = "temporal-layer-count";
diff --git a/media/ndk/TEST_MAPPING b/media/ndk/TEST_MAPPING
new file mode 100644
index 0000000..1a81538
--- /dev/null
+++ b/media/ndk/TEST_MAPPING
@@ -0,0 +1,6 @@
+// mappings for frameworks/av/media/ndk
+{
+ "presubmit": [
+ { "name": "AImageReaderWindowHandleTest" }
+ ]
+}
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index 6371de4..394b972 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -322,6 +322,10 @@
extern const char* AMEDIAFORMAT_KEY_LOW_LATENCY __INTRODUCED_IN(30);
#endif /* __ANDROID_API__ >= 30 */
+#if __ANDROID_API__ >= 31
+extern const char* AMEDIAFORMAT_KEY_SLOW_MOTION_MARKERS __INTRODUCED_IN(31);
+#endif /* __ANDROID_API__ >= 31 */
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/include/private/media/NdkImage.h b/media/ndk/include/private/media/NdkImage.h
deleted file mode 100644
index 4368a56..0000000
--- a/media/ndk/include/private/media/NdkImage.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _PRIVATE_MEDIA_NDKIMAGE_H_
-#define _PRIVATE_MEDIA_NDKIMAGE_H_
-// Formats not listed in the public API, but still available to AImageReader
-enum AIMAGE_PRIVATE_FORMATS {
- /**
- * Unprocessed implementation-dependent raw
- * depth measurements, opaque with 16 bit
- * samples.
- *
- */
-
- AIMAGE_FORMAT_RAW_DEPTH = 0x1002,
-};
-#endif // _PRIVATE_MEDIA_NDKIMAGE
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 29f1da8..bd3337e 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -131,6 +131,7 @@
AMEDIAFORMAT_KEY_SAR_WIDTH; # var introduced=29
AMEDIAFORMAT_KEY_SEI; # var introduced=28
AMEDIAFORMAT_KEY_SLICE_HEIGHT; # var introduced=28
+ AMEDIAFORMAT_KEY_SLOW_MOTION_MARKERS; # var introduced=31
AMEDIAFORMAT_KEY_STRIDE; # var introduced=21
AMEDIAFORMAT_KEY_TARGET_TIME; # var introduced=29
AMEDIAFORMAT_KEY_TEMPORAL_LAYER_COUNT; # var introduced=29
diff --git a/media/ndk/tests/AImageReaderWindowHandleTest.cpp b/media/ndk/tests/AImageReaderWindowHandleTest.cpp
index 5b65064..27864c2 100644
--- a/media/ndk/tests/AImageReaderWindowHandleTest.cpp
+++ b/media/ndk/tests/AImageReaderWindowHandleTest.cpp
@@ -17,10 +17,10 @@
#include <gtest/gtest.h>
#include <media/NdkImageReader.h>
#include <media/NdkImage.h>
-#include <private/media/NdkImage.h>
#include <mediautils/AImageReaderUtils.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
+#include <NdkImagePriv.h>
#include <NdkImageReaderPriv.h>
#include <vndk/hardware_buffer.h>
#include <memory>
diff --git a/media/utils/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
index 113e4a7..19225d3 100644
--- a/media/utils/ProcessInfo.cpp
+++ b/media/utils/ProcessInfo.cpp
@@ -27,6 +27,9 @@
namespace android {
+static constexpr int32_t INVALID_ADJ = -10000;
+static constexpr int32_t NATIVE_ADJ = -1000;
+
ProcessInfo::ProcessInfo() {}
bool ProcessInfo::getPriority(int pid, int* priority) {
@@ -35,8 +38,6 @@
size_t length = 1;
int32_t state;
- static const int32_t INVALID_ADJ = -10000;
- static const int32_t NATIVE_ADJ = -1000;
int32_t score = INVALID_ADJ;
status_t err = service->getProcessStatesAndOomScoresFromPids(length, &pid, &state, &score);
if (err != OK) {
@@ -45,8 +46,17 @@
}
ALOGV("pid %d state %d score %d", pid, state, score);
if (score <= NATIVE_ADJ) {
- ALOGE("pid %d invalid OOM adjustments value %d", pid, score);
- return false;
+ std::scoped_lock lock{mOverrideLock};
+
+ // If this process if not tracked by ActivityManagerService, look for overrides.
+ auto it = mOverrideMap.find(pid);
+ if (it != mOverrideMap.end()) {
+ ALOGI("pid %d invalid OOM score %d, override to %d", pid, score, it->second.oomScore);
+ score = it->second.oomScore;
+ } else {
+ ALOGE("pid %d invalid OOM score %d", pid, score);
+ return false;
+ }
}
// Use OOM adjustments value as the priority. Lower the value, higher the priority.
@@ -61,6 +71,26 @@
return (callingPid == getpid()) || (callingPid == pid) || (callingUid == AID_MEDIA);
}
+bool ProcessInfo::overrideProcessInfo(int pid, int procState, int oomScore) {
+ std::scoped_lock lock{mOverrideLock};
+
+ mOverrideMap.erase(pid);
+
+ // Disable the override if oomScore is set to NATIVE_ADJ or below.
+ if (oomScore <= NATIVE_ADJ) {
+ return false;
+ }
+
+ mOverrideMap.emplace(pid, ProcessInfoOverride{procState, oomScore});
+ return true;
+}
+
+void ProcessInfo::removeProcessInfoOverride(int pid) {
+ std::scoped_lock lock{mOverrideLock};
+
+ mOverrideMap.erase(pid);
+}
+
ProcessInfo::~ProcessInfo() {}
} // namespace android
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 7699700..87ea084 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -62,7 +62,7 @@
}
static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
- uid_t uid, bool start, bool isHotwordSource) {
+ uid_t uid, bool start) {
// Okay to not track in app ops as audio server or media server is us and if
// device is rooted security model is considered compromised.
// system_server loses its RECORD_AUDIO permission when a secondary
@@ -87,21 +87,16 @@
}
AppOpsManager appOps;
- const int32_t opRecordAudio = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
-
+ const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
if (start) {
- const int32_t op = isHotwordSource ?
- AppOpsManager::OP_RECORD_AUDIO_HOTWORD : opRecordAudio;
if (appOps.startOpNoThrow(op, uid, resolvedOpPackageName, /*startIfModeDefault*/ false)
!= AppOpsManager::MODE_ALLOWED) {
ALOGE("Request denied by app op: %d", op);
return false;
}
} else {
- // Always use OP_RECORD_AUDIO for checks at creation time.
- if (appOps.checkOp(opRecordAudio, uid, resolvedOpPackageName)
- != AppOpsManager::MODE_ALLOWED) {
- ALOGE("Request denied by app op: %d", opRecordAudio);
+ if (appOps.checkOp(op, uid, resolvedOpPackageName) != AppOpsManager::MODE_ALLOWED) {
+ ALOGE("Request denied by app op: %d", op);
return false;
}
}
@@ -110,15 +105,14 @@
}
bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid) {
- return checkRecordingInternal(opPackageName, pid, uid, /*start*/ false,
- /*is_hotword_source*/ false);
+ return checkRecordingInternal(opPackageName, pid, uid, /*start*/ false);
}
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid, bool isHotwordSource) {
- return checkRecordingInternal(opPackageName, pid, uid, /*start*/ true, isHotwordSource);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid) {
+ return checkRecordingInternal(opPackageName, pid, uid, /*start*/ true);
}
-void finishRecording(const String16& opPackageName, uid_t uid, bool isHotwordSource) {
+void finishRecording(const String16& opPackageName, uid_t uid) {
// Okay to not track in app ops as audio server is us and if
// device is rooted security model is considered compromised.
if (isAudioServerOrRootUid(uid)) return;
@@ -131,8 +125,7 @@
}
AppOpsManager appOps;
- const int32_t op = isHotwordSource ? AppOpsManager::OP_RECORD_AUDIO_HOTWORD
- : appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
+ const int32_t op = appOps.permissionToOpCode(sAndroidPermissionRecordAudio);
appOps.finishOp(op, uid, resolvedOpPackageName);
}
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 431dd7a..212599a 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -79,8 +79,8 @@
}
bool recordingAllowed(const String16& opPackageName, pid_t pid, uid_t uid);
-bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid, bool isHotwordSource);
-void finishRecording(const String16& opPackageName, uid_t uid, bool isHotwordSource);
+bool startRecording(const String16& opPackageName, pid_t pid, uid_t uid);
+void finishRecording(const String16& opPackageName, uid_t uid);
bool captureAudioOutputAllowed(pid_t pid, uid_t uid);
bool captureMediaOutputAllowed(pid_t pid, uid_t uid);
bool captureVoiceCommunicationOutputAllowed(pid_t pid, uid_t uid);
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 3873600..12f6eba 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -54,6 +54,7 @@
"libmediautils",
"libmemunreachable",
"libmedia_helper",
+ "libshmemcompat",
"libvibrator",
],
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index ef746f4..eae9437 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -61,6 +61,7 @@
#include <system/audio_effects/effect_visualizer.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_hapticgenerator.h>
#include <audio_utils/primitives.h>
@@ -97,6 +98,8 @@
namespace android {
+using media::IEffectClient;
+
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
static const char kHardwareLockedString[] = "Hardware lock is taken\n";
static const char kClientLockedString[] = "Client lock is taken\n";
@@ -194,7 +197,12 @@
mNextUniqueIds[use] = AUDIO_UNIQUE_ID_USE_MAX;
}
+#if 1
+ // FIXME See bug 165702394 and bug 168511485
+ const bool doLog = false;
+#else
const bool doLog = property_get_bool("ro.test_harness", false);
+#endif
if (doLog) {
mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
MemoryHeapBase::READ_ONLY);
@@ -398,7 +406,7 @@
return ret;
}
}
- return AudioMixer::HAPTIC_SCALE_MUTE;
+ return static_cast<int>(os::HapticScale::MUTE);
}
/* static */
@@ -3298,6 +3306,16 @@
return minThread;
}
+AudioFlinger::ThreadBase *AudioFlinger::hapticPlaybackThread_l() const {
+ for (size_t i = 0; i < mPlaybackThreads.size(); ++i) {
+ PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
+ if (thread->hapticChannelMask() != AUDIO_CHANNEL_NONE) {
+ return thread;
+ }
+ }
+ return nullptr;
+}
+
sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
audio_session_t triggerSession,
audio_session_t listenerSession,
@@ -3428,7 +3446,7 @@
return status;
}
-sp<IEffect> AudioFlinger::createEffect(
+sp<media::IEffect> AudioFlinger::createEffect(
effect_descriptor_t *pDesc,
const sp<IEffectClient>& effectClient,
int32_t priority,
@@ -3539,6 +3557,16 @@
goto Exit;
}
+ const bool hapticPlaybackRequired = EffectModule::isHapticGenerator(&desc.type);
+ if (hapticPlaybackRequired
+ && (sessionId == AUDIO_SESSION_DEVICE
+ || sessionId == AUDIO_SESSION_OUTPUT_MIX
+ || sessionId == AUDIO_SESSION_OUTPUT_STAGE)) {
+ // haptic-generating effect is only valid when the session id is a general session id
+ lStatus = INVALID_OPERATION;
+ goto Exit;
+ }
+
// return effect descriptor
*pDesc = desc;
if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
@@ -3613,7 +3641,17 @@
// allow only one effect chain per sessionId on mPlaybackThreads.
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
const audio_io_handle_t checkIo = mPlaybackThreads.keyAt(i);
- if (io == checkIo) continue;
+ if (io == checkIo) {
+ if (hapticPlaybackRequired
+ && mPlaybackThreads.valueAt(i)
+ ->hapticChannelMask() == AUDIO_CHANNEL_NONE) {
+ ALOGE("%s: haptic playback thread is required while the required playback "
+ "thread(io=%d) doesn't support", __func__, (int)io);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ continue;
+ }
const uint32_t sessionType =
mPlaybackThreads.valueAt(i)->hasAudioSession(sessionId);
if ((sessionType & ThreadBase::EFFECT_SESSION) != 0) {
@@ -3650,6 +3688,20 @@
// create effect on selected output thread
bool pinned = !audio_is_global_session(sessionId) && isSessionAcquired_l(sessionId);
+ ThreadBase *oriThread = nullptr;
+ if (hapticPlaybackRequired && thread->hapticChannelMask() == AUDIO_CHANNEL_NONE) {
+ ThreadBase *hapticThread = hapticPlaybackThread_l();
+ if (hapticThread == nullptr) {
+ ALOGE("%s haptic thread not found while it is required", __func__);
+ lStatus = INVALID_OPERATION;
+ goto Exit;
+ }
+ if (hapticThread != thread) {
+ // Force to use haptic thread for haptic-generating effect.
+ oriThread = thread;
+ thread = hapticThread;
+ }
+ }
handle = thread->createEffect_l(client, effectClient, priority, sessionId,
&desc, enabled, &lStatus, pinned, probe);
if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
@@ -3659,6 +3711,11 @@
} else {
// handle must be valid here, but check again to be safe.
if (handle.get() != nullptr && id != nullptr) *id = handle->id();
+ // Invalidate audio session when haptic playback is created.
+ if (hapticPlaybackRequired && oriThread != nullptr) {
+ // invalidateTracksForAudioSession will trigger locking the thread.
+ oriThread->invalidateTracksForAudioSession(sessionId);
+ }
}
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 20f561e..14a4df7 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -91,15 +91,17 @@
#include "ThreadMetrics.h"
#include "TrackMetrics.h"
-#include <powermanager/IPowerManager.h>
+#include <android/os/IPowerManager.h>
#include <media/nblog/NBLog.h>
#include <private/media/AudioEffectShared.h>
#include <private/media/AudioTrackShared.h>
#include <vibrator/ExternalVibration.h>
+#include <vibrator/ExternalVibrationUtils.h>
#include "android/media/BnAudioRecord.h"
+#include "android/media/BnEffect.h"
namespace android {
@@ -231,9 +233,9 @@
uint32_t preferredTypeFlag,
effect_descriptor_t *descriptor) const;
- virtual sp<IEffect> createEffect(
+ virtual sp<media::IEffect> createEffect(
effect_descriptor_t *pDesc,
- const sp<IEffectClient>& effectClient,
+ const sp<media::IEffectClient>& effectClient,
int32_t priority,
audio_io_handle_t io,
audio_session_t sessionId,
@@ -405,7 +407,7 @@
case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
// Haptic channel mask is only applicable for channel position mask.
const uint32_t channelCount = audio_channel_count_from_out_mask(
- channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL);
+ static_cast<audio_channel_mask_t>(channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL));
const uint32_t maxChannelCount = kEnableExtendedChannels
? AudioMixer::MAX_NUM_CHANNELS : FCC_2;
if (channelCount < FCC_2 // mono is not supported at this time
@@ -682,6 +684,7 @@
virtual status_t createMmapBuffer(int32_t minSizeFrames,
struct audio_mmap_buffer_info *info);
virtual status_t getMmapPosition(struct audio_mmap_position *position);
+ virtual status_t getExternalPosition(uint64_t *position, int64_t *timeNanos);
virtual status_t start(const AudioClient& client,
const audio_attributes_t *attr,
audio_port_handle_t *handle);
@@ -756,6 +759,8 @@
sp<ThreadBase> getEffectThread_l(audio_session_t sessionId, int effectId);
+ ThreadBase *hapticPlaybackThread_l() const;
+
void removeClient_l(pid_t pid);
void removeNotificationClient(pid_t pid);
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index 5ff7215..cecd52b 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -30,6 +30,8 @@
namespace android {
+using media::IEffectClient;
+
void AudioFlinger::DeviceEffectManager::createAudioPatch(audio_patch_handle_t handle,
const PatchPanel::Patch& patch) {
ALOGV("%s handle %d mHalHandle %d num sinks %d device sink %08x",
@@ -115,10 +117,19 @@
status_t AudioFlinger::DeviceEffectManager::checkEffectCompatibility(
const effect_descriptor_t *desc) {
+ sp<EffectsFactoryHalInterface> effectsFactory = mAudioFlinger.getEffectsFactory();
+ if (effectsFactory == nullptr) {
+ return BAD_VALUE;
+ }
- if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC
- && (desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGW("%s() non pre/post processing device effect %s", __func__, desc->name);
+ static const float sMinDeviceEffectHalVersion = 6.0;
+ float halVersion = effectsFactory->getHalVersion();
+
+ if (((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC
+ && (desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC)
+ || halVersion < sMinDeviceEffectHalVersion) {
+ ALOGW("%s() non pre/post processing device effect %s or incompatible API version %f",
+ __func__, desc->name, halVersion);
return BAD_VALUE;
}
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index 81e6065..d187df2 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -33,7 +33,7 @@
sp<EffectHandle> createEffect_l(effect_descriptor_t *descriptor,
const AudioDeviceTypeAddr& device,
const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
+ const sp<media::IEffectClient>& effectClient,
const std::map<audio_patch_handle_t, PatchPanel::Patch>& patches,
int *enabled,
status_t *status,
@@ -165,6 +165,7 @@
uint32_t sampleRate() const override { return 0; }
audio_channel_mask_t channelMask() const override { return AUDIO_CHANNEL_NONE; }
uint32_t channelCount() const override { return 0; }
+ audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 529b87c..eaad6ef 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -25,6 +25,7 @@
#include <utils/Log.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_dynamicsprocessing.h>
+#include <system/audio_effects/effect_hapticgenerator.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_visualizer.h>
#include <audio_utils/channels.h>
@@ -33,6 +34,7 @@
#include <media/AudioContainers.h>
#include <media/AudioEffect.h>
#include <media/AudioDeviceTypeAddr.h>
+#include <media/ShmemCompat.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <mediautils/ServiceUtilities.h>
@@ -58,6 +60,27 @@
namespace android {
+using binder::Status;
+
+namespace {
+
+// Append a POD value into a vector of bytes.
+template<typename T>
+void appendToBuffer(const T& value, std::vector<uint8_t>* buffer) {
+ const uint8_t* ar(reinterpret_cast<const uint8_t*>(&value));
+ buffer->insert(buffer->end(), ar, ar + sizeof(T));
+}
+
+// Write a POD value into a vector of bytes (clears the previous buffer
+// content).
+template<typename T>
+void writeToBuffer(const T& value, std::vector<uint8_t>* buffer) {
+ buffer->clear();
+ appendToBuffer(value, buffer);
+}
+
+} // namespace
+
// ----------------------------------------------------------------------------
// EffectBase implementation
// ----------------------------------------------------------------------------
@@ -868,6 +891,11 @@
}
#endif
}
+ if (isHapticGenerator()) {
+ audio_channel_mask_t hapticChannelMask = mCallback->hapticChannelMask();
+ mConfig.inputCfg.channels |= hapticChannelMask;
+ mConfig.outputCfg.channels |= hapticChannelMask;
+ }
mInChannelCountRequested =
audio_channel_count_from_out_mask(mConfig.inputCfg.channels);
mOutChannelCountRequested =
@@ -1149,11 +1177,10 @@
return remainder == 0 ? 0 : divisor - remainder;
}
-status_t AudioFlinger::EffectModule::command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData)
+status_t AudioFlinger::EffectModule::command(int32_t cmdCode,
+ const std::vector<uint8_t>& cmdData,
+ int32_t maxReplySize,
+ std::vector<uint8_t>* reply)
{
Mutex::Autolock _l(mLock);
ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface.get());
@@ -1164,63 +1191,68 @@
if (mStatus != NO_ERROR) {
return mStatus;
}
+ if (maxReplySize < 0 || maxReplySize > EFFECT_PARAM_SIZE_MAX) {
+ return -EINVAL;
+ }
+ size_t cmdSize = cmdData.size();
+ const effect_param_t* param = cmdSize >= sizeof(effect_param_t)
+ ? reinterpret_cast<const effect_param_t*>(cmdData.data())
+ : nullptr;
if (cmdCode == EFFECT_CMD_GET_PARAM &&
- (sizeof(effect_param_t) > cmdSize ||
- ((effect_param_t *)pCmdData)->psize > cmdSize
- - sizeof(effect_param_t))) {
+ (param == nullptr || param->psize > cmdSize - sizeof(effect_param_t))) {
android_errorWriteLog(0x534e4554, "32438594");
android_errorWriteLog(0x534e4554, "33003822");
return -EINVAL;
}
if (cmdCode == EFFECT_CMD_GET_PARAM &&
- (*replySize < sizeof(effect_param_t) ||
- ((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) {
+ (maxReplySize < sizeof(effect_param_t) ||
+ param->psize > maxReplySize - sizeof(effect_param_t))) {
android_errorWriteLog(0x534e4554, "29251553");
return -EINVAL;
}
if (cmdCode == EFFECT_CMD_GET_PARAM &&
- (sizeof(effect_param_t) > *replySize
- || ((effect_param_t *)pCmdData)->psize > *replySize
- - sizeof(effect_param_t)
- || ((effect_param_t *)pCmdData)->vsize > *replySize
- - sizeof(effect_param_t)
- - ((effect_param_t *)pCmdData)->psize
- || roundUpDelta(((effect_param_t *)pCmdData)->psize, (uint32_t)sizeof(int)) >
- *replySize
- - sizeof(effect_param_t)
- - ((effect_param_t *)pCmdData)->psize
- - ((effect_param_t *)pCmdData)->vsize)) {
+ (sizeof(effect_param_t) > maxReplySize
+ || param->psize > maxReplySize - sizeof(effect_param_t)
+ || param->vsize > maxReplySize - sizeof(effect_param_t)
+ - param->psize
+ || roundUpDelta(param->psize, (uint32_t) sizeof(int)) >
+ maxReplySize
+ - sizeof(effect_param_t)
+ - param->psize
+ - param->vsize)) {
ALOGV("\tLVM_ERROR : EFFECT_CMD_GET_PARAM: reply size inconsistent");
android_errorWriteLog(0x534e4554, "32705438");
return -EINVAL;
}
if ((cmdCode == EFFECT_CMD_SET_PARAM
- || cmdCode == EFFECT_CMD_SET_PARAM_DEFERRED) && // DEFERRED not generally used
- (sizeof(effect_param_t) > cmdSize
- || ((effect_param_t *)pCmdData)->psize > cmdSize
- - sizeof(effect_param_t)
- || ((effect_param_t *)pCmdData)->vsize > cmdSize
- - sizeof(effect_param_t)
- - ((effect_param_t *)pCmdData)->psize
- || roundUpDelta(((effect_param_t *)pCmdData)->psize, (uint32_t)sizeof(int)) >
- cmdSize
- - sizeof(effect_param_t)
- - ((effect_param_t *)pCmdData)->psize
- - ((effect_param_t *)pCmdData)->vsize)) {
+ || cmdCode == EFFECT_CMD_SET_PARAM_DEFERRED)
+ && // DEFERRED not generally used
+ (param == nullptr
+ || param->psize > cmdSize - sizeof(effect_param_t)
+ || param->vsize > cmdSize - sizeof(effect_param_t)
+ - param->psize
+ || roundUpDelta(param->psize,
+ (uint32_t) sizeof(int)) >
+ cmdSize
+ - sizeof(effect_param_t)
+ - param->psize
+ - param->vsize)) {
android_errorWriteLog(0x534e4554, "30204301");
return -EINVAL;
}
+ uint32_t replySize = maxReplySize;
+ reply->resize(replySize);
status_t status = mEffectInterface->command(cmdCode,
cmdSize,
- pCmdData,
- replySize,
- pReplyData);
+ const_cast<uint8_t*>(cmdData.data()),
+ &replySize,
+ reply->data());
+ reply->resize(status == NO_ERROR ? replySize : 0);
if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) {
- uint32_t size = (replySize == NULL) ? 0 : *replySize;
for (size_t i = 1; i < mHandles.size(); i++) {
EffectHandle *h = mHandles[i];
if (h != NULL && !h->disconnected()) {
- h->commandExecuted(cmdCode, cmdSize, pCmdData, size, pReplyData);
+ h->commandExecuted(cmdCode, cmdData, *reply);
}
}
}
@@ -1511,6 +1543,41 @@
return mOffloaded;
}
+/*static*/
+bool AudioFlinger::EffectModule::isHapticGenerator(const effect_uuid_t *type) {
+ return memcmp(type, FX_IID_HAPTICGENERATOR, sizeof(effect_uuid_t)) == 0;
+}
+
+bool AudioFlinger::EffectModule::isHapticGenerator() const {
+ return isHapticGenerator(&mDescriptor.type);
+}
+
+status_t AudioFlinger::EffectModule::setHapticIntensity(int id, int intensity)
+{
+ if (mStatus != NO_ERROR) {
+ return mStatus;
+ }
+ if (!isHapticGenerator()) {
+ ALOGW("Should not set haptic intensity for effects that are not HapticGenerator");
+ return INVALID_OPERATION;
+ }
+
+ std::vector<uint8_t> request(sizeof(effect_param_t) + 3 * sizeof(uint32_t));
+ effect_param_t *param = (effect_param_t*) request.data();
+ param->psize = sizeof(int32_t);
+ param->vsize = sizeof(int32_t) * 2;
+ *(int32_t*)param->data = HG_PARAM_HAPTIC_INTENSITY;
+ *((int32_t*)param->data + 1) = id;
+ *((int32_t*)param->data + 2) = intensity;
+ std::vector<uint8_t> response;
+ status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
+ if (status == NO_ERROR) {
+ LOG_ALWAYS_FATAL_IF(response.size() != 4);
+ status = *reinterpret_cast<const status_t*>(response.data());
+ }
+ return status;
+}
+
static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
std::stringstream ss;
@@ -1589,9 +1656,9 @@
#define LOG_TAG "AudioFlinger::EffectHandle"
AudioFlinger::EffectHandle::EffectHandle(const sp<EffectBase>& effect,
- const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
- int32_t priority)
+ const sp<AudioFlinger::Client>& client,
+ const sp<media::IEffectClient>& effectClient,
+ int32_t priority)
: BnEffect(),
mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false)
@@ -1625,20 +1692,24 @@
return mClient == 0 || mCblkMemory != 0 ? OK : NO_MEMORY;
}
-status_t AudioFlinger::EffectHandle::enable()
+#define RETURN(code) \
+ *_aidl_return = (code); \
+ return Status::ok();
+
+Status AudioFlinger::EffectHandle::enable(int32_t* _aidl_return)
{
AutoMutex _l(mLock);
ALOGV("enable %p", this);
sp<EffectBase> effect = mEffect.promote();
if (effect == 0 || mDisconnected) {
- return DEAD_OBJECT;
+ RETURN(DEAD_OBJECT);
}
if (!mHasControl) {
- return INVALID_OPERATION;
+ RETURN(INVALID_OPERATION);
}
if (mEnabled) {
- return NO_ERROR;
+ RETURN(NO_ERROR);
}
mEnabled = true;
@@ -1646,54 +1717,55 @@
status_t status = effect->updatePolicyState();
if (status != NO_ERROR) {
mEnabled = false;
- return status;
+ RETURN(status);
}
effect->checkSuspendOnEffectEnabled(true, false /*threadLocked*/);
// checkSuspendOnEffectEnabled() can suspend this same effect when enabled
if (effect->suspended()) {
- return NO_ERROR;
+ RETURN(NO_ERROR);
}
status = effect->setEnabled(true, true /*fromHandle*/);
if (status != NO_ERROR) {
mEnabled = false;
}
- return status;
+ RETURN(status);
}
-status_t AudioFlinger::EffectHandle::disable()
+Status AudioFlinger::EffectHandle::disable(int32_t* _aidl_return)
{
ALOGV("disable %p", this);
AutoMutex _l(mLock);
sp<EffectBase> effect = mEffect.promote();
if (effect == 0 || mDisconnected) {
- return DEAD_OBJECT;
+ RETURN(DEAD_OBJECT);
}
if (!mHasControl) {
- return INVALID_OPERATION;
+ RETURN(INVALID_OPERATION);
}
if (!mEnabled) {
- return NO_ERROR;
+ RETURN(NO_ERROR);
}
mEnabled = false;
effect->updatePolicyState();
if (effect->suspended()) {
- return NO_ERROR;
+ RETURN(NO_ERROR);
}
status_t status = effect->setEnabled(false, true /*fromHandle*/);
- return status;
+ RETURN(status);
}
-void AudioFlinger::EffectHandle::disconnect()
+Status AudioFlinger::EffectHandle::disconnect()
{
ALOGV("%s %p", __FUNCTION__, this);
disconnect(true);
+ return Status::ok();
}
void AudioFlinger::EffectHandle::disconnect(bool unpinIfLast)
@@ -1730,11 +1802,16 @@
}
}
-status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData)
+Status AudioFlinger::EffectHandle::getCblk(media::SharedFileRegion* _aidl_return) {
+ LOG_ALWAYS_FATAL_IF(!convertIMemoryToSharedFileRegion(mCblkMemory, _aidl_return));
+ return Status::ok();
+}
+
+Status AudioFlinger::EffectHandle::command(int32_t cmdCode,
+ const std::vector<uint8_t>& cmdData,
+ int32_t maxResponseSize,
+ std::vector<uint8_t>* response,
+ int32_t* _aidl_return)
{
ALOGVV("command(), cmdCode: %d, mHasControl: %d, mEffect: %p",
cmdCode, mHasControl, mEffect.unsafe_get());
@@ -1754,49 +1831,46 @@
break;
}
android_errorWriteLog(0x534e4554, "62019992");
- return BAD_VALUE;
+ RETURN(BAD_VALUE);
}
if (cmdCode == EFFECT_CMD_ENABLE) {
- if (*replySize < sizeof(int)) {
+ if (maxResponseSize < sizeof(int)) {
android_errorWriteLog(0x534e4554, "32095713");
- return BAD_VALUE;
+ RETURN(BAD_VALUE);
}
- *(int *)pReplyData = NO_ERROR;
- *replySize = sizeof(int);
- return enable();
+ writeToBuffer(NO_ERROR, response);
+ return enable(_aidl_return);
} else if (cmdCode == EFFECT_CMD_DISABLE) {
- if (*replySize < sizeof(int)) {
+ if (maxResponseSize < sizeof(int)) {
android_errorWriteLog(0x534e4554, "32095713");
- return BAD_VALUE;
+ RETURN(BAD_VALUE);
}
- *(int *)pReplyData = NO_ERROR;
- *replySize = sizeof(int);
- return disable();
+ writeToBuffer(NO_ERROR, response);
+ return disable(_aidl_return);
}
AutoMutex _l(mLock);
sp<EffectBase> effect = mEffect.promote();
if (effect == 0 || mDisconnected) {
- return DEAD_OBJECT;
+ RETURN(DEAD_OBJECT);
}
// only get parameter command is permitted for applications not controlling the effect
if (!mHasControl && cmdCode != EFFECT_CMD_GET_PARAM) {
- return INVALID_OPERATION;
+ RETURN(INVALID_OPERATION);
}
// handle commands that are not forwarded transparently to effect engine
if (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) {
if (mClient == 0) {
- return INVALID_OPERATION;
+ RETURN(INVALID_OPERATION);
}
- if (*replySize < sizeof(int)) {
+ if (maxResponseSize < sizeof(int)) {
android_errorWriteLog(0x534e4554, "32095713");
- return BAD_VALUE;
+ RETURN(BAD_VALUE);
}
- *(int *)pReplyData = NO_ERROR;
- *replySize = sizeof(int);
+ writeToBuffer(NO_ERROR, response);
// No need to trylock() here as this function is executed in the binder thread serving a
// particular client process: no risk to block the whole media server process or mixer
@@ -1809,10 +1883,10 @@
serverIndex > EFFECT_PARAM_BUFFER_SIZE) {
mCblk->serverIndex = 0;
mCblk->clientIndex = 0;
- return BAD_VALUE;
+ RETURN(BAD_VALUE);
}
status_t status = NO_ERROR;
- effect_param_t *param = NULL;
+ std::vector<uint8_t> param;
for (uint32_t index = serverIndex; index < clientIndex;) {
int *p = (int *)(mBuffer + index);
const int size = *p++;
@@ -1824,23 +1898,16 @@
break;
}
- // copy to local memory in case of client corruption b/32220769
- auto *newParam = (effect_param_t *)realloc(param, size);
- if (newParam == NULL) {
- ALOGW("command(): out of memory");
- status = NO_MEMORY;
- break;
- }
- param = newParam;
- memcpy(param, p, size);
+ std::copy(reinterpret_cast<const uint8_t*>(p),
+ reinterpret_cast<const uint8_t*>(p) + size,
+ std::back_inserter(param));
- int reply = 0;
- uint32_t rsize = sizeof(reply);
+ std::vector<uint8_t> replyBuffer;
status_t ret = effect->command(EFFECT_CMD_SET_PARAM,
- size,
param,
- &rsize,
- &reply);
+ sizeof(int),
+ &replyBuffer);
+ int reply = *reinterpret_cast<const int*>(replyBuffer.data());
// verify shared memory: server index shouldn't change; client index can't go back.
if (serverIndex != mCblk->serverIndex
@@ -1853,21 +1920,24 @@
// stop at first error encountered
if (ret != NO_ERROR) {
status = ret;
- *(int *)pReplyData = reply;
+ writeToBuffer(reply, response);
break;
} else if (reply != NO_ERROR) {
- *(int *)pReplyData = reply;
+ writeToBuffer(reply, response);
break;
}
index += size;
}
- free(param);
mCblk->serverIndex = 0;
mCblk->clientIndex = 0;
- return status;
+ RETURN(status);
}
- return effect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ status_t status = effect->command(cmdCode,
+ cmdData,
+ maxResponseSize,
+ response);
+ RETURN(status);
}
void AudioFlinger::EffectHandle::setControl(bool hasControl, bool signal, bool enabled)
@@ -1883,13 +1953,11 @@
}
void AudioFlinger::EffectHandle::commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData)
+ const std::vector<uint8_t>& cmdData,
+ const std::vector<uint8_t>& replyData)
{
if (mEffectClient != 0) {
- mEffectClient->commandExecuted(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ mEffectClient->commandExecuted(cmdCode, cmdData, replyData);
}
}
@@ -1902,13 +1970,6 @@
}
}
-status_t AudioFlinger::EffectHandle::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- return BnEffect::onTransact(code, data, reply, flags);
-}
-
-
void AudioFlinger::EffectHandle::dumpToBuffer(char* buffer, size_t size)
{
bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
@@ -2374,6 +2435,25 @@
}
}
+// containsHapticGeneratingEffect_l must be called with ThreadBase::mLock or EffectChain::mLock held
+bool AudioFlinger::EffectChain::containsHapticGeneratingEffect_l()
+{
+ for (size_t i = 0; i < mEffects.size(); ++i) {
+ if (mEffects[i]->isHapticGenerator()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void AudioFlinger::EffectChain::setHapticIntensity_l(int id, int intensity)
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mEffects.size(); ++i) {
+ mEffects[i]->setHapticIntensity(id, intensity);
+ }
+}
+
void AudioFlinger::EffectChain::syncHalEffectsState()
{
Mutex::Autolock _l(mLock);
@@ -2828,6 +2908,14 @@
return t->channelCount();
}
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::hapticChannelMask() const {
+ sp<ThreadBase> t = mThread.promote();
+ if (t == nullptr) {
+ return AUDIO_CHANNEL_NONE;
+ }
+ return t->hapticChannelMask();
+}
+
size_t AudioFlinger::EffectChain::EffectCallback::frameCount() const {
sp<ThreadBase> t = mThread.promote();
if (t == nullptr) {
@@ -2932,10 +3020,14 @@
Mutex::Autolock _l(mProxyLock);
if (status == NO_ERROR) {
for (auto& handle : mEffectHandles) {
+ Status bs;
if (enabled) {
- status = handle.second->enable();
+ bs = handle.second->enable(&status);
} else {
- status = handle.second->disable();
+ bs = handle.second->disable(&status);
+ }
+ if (!bs.isOk()) {
+ status = bs.transactionError();
}
}
}
@@ -2994,7 +3086,7 @@
__func__, port->type, port->ext.device.type,
port->ext.device.address, port->id, patch.isSoftware());
if (port->type != AUDIO_PORT_TYPE_DEVICE || port->ext.device.type != mDevice.mType
- || port->ext.device.address != mDevice.mAddress) {
+ || port->ext.device.address != mDevice.address()) {
return NAME_NOT_FOUND;
}
status_t status = NAME_NOT_FOUND;
@@ -3043,10 +3135,14 @@
status = BAD_VALUE;
}
if (status == NO_ERROR || status == ALREADY_EXISTS) {
+ Status bs;
if (isEnabled()) {
- (*handle)->enable();
+ bs = (*handle)->enable(&status);
} else {
- (*handle)->disable();
+ bs = (*handle)->disable(&status);
+ }
+ if (!bs.isOk()) {
+ status = bs.transactionError();
}
}
return status;
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 2c79ac5..03bdc60 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -36,6 +36,7 @@
virtual uint32_t sampleRate() const = 0;
virtual audio_channel_mask_t channelMask() const = 0;
virtual uint32_t channelCount() const = 0;
+ virtual audio_channel_mask_t hapticChannelMask() const = 0;
virtual size_t frameCount() const = 0;
// Non trivial methods usually implemented with help from ThreadBase:
@@ -132,11 +133,10 @@
void setSuspended(bool suspended);
bool suspended() const;
- virtual status_t command(uint32_t cmdCode __unused,
- uint32_t cmdSize __unused,
- void *pCmdData __unused,
- uint32_t *replySize __unused,
- void *pReplyData __unused) { return NO_ERROR; };
+ virtual status_t command(int32_t __unused,
+ const std::vector<uint8_t>& __unused,
+ int32_t __unused,
+ std::vector<uint8_t>* __unused) { return NO_ERROR; };
void setCallback(const sp<EffectCallbackInterface>& callback) { mCallback = callback; }
sp<EffectCallbackInterface>& callback() { return mCallback; }
@@ -213,11 +213,10 @@
void process();
bool updateState();
- status_t command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData) override;
+ status_t command(int32_t cmdCode,
+ const std::vector<uint8_t>& cmdData,
+ int32_t maxReplySize,
+ std::vector<uint8_t>* reply) override;
void reset_l();
status_t configure();
@@ -255,6 +254,11 @@
sp<EffectModule> asEffectModule() override { return this; }
+ static bool isHapticGenerator(const effect_uuid_t* type);
+ bool isHapticGenerator() const;
+
+ status_t setHapticIntensity(int id, int intensity);
+
void dump(int fd, const Vector<String16>& args);
private:
@@ -314,32 +318,29 @@
// There is one EffectHandle object for each application controlling (or using)
// an effect module.
// The EffectHandle is obtained by calling AudioFlinger::createEffect().
-class EffectHandle: public android::BnEffect {
+class EffectHandle: public android::media::BnEffect {
public:
EffectHandle(const sp<EffectBase>& effect,
const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
+ const sp<media::IEffectClient>& effectClient,
int32_t priority);
virtual ~EffectHandle();
virtual status_t initCheck();
// IEffect
- virtual status_t enable();
- virtual status_t disable();
- virtual status_t command(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t *replySize,
- void *pReplyData);
- virtual void disconnect();
-private:
- void disconnect(bool unpinIfLast);
-public:
- virtual sp<IMemory> getCblk() const { return mCblkMemory; }
- virtual status_t onTransact(uint32_t code, const Parcel& data,
- Parcel* reply, uint32_t flags);
+ android::binder::Status enable(int32_t* _aidl_return) override;
+ android::binder::Status disable(int32_t* _aidl_return) override;
+ android::binder::Status command(int32_t cmdCode,
+ const std::vector<uint8_t>& cmdData,
+ int32_t maxResponseSize,
+ std::vector<uint8_t>* response,
+ int32_t* _aidl_return) override;
+ android::binder::Status disconnect() override;
+ android::binder::Status getCblk(media::SharedFileRegion* _aidl_return) override;
+private:
+ void disconnect(bool unpinIfLast);
// Give or take control of effect module
// - hasControl: true if control is given, false if removed
@@ -347,10 +348,8 @@
// - enabled: state of the effect when control is passed
void setControl(bool hasControl, bool signal, bool enabled);
void commandExecuted(uint32_t cmdCode,
- uint32_t cmdSize,
- void *pCmdData,
- uint32_t replySize,
- void *pReplyData);
+ const std::vector<uint8_t>& cmdData,
+ const std::vector<uint8_t>& replyData);
void setEnabled(bool enabled);
bool enabled() const { return mEnabled; }
@@ -373,19 +372,20 @@
friend class AudioFlinger; // for mEffect, mHasControl, mEnabled
DISALLOW_COPY_AND_ASSIGN(EffectHandle);
- Mutex mLock; // protects IEffect method calls
- wp<EffectBase> mEffect; // pointer to controlled EffectModule
- sp<IEffectClient> mEffectClient; // callback interface for client notifications
- /*const*/ sp<Client> mClient; // client for shared memory allocation, see disconnect()
- sp<IMemory> mCblkMemory; // shared memory for control block
- effect_param_cblk_t* mCblk; // control block for deferred parameter setting via
- // shared memory
- uint8_t* mBuffer; // pointer to parameter area in shared memory
- int mPriority; // client application priority to control the effect
- bool mHasControl; // true if this handle is controlling the effect
- bool mEnabled; // cached enable state: needed when the effect is
- // restored after being suspended
- bool mDisconnected; // Set to true by disconnect()
+ Mutex mLock; // protects IEffect method calls
+ wp<EffectBase> mEffect; // pointer to controlled EffectModule
+ sp<media::IEffectClient> mEffectClient; // callback interface for client notifications
+ /*const*/ sp<Client> mClient; // client for shared memory allocation, see
+ // disconnect()
+ sp<IMemory> mCblkMemory; // shared memory for control block
+ effect_param_cblk_t* mCblk; // control block for deferred parameter setting via
+ // shared memory
+ uint8_t* mBuffer; // pointer to parameter area in shared memory
+ int mPriority; // client application priority to control the effect
+ bool mHasControl; // true if this handle is controlling the effect
+ bool mEnabled; // cached enable state: needed when the effect is
+ // restored after being suspended
+ bool mDisconnected; // Set to true by disconnect()
};
// the EffectChain class represents a group of effects associated to one audio session.
@@ -501,6 +501,10 @@
// isCompatibleWithThread_l() must be called with thread->mLock held
bool isCompatibleWithThread_l(const sp<ThreadBase>& thread) const;
+ bool containsHapticGeneratingEffect_l();
+
+ void setHapticIntensity_l(int id, int intensity);
+
sp<EffectCallbackInterface> effectCallback() const { return mEffectCallback; }
wp<ThreadBase> thread() const { return mEffectCallback->thread(); }
@@ -532,6 +536,7 @@
uint32_t sampleRate() const override;
audio_channel_mask_t channelMask() const override;
uint32_t channelCount() const override;
+ audio_channel_mask_t hapticChannelMask() const override;
size_t frameCount() const override;
uint32_t latency() const override;
@@ -683,6 +688,7 @@
uint32_t sampleRate() const override;
audio_channel_mask_t channelMask() const override;
uint32_t channelCount() const override;
+ audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 396c797..857d3de 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -23,6 +23,7 @@
#include <media/ExtendedAudioBufferProvider.h>
#include <media/nbaio/NBAIO.h>
#include <media/nblog/NBLog.h>
+#include <vibrator/ExternalVibrationUtils.h>
#include "FastThreadState.h"
namespace android {
@@ -49,8 +50,7 @@
audio_format_t mFormat; // track format
int mGeneration; // increment when any field is assigned
bool mHapticPlaybackEnabled = false; // haptic playback is enabled or not
- AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_MUTE; // intensity of
- // haptic data
+ os::HapticScale mHapticIntensity = os::HapticScale::MUTE; // intensity of haptic data
};
// Represents a single state of the fast mixer
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a2df29b..a4b8650 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -161,12 +161,12 @@
mHapticPlaybackEnabled = hapticPlaybackEnabled;
}
/** Return at what intensity to play haptics, used in mixer. */
- AudioMixer::haptic_intensity_t getHapticIntensity() const { return mHapticIntensity; }
+ os::HapticScale getHapticIntensity() const { return mHapticIntensity; }
/** Set intensity of haptic playback, should be set after querying vibrator service. */
- void setHapticIntensity(AudioMixer::haptic_intensity_t hapticIntensity) {
- if (AudioMixer::isValidHapticIntensity(hapticIntensity)) {
+ void setHapticIntensity(os::HapticScale hapticIntensity) {
+ if (os::isValidHapticScale(hapticIntensity)) {
mHapticIntensity = hapticIntensity;
- setHapticPlaybackEnabled(mHapticIntensity != AudioMixer::HAPTIC_SCALE_MUTE);
+ setHapticPlaybackEnabled(mHapticIntensity != os::HapticScale::MUTE);
}
}
sp<os::ExternalVibration> getExternalVibration() const { return mExternalVibration; }
@@ -267,7 +267,7 @@
bool mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
// intensity to play haptic data
- AudioMixer::haptic_intensity_t mHapticIntensity = AudioMixer::HAPTIC_SCALE_MUTE;
+ os::HapticScale mHapticIntensity = os::HapticScale::MUTE;
class AudioVibrationController : public os::BnExternalVibrationController {
public:
explicit AudioVibrationController(Track* track) : mTrack(track) {}
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
index c7aba79..0ce5681 100644
--- a/services/audioflinger/SpdifStreamOut.cpp
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -39,7 +39,7 @@
, mSpdifEncoder(this, format)
, mApplicationFormat(AUDIO_FORMAT_DEFAULT)
, mApplicationSampleRate(0)
- , mApplicationChannelMask(0)
+ , mApplicationChannelMask(AUDIO_CHANNEL_NONE)
{
}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index c5d1cc7..46969ef 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -116,6 +116,8 @@
namespace android {
+using media::IEffectClient;
+
// retry counts for buffer fill timeout
// 50 * ~20msecs = 1 second
static const int8_t kMaxTrackRetries = 50;
@@ -986,15 +988,16 @@
if (mPowerManager != 0) {
sp<IBinder> binder = new BBinder();
// Uses AID_AUDIOSERVER for wakelock. updateWakeLockUids_l() updates with client uids.
- status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
- binder,
+ binder::Status status = mPowerManager->acquireWakeLockAsync(binder,
+ POWERMANAGER_PARTIAL_WAKE_LOCK,
getWakeLockTag(),
String16("audioserver"),
- true /* FIXME force oneway contrary to .aidl */);
- if (status == NO_ERROR) {
+ {} /* workSource */,
+ {} /* historyTag */);
+ if (status.isOk()) {
mWakeLockToken = binder;
}
- ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
+ ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status.exceptionCode());
}
gBoottime.acquire(mWakeLockToken);
@@ -1014,8 +1017,7 @@
if (mWakeLockToken != 0) {
ALOGV("releaseWakeLock_l() %s", mThreadName);
if (mPowerManager != 0) {
- mPowerManager->releaseWakeLock(mWakeLockToken, 0,
- true /* FIXME force oneway contrary to .aidl */);
+ mPowerManager->releaseWakeLockAsync(mWakeLockToken, 0);
}
mWakeLockToken.clear();
}
@@ -1029,7 +1031,7 @@
if (binder == 0) {
ALOGW("Thread %s cannot connect to the power manager service", mThreadName);
} else {
- mPowerManager = interface_cast<IPowerManager>(binder);
+ mPowerManager = interface_cast<os::IPowerManager>(binder);
binder->linkToDeath(mDeathRecipient);
}
}
@@ -1056,10 +1058,9 @@
}
if (mPowerManager != 0) {
std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints
- status_t status = mPowerManager->updateWakeLockUids(
- mWakeLockToken, uidsAsInt.size(), uidsAsInt.data(),
- true /* FIXME force oneway contrary to .aidl */);
- ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status);
+ binder::Status status = mPowerManager->updateWakeLockUidsAsync(
+ mWakeLockToken, uidsAsInt);
+ ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status.exceptionCode());
}
}
@@ -1244,6 +1245,11 @@
return BAD_VALUE;
}
}
+
+ if (EffectModule::isHapticGenerator(&desc->type)) {
+ ALOGE("%s(): HapticGenerator is not supported in RecordThread", __func__);
+ return BAD_VALUE;
+ }
return NO_ERROR;
}
@@ -1263,6 +1269,12 @@
return NO_ERROR;
}
+ if (EffectModule::isHapticGenerator(&desc->type) && mHapticChannelCount == 0) {
+ ALOGW("%s: thread doesn't support haptic playback while the effect is HapticGenerator",
+ __func__);
+ return BAD_VALUE;
+ }
+
switch (mType) {
case MIXER: {
#ifndef MULTICHANNEL_EFFECT_CHAIN
@@ -1934,7 +1946,7 @@
// here instead of constructor of PlaybackThread so that the onFirstRef
// callback would not be made on an incompletely constructed object.
if (mOutput->stream->setEventCallback(this) != OK) {
- ALOGE("Failed to add event callback");
+ ALOGD("Failed to add event callback");
}
}
run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
@@ -2347,10 +2359,20 @@
}
}
+ // Set DIRECT flag if current thread is DirectOutputThread. This can
+ // happen when the playback is rerouted to direct output thread by
+ // dynamic audio policy.
+ // Do NOT report the flag changes back to client, since the client
+ // doesn't explicitly request a direct flag.
+ audio_output_flags_t trackFlags = *flags;
+ if (mType == DIRECT) {
+ trackFlags = static_cast<audio_output_flags_t>(trackFlags | AUDIO_OUTPUT_FLAG_DIRECT);
+ }
+
track = new Track(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
- sessionId, creatorPid, uid, *flags, TrackBase::TYPE_DEFAULT, portId,
+ sessionId, creatorPid, uid, trackFlags, TrackBase::TYPE_DEFAULT, portId,
SIZE_MAX /*frameCountToBeReady*/, opPackageName);
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
@@ -2530,15 +2552,17 @@
track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
}
- if ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
- && mHapticChannelMask != AUDIO_CHANNEL_NONE) {
+ sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+ if (mHapticChannelMask != AUDIO_CHANNEL_NONE
+ && ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
+ || (chain != nullptr && chain->containsHapticGeneratingEffect_l()))) {
// Unlock due to VibratorService will lock for this call and will
// call Tracks.mute/unmute which also require thread's lock.
mLock.unlock();
const int intensity = AudioFlinger::onExternalVibrationStart(
track->getExternalVibration());
mLock.lock();
- track->setHapticIntensity(static_cast<AudioMixer::haptic_intensity_t>(intensity));
+ track->setHapticIntensity(static_cast<os::HapticScale>(intensity));
// Haptic playback should be enabled by vibrator service.
if (track->getHapticPlaybackEnabled()) {
// Disable haptic playback of all active track to ensure only
@@ -2547,12 +2571,16 @@
t->setHapticPlaybackEnabled(false);
}
}
+
+ // Set haptic intensity for effect
+ if (chain != nullptr) {
+ chain->setHapticIntensity_l(track->id(), intensity);
+ }
}
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
- sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
track->sessionId());
@@ -2869,8 +2897,8 @@
(void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
}
- mHapticChannelMask = mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
- mChannelMask &= ~mHapticChannelMask;
+ mHapticChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
+ mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
mChannelCount -= mHapticChannelCount;
@@ -3739,9 +3767,15 @@
// Determine which session to pick up haptic data.
// This must be done under the same lock as prepareTracks_l().
+ // The haptic data from the effect is at a higher priority than the one from track.
// TODO: Write haptic data directly to sink buffer when mixing.
if (mHapticChannelCount > 0 && effectChains.size() > 0) {
for (const auto& track : mActiveTracks) {
+ sp<EffectChain> effectChain = getEffectChain_l(track->sessionId());
+ if (effectChain != nullptr && effectChain->containsHapticGeneratingEffect_l()) {
+ activeHapticSessionId = track->sessionId();
+ break;
+ }
if (track->getHapticPlaybackEnabled()) {
activeHapticSessionId = track->sessionId();
break;
@@ -4111,13 +4145,20 @@
// remove from our tracks vector
removeTrack_l(track);
}
- if ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
- && mHapticChannelCount > 0) {
+ if (mHapticChannelCount > 0 &&
+ ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
+ || (chain != nullptr && chain->containsHapticGeneratingEffect_l()))) {
mLock.unlock();
// Unlock due to VibratorService will lock for this call and will
// call Tracks.mute/unmute which also require thread's lock.
AudioFlinger::onExternalVibrationStop(track->getExternalVibration());
mLock.lock();
+
+ // When the track is stop, set the haptic intensity as MUTE
+ // for the HapticGenerator effect.
+ if (chain != nullptr) {
+ chain->setHapticIntensity_l(track->id(), static_cast<int>(os::HapticScale::MUTE));
+ }
}
}
}
@@ -4206,7 +4247,7 @@
"Enumerated device type(%#x) must not be used "
"as it does not support audio patches",
patch->sinks[i].ext.device.type);
- type |= patch->sinks[i].ext.device.type;
+ type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
deviceTypeAddrs.push_back(AudioDeviceTypeAddr(patch->sinks[i].ext.device.type,
patch->sinks[i].ext.device.address));
}
@@ -4456,11 +4497,12 @@
// wrap the source side of the MonoPipe to make it an AudioBufferProvider
fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
fastTrack->mVolumeProvider = NULL;
- fastTrack->mChannelMask = mChannelMask | mHapticChannelMask; // mPipeSink channel mask for
- // audio to FastMixer
+ fastTrack->mChannelMask = static_cast<audio_channel_mask_t>(
+ mChannelMask | mHapticChannelMask); // mPipeSink channel mask for
+ // audio to FastMixer
fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
- fastTrack->mHapticIntensity = AudioMixer::HAPTIC_SCALE_NONE;
+ fastTrack->mHapticIntensity = os::HapticScale::NONE;
fastTrack->mGeneration++;
state->mFastTracksGen++;
state->mTrackMask = 1;
@@ -4471,7 +4513,8 @@
// specify sink channel mask when haptic channel mask present as it can not
// be calculated directly from channel count
state->mSinkChannelMask = mHapticChannelMask == AUDIO_CHANNEL_NONE
- ? AUDIO_CHANNEL_NONE : mChannelMask | mHapticChannelMask;
+ ? AUDIO_CHANNEL_NONE
+ : static_cast<audio_channel_mask_t>(mChannelMask | mHapticChannelMask);
state->mCommand = FastMixerState::COLD_IDLE;
// already done in constructor initialization list
//mFastMixerFutex = 0;
@@ -8563,7 +8606,7 @@
// store new device and send to effects
mInDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
- mInDeviceTypeAddr.mAddress = patch->sources[0].ext.device.address;
+ mInDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
audio_port_handle_t deviceId = patch->sources[0].id;
for (size_t i = 0; i < mEffectChains.size(); i++) {
mEffectChains[i]->setInputDevice_l(inDeviceTypeAddr());
@@ -8705,6 +8748,11 @@
return mThread->getMmapPosition(position);
}
+status_t AudioFlinger::MmapThreadHandle::getExternalPosition(uint64_t *position,
+ int64_t *timeNanos) {
+ return mThread->getExternalPosition(position, timeNanos);
+}
+
status_t AudioFlinger::MmapThreadHandle::start(const AudioClient& client,
const audio_attributes_t *attr, audio_port_handle_t *handle)
@@ -8740,7 +8788,6 @@
AudioFlinger::MmapThread::~MmapThread()
{
- releaseWakeLock_l();
}
void AudioFlinger::MmapThread::onFirstRef()
@@ -8790,7 +8837,6 @@
return NO_INIT;
}
mStandby = true;
- acquireWakeLock();
return mHalStream->createMmapBuffer(minSizeFrames, info);
}
@@ -8829,8 +8875,12 @@
status_t ret;
if (*handle == mPortId) {
- // for the first track, reuse portId and session allocated when the stream was opened
- return exitStandby();
+ // For the first track, reuse portId and session allocated when the stream was opened.
+ ret = exitStandby();
+ if (ret == NO_ERROR) {
+ acquireWakeLock();
+ }
+ return ret;
}
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
@@ -8951,6 +9001,7 @@
if (handle == mPortId) {
mHalStream->stop();
+ releaseWakeLock();
return NO_ERROR;
}
@@ -9193,7 +9244,7 @@
"Enumerated device type(%#x) must not be used "
"as it does not support audio patches",
patch->sinks[i].ext.device.type);
- type |= patch->sinks[i].ext.device.type;
+ type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
sinkDeviceTypeAddrs.push_back(AudioDeviceTypeAddr(patch->sinks[i].ext.device.type,
patch->sinks[i].ext.device.address));
}
@@ -9204,7 +9255,7 @@
deviceId = patch->sources[0].id;
numDevices = mPatch.num_sources;
sourceDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
- sourceDeviceTypeAddr.mAddress = patch->sources[0].ext.device.address;
+ sourceDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
}
for (size_t i = 0; i < mEffectChains.size(); i++) {
@@ -9402,6 +9453,11 @@
return BAD_VALUE;
}
+ if (EffectModule::isHapticGenerator(&desc->type)) {
+ ALOGE("%s(): HapticGenerator is not supported for MmapThread", __func__);
+ return BAD_VALUE;
+ }
+
return NO_ERROR;
}
@@ -9654,6 +9710,20 @@
}
}
+status_t AudioFlinger::MmapPlaybackThread::getExternalPosition(uint64_t *position,
+ int64_t *timeNanos)
+{
+ if (mOutput == nullptr) {
+ return NO_INIT;
+ }
+ struct timespec timestamp;
+ status_t status = mOutput->getPresentationPosition(position, &timestamp);
+ if (status == NO_ERROR) {
+ *timeNanos = timestamp.tv_sec * NANOS_PER_SECOND + timestamp.tv_nsec;
+ }
+ return status;
+}
+
void AudioFlinger::MmapPlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
{
MmapThread::dumpInternals_l(fd, args);
@@ -9758,4 +9828,13 @@
}
}
+status_t AudioFlinger::MmapCaptureThread::getExternalPosition(
+ uint64_t *position, int64_t *timeNanos)
+{
+ if (mInput == nullptr) {
+ return NO_INIT;
+ }
+ return mInput->getCapturePosition((int64_t*)position, timeNanos);
+}
+
} // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 6b33ad5..014f2d7 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -272,6 +272,7 @@
// Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
// and returns the [normal mix] buffer's frame count.
virtual size_t frameCount() const = 0;
+ virtual audio_channel_mask_t hapticChannelMask() const { return AUDIO_CHANNEL_NONE; }
virtual uint32_t latency_l() const { return 0; }
virtual void setVolumeForOutput_l(float left __unused, float right __unused) const {}
@@ -348,7 +349,7 @@
sp<EffectHandle> createEffect_l(
const sp<AudioFlinger::Client>& client,
- const sp<IEffectClient>& effectClient,
+ const sp<media::IEffectClient>& effectClient,
int32_t priority,
audio_session_t sessionId,
effect_descriptor_t *desc,
@@ -478,6 +479,25 @@
void onEffectEnable(const sp<EffectModule>& effect);
void onEffectDisable();
+ // invalidateTracksForAudioSession_l must be called with holding mLock.
+ virtual void invalidateTracksForAudioSession_l(audio_session_t sessionId __unused) const { }
+ // Invalidate all the tracks with the given audio session.
+ void invalidateTracksForAudioSession(audio_session_t sessionId) const {
+ Mutex::Autolock _l(mLock);
+ invalidateTracksForAudioSession_l(sessionId);
+ }
+
+ template <typename T>
+ void invalidateTracksForAudioSession_l(audio_session_t sessionId,
+ const T& tracks) const {
+ for (size_t i = 0; i < tracks.size(); ++i) {
+ const sp<TrackBase>& track = tracks[i];
+ if (sessionId == track->sessionId()) {
+ track->invalidate();
+ }
+ }
+ }
+
protected:
// entry describing an effect being suspended in mSuspendedSessions keyed vector
@@ -575,7 +595,7 @@
static const int kThreadNameLength = 16; // prctl(PR_SET_NAME) limit
char mThreadName[kThreadNameLength]; // guaranteed NUL-terminated
- sp<IPowerManager> mPowerManager;
+ sp<os::IPowerManager> mPowerManager;
sp<IBinder> mWakeLockToken;
const sp<PMDeathRecipient> mDeathRecipient;
// list of suspended effects per session and per type. The first (outer) vector is
@@ -940,6 +960,13 @@
&& outDeviceTypes().count(mTimestampCorrectedDevice) != 0;
}
+ audio_channel_mask_t hapticChannelMask() const override {
+ return mHapticChannelMask;
+ }
+ bool supportsHapticPlayback() const {
+ return (mHapticChannelMask & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE;
+ }
+
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -1062,6 +1089,11 @@
uint32_t trackCountForUid_l(uid_t uid) const;
+ void invalidateTracksForAudioSession_l(
+ audio_session_t sessionId) const override {
+ ThreadBase::invalidateTracksForAudioSession_l(sessionId, mTracks);
+ }
+
private:
friend class AudioFlinger; // for numerous
@@ -1792,6 +1824,7 @@
audio_port_handle_t *handle);
status_t stop(audio_port_handle_t handle);
status_t standby();
+ virtual status_t getExternalPosition(uint64_t *position, int64_t *timeNaos) = 0;
// RefBase
virtual void onFirstRef();
@@ -1903,6 +1936,8 @@
virtual void toAudioPortConfig(struct audio_port_config *config);
+ status_t getExternalPosition(uint64_t *position, int64_t *timeNanos) override;
+
protected:
void dumpInternals_l(int fd, const Vector<String16>& args) override;
@@ -1933,6 +1968,8 @@
virtual void toAudioPortConfig(struct audio_port_config *config);
+ status_t getExternalPosition(uint64_t *position, int64_t *timeNanos) override;
+
protected:
AudioStreamIn* mInput;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 178809c..1a12a5f 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -610,7 +610,10 @@
+ "_" + std::to_string(mId) + "_T");
#endif
- if (channelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
+ if (thread->supportsHapticPlayback()) {
+ // If the track is attached to a haptic playback thread, it may carry a
+ // HapticGenerator effect, which generates haptic data on the track. In that case,
+ // an external vibration is always created for every track attached to the haptic playback thread.
mAudioVibrationController = new AudioVibrationController(this);
mExternalVibration = new os::ExternalVibration(
mUid, opPackageName, mAttr, mAudioVibrationController);
@@ -951,6 +954,11 @@
// initial state-stopping. next state-pausing.
// What if resume is called ?
+ if (state == FLUSHED) {
+ // avoid underrun glitches when starting after flush
+ reset();
+ }
+
if (state == PAUSED || state == PAUSING) {
if (mResumeToStopping) {
// happened we need to resume to STOPPING_1
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 8d0e5db..93819f5 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -250,12 +250,12 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes) = 0;
virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes) = 0;
- virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices)
+ virtual status_t setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices)
= 0;
virtual status_t removeUidDeviceAffinities(uid_t uid) = 0;
virtual status_t setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices) = 0;
+ const AudioDeviceTypeAddrVector& devices) = 0;
virtual status_t removeUserIdDeviceAffinities(int userId) = 0;
virtual status_t startAudioSource(const struct audio_port_config *source,
@@ -295,13 +295,36 @@
virtual bool isCallScreenModeSupported() = 0;
- virtual status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device) = 0;
+ virtual status_t setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
- virtual status_t removePreferredDeviceForStrategy(product_strategy_t strategy) = 0;
+ virtual status_t removeDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role) = 0;
- virtual status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device) = 0;
+
+ virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices) = 0;
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) = 0;
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices) = 0;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 39d1140..1d9223e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -498,11 +498,6 @@
*/
bool isA2dpOffloadedOnPrimary() const;
- /**
- * returns true if A2DP is supported (either via hardware offload or software encoding)
- */
- bool isA2dpSupported() const;
-
sp<SwAudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const;
sp<SwAudioOutputDescriptor> getPrimaryOutput() const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index b82305d..c6bdb04 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -101,7 +101,7 @@
* An example of failure is when there are already rules in place to restrict
* a mix to the given uid (i.e. when a MATCH_UID rule was set for it).
*/
- status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
+ status_t setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices);
status_t removeUidDeviceAffinities(uid_t uid);
status_t getDevicesForUid(uid_t uid, Vector<AudioDeviceTypeAddr>& devices) const;
@@ -115,7 +115,7 @@
* An example of failure is when there are already rules in place to restrict
* a mix to the given userId (i.e. when a MATCH_USERID rule was set for it).
*/
- status_t setUserIdDeviceAffinities(int userId, const Vector<AudioDeviceTypeAddr>& devices);
+ status_t setUserIdDeviceAffinities(int userId, const AudioDeviceTypeAddrVector& devices);
status_t removeUserIdDeviceAffinities(int userId);
status_t getDevicesForUserId(int userId, Vector<AudioDeviceTypeAddr>& devices) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
index 923310c..80afe9d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/ClientDescriptor.h
@@ -113,6 +113,9 @@
const sp<AudioPolicyMix> getPrimaryMix() const {
return mPrimaryMix.promote();
};
+ bool hasLostPrimaryMix() const {
+ return mPrimaryMix.unsafe_get() && !mPrimaryMix.promote();
+ }
void setActive(bool active) override
{
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 0f9bcc1..c51d6a9 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -146,6 +146,15 @@
// 4) the combination of all devices is invalid for selection
sp<DeviceDescriptor> getDeviceForOpening() const;
+ // Return the device descriptor that matches the given AudioDeviceTypeAddr
+ sp<DeviceDescriptor> getDeviceFromDeviceTypeAddr(
+ const AudioDeviceTypeAddr& deviceTypeAddr) const;
+
+ // Return the device vector that contains device descriptor whose AudioDeviceTypeAddr appears
+ // in the given AudioDeviceTypeAddrVector
+ DeviceVector getDevicesFromDeviceTypeAddrVec(
+ const AudioDeviceTypeAddrVector& deviceTypeAddrVector) const;
+
// If there are devices with the given type and the devices to add is not empty,
// remove all the devices with the given type and add all the devices to add.
void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
index c4eab30..59eee52 100644
--- a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -72,6 +72,9 @@
audio_io_handle_t dstOutput);
void moveEffects(const std::vector<int>& ids, audio_io_handle_t dstOutput);
+ audio_io_handle_t getIoForSession(audio_session_t sessionId,
+ const effect_uuid_t *effectType = nullptr);
+
void dump(String8 *dst, int spaces = 0, bool verbose = true) const;
private:
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index a2e2eec..25f7c27 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -764,11 +764,6 @@
return false;
}
-bool SwAudioOutputCollection::isA2dpSupported() const
-{
- return (isA2dpOffloadedOnPrimary() || (getA2dpOutput() != 0));
-}
-
sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getPrimaryOutput() const
{
for (size_t i = 0; i < size(); i++) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index b6de4be..fc1d0e2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -463,7 +463,7 @@
}
status_t AudioPolicyMixCollection::setUidDeviceAffinities(uid_t uid,
- const Vector<AudioDeviceTypeAddr>& devices) {
+ const AudioDeviceTypeAddrVector& devices) {
// verify feasibility: for each player mix: if it already contains a
// "match uid" rule for this uid, return an error
// (adding a uid-device affinity would result in contradictory rules)
@@ -565,7 +565,7 @@
}
status_t AudioPolicyMixCollection::setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices) {
+ const AudioDeviceTypeAddrVector& devices) {
// verify feasibility: for each player mix: if it already contains a
// "match userId" rule for this userId, return an error
// (adding a userId-device affinity would result in contradictory rules)
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 68a32a2..b450e82 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -227,6 +227,7 @@
{
bool added = false;
for (const auto& device : devices) {
+ ALOG_ASSERT(device != nullptr, "Null pointer found when adding DeviceVector");
if (indexOf(device) < 0 && SortedVector::add(device) >= 0) {
added = true;
}
@@ -238,6 +239,7 @@
ssize_t DeviceVector::add(const sp<DeviceDescriptor>& item)
{
+ ALOG_ASSERT(item != nullptr, "Adding null pointer to DeviceVector");
ssize_t ret = indexOf(item);
if (ret < 0) {
@@ -375,7 +377,7 @@
if (isEmpty()) {
// Return nullptr if this collection is empty.
return nullptr;
- } else if (areAllOfSameDeviceType(types(), audio_is_input_device)) {
+ } else if (areAllOfSameDeviceType(types(), audio_call_is_input_device)) {
// For input case, return the first one when there is only one device.
return size() > 1 ? nullptr : *begin();
} else if (areAllOfSameDeviceType(types(), audio_is_output_device)) {
@@ -388,6 +390,24 @@
return nullptr;
}
+sp<DeviceDescriptor> DeviceVector::getDeviceFromDeviceTypeAddr(
+ const AudioDeviceTypeAddr& deviceTypeAddr) const {
+ return getDevice(deviceTypeAddr.mType, String8(deviceTypeAddr.getAddress()),
+ AUDIO_FORMAT_DEFAULT);
+}
+
+DeviceVector DeviceVector::getDevicesFromDeviceTypeAddrVec(
+ const AudioDeviceTypeAddrVector& deviceTypeAddrVector) const {
+ DeviceVector devices;
+ for (const auto& deviceTypeAddr : deviceTypeAddrVector) {
+ sp<DeviceDescriptor> device = getDeviceFromDeviceTypeAddr(deviceTypeAddr);
+ if (device != nullptr) {
+ devices.add(device);
+ }
+ }
+ return devices;
+}
+
void DeviceVector::replaceDevicesByType(
audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
DeviceVector devicesToRemove = getDevicesFromType(typeToRemove);
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
index 415962a..843f5da 100644
--- a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -202,6 +202,19 @@
}
}
+audio_io_handle_t EffectDescriptorCollection::getIoForSession(audio_session_t sessionId,
+ const effect_uuid_t *effectType)
+{
+ for (size_t i = 0; i < size(); ++i) {
+ sp<EffectDescriptor> effect = valueAt(i);
+ if (effect->mSession == sessionId && (effectType == nullptr ||
+ memcmp(&effect->mDesc.type, effectType, sizeof(effect_uuid_t)) == 0)) {
+ return effect->mIo;
+ }
+ }
+ return AUDIO_IO_HANDLE_NONE;
+}
+
EffectDescriptorCollection EffectDescriptorCollection::getEffectsForIo(audio_io_handle_t io) const
{
EffectDescriptorCollection effects;
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 883e713..889f031 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -337,7 +337,7 @@
std::string mode = getXmlAttribute(cur, Attributes::mode);
if (!mode.empty()) {
- gain->setMode(GainModeConverter::maskFromString(mode));
+ gain->setMode(static_cast<audio_gain_mode_t>(GainModeConverter::maskFromString(mode)));
}
std::string channelsLiteral = getXmlAttribute(cur, Attributes::channelMask);
diff --git a/services/audiopolicy/config/a2dp_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/a2dp_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..2d323f6
--- /dev/null
+++ b/services/audiopolicy/config/a2dp_audio_policy_configuration_7_0.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- A2dp Audio HAL Audio Policy Configuration file -->
+<module name="a2dp" halVersion="2.0">
+ <mixPorts>
+ <mixPort name="a2dp output" role="source"/>
+ <mixPort name="a2dp input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT A2DP Out"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Headphones"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Speaker"
+ sources="a2dp output"/>
+ <route type="mix" sink="a2dp input"
+ sources="BT A2DP In"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/a2dp_in_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/a2dp_in_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..d59ad70
--- /dev/null
+++ b/services/audiopolicy/config/a2dp_in_audio_policy_configuration_7_0.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Input Audio HAL Audio Policy Configuration file -->
+<module name="a2dp" halVersion="2.0">
+ <mixPorts>
+ <mixPort name="a2dp input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="a2dp input"
+ sources="BT A2DP In"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index b28381b..dcdc035 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -91,7 +91,7 @@
<!-- Output devices declaration, i.e. Sink DEVICE PORT -->
<devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
- samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</devicePort>
<devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
diff --git a/services/audiopolicy/config/audio_policy_configuration_7_0.xml b/services/audiopolicy/config/audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..31c8954
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_7_0.xml
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- version section contains a “version” tag in the form “major.minor” e.g version=”1.0” -->
+
+ <!-- Global configuration Declaration -->
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+
+ <!-- Modules section:
+ There is one section per audio HW module present on the platform.
+ Each module section will contains two mandatory tags for audio HAL “halVersion” and “name”.
+ The module names are the same as in current .conf file:
+ “primary”, “A2DP”, “remote_submix”, “USB”
+ Each module will contain the following sections:
+ “devicePorts”: a list of device descriptors for all input and output devices accessible via this
+ module.
+ This contains both permanently attached devices and removable devices.
+ “mixPorts”: listing all output and input streams exposed by the audio HAL
+ “routes”: list of possible connections between input and output devices or between stream and
+ devices.
+ "route": is defined by an attribute:
+ -"type": <mux|mix> means all sources are mutual exclusive (mux) or can be mixed (mix)
+ -"sink": the sink involved in this route
+ -"sources": all the sources that can be connected to the sink via this route
+ “attachedDevices”: permanently attached devices.
+ The attachedDevices section is a list of devices names. The names correspond to device names
+ defined in <devicePorts> section.
+ “defaultOutputDevice”: device to be used by default when no policy rule applies
+ -->
+ <modules>
+ <!-- Primary Audio HAL -->
+ <module name="primary" halVersion="3.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ <item>Built-In Back Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="deep_buffer" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="compressed_offload" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_MP3"
+ samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC"
+ samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC_LC"
+ samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="voice_tx" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </mixPort>
+ <mixPort name="voice_rx" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
+ <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ <gains>
+ <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
+ minValueMB="-8400"
+ maxValueMB="4000"
+ defaultValueMB="0"
+ stepValueMB="100"/>
+ </gains>
+ </devicePort>
+ <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 11025 12000 16000 22050 24000 32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO AUDIO_CHANNEL_IN_STEREO AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ </devicePorts>
+ <!-- route declaration, i.e. list all available sources for a given sink -->
+ <routes>
+ <route type="mix" sink="Earpiece"
+ sources="primary output,deep_buffer,BT SCO Headset Mic"/>
+ <route type="mix" sink="Speaker"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headset"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headphones"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+ <route type="mix" sink="Telephony Tx"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic, voice_tx"/>
+ <route type="mix" sink="voice_rx"
+ sources="Telephony Rx"/>
+ </routes>
+
+ </module>
+
+ <!-- A2dp Input Audio HAL -->
+ <xi:include href="a2dp_in_audio_policy_configuration_7_0.xml"/>
+
+ <!-- Usb Audio HAL -->
+ <xi:include href="usb_audio_policy_configuration.xml"/>
+
+ <!-- Remote Submix Audio HAL -->
+ <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+ <!-- Bluetooth Audio HAL -->
+ <xi:include href="bluetooth_audio_policy_configuration_7_0.xml"/>
+
+ <!-- MSD Audio HAL (optional) -->
+ <xi:include href="msd_audio_policy_configuration_7_0.xml"/>
+
+ </modules>
+ <!-- End of Modules section -->
+
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to engine configuration.
+ Keep it here for legacy.
+ Engine will fallback on these files if none are provided by engine.
+ -->
+
+ <xi:include href="audio_policy_volumes.xml"/>
+ <xi:include href="default_volume_tables.xml"/>
+
+ <!-- End of Volume section -->
+
+ <!-- Surround Sound configuration -->
+
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+ <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..2dffe02
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration_7_0.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+ <mixPorts>
+ <!-- A2DP Audio Ports -->
+ <mixPort name="a2dp output" role="source"/>
+ <!-- Hearing AIDs Audio Ports -->
+ <mixPort name="hearing aid output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000 16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- A2DP Audio Ports -->
+ <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000 88200 96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000 88200 96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100 48000 88200 96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <!-- Hearing AIDs Audio Ports -->
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT A2DP Out"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Headphones"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Speaker"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT Hearing Aid Out"
+ sources="hearing aid output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/hearing_aid_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/hearing_aid_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..8c364e4
--- /dev/null
+++ b/services/audiopolicy/config/hearing_aid_audio_policy_configuration_7_0.xml
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Hearing aid Audio HAL Audio Policy Configuration file -->
+<module name="hearing_aid" halVersion="2.0">
+ <mixPorts>
+ <mixPort name="hearing aid output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000 16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT Hearing Aid Out" sources="hearing aid output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/msd_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/msd_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..f167f0b
--- /dev/null
+++ b/services/audiopolicy/config/msd_audio_policy_configuration_7_0.xml
@@ -0,0 +1,78 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2017-2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Multi Stream Decoder Audio Policy Configuration file -->
+<module name="msd" halVersion="2.0">
+ <attachedDevices>
+ <item>MS12 Input</item>
+ <item>MS12 Output</item>
+ </attachedDevices>
+ <mixPorts>
+ <mixPort name="ms12 input" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="ms12 compressed input" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3_JOC"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_AC4"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
+ </mixPort>
+ <!-- The HW AV Sync flag is not required, but is recommended -->
+ <mixPort name="ms12 output" role="sink" flags="AUDIO_INPUT_FLAG_HW_AV_SYNC AUDIO_INPUT_FLAG_DIRECT">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_5POINT1"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="MS12 Input" type="AUDIO_DEVICE_OUT_BUS" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ <profile name="" format="AUDIO_FORMAT_AC3"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_E_AC3_JOC"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
+ <profile name="" format="AUDIO_FORMAT_AC4"
+ samplingRates="32000 44100 48000"
+ channelMasks="AUDIO_CHANNEL_OUT_MONO AUDIO_CHANNEL_OUT_STEREO AUDIO_CHANNEL_OUT_5POINT1 AUDIO_CHANNEL_OUT_7POINT1"/>
+ </devicePort>
+ <devicePort tagName="MS12 Output" type="AUDIO_DEVICE_IN_BUS" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="MS12 Input" sources="ms12 input,ms12 compressed input"/>
+ <route type="mix" sink="ms12 output" sources="MS12 Output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/primary_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/primary_audio_policy_configuration_7_0.xml
new file mode 100644
index 0000000..68a56b2
--- /dev/null
+++ b/services/audiopolicy/config/primary_audio_policy_configuration_7_0.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Default Primary Audio HAL Module Audio Policy Configuration include file -->
+<module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000 16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+ </devicePort>
+
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker"
+ sources="primary output"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
index 7f339dc..4510f63 100644
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -93,13 +93,13 @@
void dump(String8 *dst) const override;
- status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device) override;
+ status_t setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) override;
- status_t removePreferredDeviceForStrategy(product_strategy_t strategy) override;
+ status_t removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role) override;
- status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device) const override;
+ status_t getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
+ AudioDeviceTypeAddrVector &devices) const override;
engineConfig::ParsingResult loadAudioPolicyEngineConfig();
@@ -127,11 +127,36 @@
status_t restoreOriginVolumeCurve(audio_stream_type_t stream);
+ status_t setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) override;
+
+ status_t addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) override;
+
+ /**
+ * Remove devices role for capture preset. When `forceMatched` is true, the devices to be
+ * removed must all be present as the role for the capture preset. Otherwise, only devices
+ * that are present as the role for the capture preset will be removed.
+ */
+ status_t doRemoveDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices,
+ bool forceMatched=true);
+
+ status_t removeDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices) override;
+
+ status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) override;
+
+ status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices) const override;
+
private:
AudioPolicyManagerObserver *mApmObserver = nullptr;
ProductStrategyMap mProductStrategies;
ProductStrategyPreferredRoutingMap mProductStrategyPreferredDevices;
+ CapturePresetDevicesRoleMap mCapturePresetDevicesRole;
VolumeGroupMap mVolumeGroups;
LastRemovableMediaDevices mLastRemovableMediaDevices;
audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 3ebe7d1..c505456 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -28,8 +28,11 @@
#include <utils/String8.h>
#include <media/AudioAttributes.h>
#include <media/AudioContainers.h>
+#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioPolicy.h>
+#include <vector>
+
namespace android {
/**
@@ -164,7 +167,8 @@
product_strategy_t mDefaultStrategy = PRODUCT_STRATEGY_NONE;
};
-class ProductStrategyPreferredRoutingMap : public std::map<product_strategy_t, AudioDeviceTypeAddr>
+class ProductStrategyPreferredRoutingMap : public std::map<product_strategy_t,
+ AudioDeviceTypeAddrVector>
{
public:
void dump(String8 *dst, int spaces = 0) const;
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 1bc7fe3..1875c10 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -19,6 +19,7 @@
#include "EngineBase.h"
#include "EngineDefaultConfig.h"
+#include "../include/EngineBase.h"
#include <TypeConverter.h>
namespace android {
@@ -339,8 +340,8 @@
return NO_ERROR;
}
-status_t EngineBase::setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device)
+status_t EngineBase::setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
{
// verify strategy exists
if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
@@ -348,11 +349,24 @@
return BAD_VALUE;
}
- mProductStrategyPreferredDevices[strategy] = device;
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ mProductStrategyPreferredDevices[strategy] = devices;
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support set devices role as disabled for strategy.
+ ALOGI("%s no implemented for role as %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as there is no need to set device role as none for a strategy.
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
return NO_ERROR;
}
-status_t EngineBase::removePreferredDeviceForStrategy(product_strategy_t strategy)
+status_t EngineBase::removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role)
{
// verify strategy exists
if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
@@ -360,29 +374,218 @@
return BAD_VALUE;
}
- if (mProductStrategyPreferredDevices.erase(strategy) == 0) {
- // no preferred device was set
- return NAME_NOT_FOUND;
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ if (mProductStrategyPreferredDevices.erase(strategy) == 0) {
+ // no preferred device was set
+ return NAME_NOT_FOUND;
+ }
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support remove devices role as disabled for strategy.
+ ALOGI("%s no implemented for role as %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as it makes no sense to remove devices with
+ // role as DEVICE_ROLE_NONE for a strategy
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
}
return NO_ERROR;
}
-status_t EngineBase::getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device) const
+status_t EngineBase::getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
+ AudioDeviceTypeAddrVector &devices) const
{
// verify strategy exists
if (mProductStrategies.find(strategy) == mProductStrategies.end()) {
ALOGE("%s unknown strategy %u", __func__, strategy);
return BAD_VALUE;
}
- // preferred device for this strategy?
- auto devIt = mProductStrategyPreferredDevices.find(strategy);
- if (devIt == mProductStrategyPreferredDevices.end()) {
- ALOGV("%s no preferred device for strategy %u", __func__, strategy);
- return NAME_NOT_FOUND;
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED: {
+ // preferred device for this strategy?
+ auto devIt = mProductStrategyPreferredDevices.find(strategy);
+ if (devIt == mProductStrategyPreferredDevices.end()) {
+ ALOGV("%s no preferred device for strategy %u", __func__, strategy);
+ return NAME_NOT_FOUND;
+ }
+
+ devices = devIt->second;
+ } break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as the DEVICE_ROLE_NONE is never set
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
}
- device = devIt->second;
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ mCapturePresetDevicesRole[audioSource][role] = devices;
+ // When the devices are set as preferred devices, remove them from the disabled devices.
+ doRemoveDevicesRoleForCapturePreset(
+ audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support setting devices role as disabled for capture preset.
+ ALOGI("%s no implemented for role as %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as there is no need to set device role as none
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ mCapturePresetDevicesRole[audioSource][role] = excludeDeviceTypeAddrsFrom(
+ mCapturePresetDevicesRole[audioSource][role], devices);
+ for (const auto& device : devices) {
+ mCapturePresetDevicesRole[audioSource][role].push_back(device);
+ }
+ // When the devices are set as preferred devices, remove them from the disabled devices.
+ doRemoveDevicesRoleForCapturePreset(
+ audioSource, DEVICE_ROLE_DISABLED, devices, false /*forceMatched*/);
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support setting devices role as disabled for capture preset.
+ ALOGI("%s no implemented for role as %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as there is no need to set device role as none
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices) {
+ return doRemoveDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t EngineBase::doRemoveDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices, bool forceMatched)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ case DEVICE_ROLE_DISABLED: {
+ if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
+ mCapturePresetDevicesRole[audioSource].count(role) == 0) {
+ return NAME_NOT_FOUND;
+ }
+ AudioDeviceTypeAddrVector remainingDevices = excludeDeviceTypeAddrsFrom(
+ mCapturePresetDevicesRole[audioSource][role], devices);
+ if (forceMatched && remainingDevices.size() !=
+ mCapturePresetDevicesRole[audioSource][role].size() - devices.size()) {
+ // There are some devices from `devices` that are not present in the cached record
+ return BAD_VALUE;
+ }
+ mCapturePresetDevicesRole[audioSource][role] = remainingDevices;
+ if (mCapturePresetDevicesRole[audioSource][role].empty()) {
+ // Remove the role when device list is empty
+ mCapturePresetDevicesRole[audioSource].erase(role);
+ }
+ } break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as it makes no sense to remove devices with
+ // role as DEVICE_ROLE_NONE
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role)
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ if (mCapturePresetDevicesRole.count(audioSource) == 0 ||
+ mCapturePresetDevicesRole[audioSource].erase(role) == 0) {
+ // no preferred device for the given audio source
+ return NAME_NOT_FOUND;
+ }
+ break;
+ case DEVICE_ROLE_DISABLED:
+ // TODO: support remove devices role as disabled for strategy.
+ ALOGI("%s no implemented for role as %d", __func__, role);
+ break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as it makes no sense to remove devices with
+ // role as DEVICE_ROLE_NONE for a strategy
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+}
+
+status_t EngineBase::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices) const
+{
+ // verify if the audio source is valid
+ if (!audio_is_valid_audio_source(audioSource)) {
+ ALOGE("%s unknown audio source %u", __func__, audioSource);
+ return BAD_VALUE;
+ }
+
+ switch (role) {
+ case DEVICE_ROLE_PREFERRED:
+ case DEVICE_ROLE_DISABLED: {
+ if (mCapturePresetDevicesRole.count(audioSource) == 0) {
+ return NAME_NOT_FOUND;
+ }
+ auto devIt = mCapturePresetDevicesRole.at(audioSource).find(role);
+ if (devIt == mCapturePresetDevicesRole.at(audioSource).end()) {
+ ALOGV("%s no devices role(%d) for capture preset %u", __func__, role, audioSource);
+ return NAME_NOT_FOUND;
+ }
+
+ devices = devIt->second;
+ } break;
+ case DEVICE_ROLE_NONE:
+ // Intentionally fall-through as the DEVICE_ROLE_NONE is never set
+ default:
+ ALOGE("%s invalid role %d", __func__, role);
+ return BAD_VALUE;
+ }
return NO_ERROR;
}
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index 1821140..d39eff6 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -26,8 +26,8 @@
{"STRATEGY_PHONE",
{
{"phone", AUDIO_STREAM_VOICE_CALL, "AUDIO_STREAM_VOICE_CALL",
- {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION, AUDIO_SOURCE_DEFAULT, 0,
- ""}},
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""}},
},
{"sco", AUDIO_STREAM_BLUETOOTH_SCO, "AUDIO_STREAM_BLUETOOTH_SCO",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_SCO,
@@ -39,10 +39,11 @@
{
{"ring", AUDIO_STREAM_RING, "AUDIO_STREAM_RING",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
- AUDIO_SOURCE_DEFAULT, 0, ""}}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}}
},
{"alarm", AUDIO_STREAM_ALARM, "AUDIO_STREAM_ALARM",
- {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, 0, ""}},
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""}},
}
},
},
@@ -58,7 +59,7 @@
{
{"", AUDIO_STREAM_ACCESSIBILITY, "AUDIO_STREAM_ACCESSIBILITY",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
- AUDIO_SOURCE_DEFAULT, 0, ""}}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}}
}
},
},
@@ -66,15 +67,16 @@
{
{"", AUDIO_STREAM_NOTIFICATION, "AUDIO_STREAM_NOTIFICATION",
{
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_EVENT,
- AUDIO_SOURCE_DEFAULT, 0, ""}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
}
}
},
@@ -83,21 +85,25 @@
{
{"assistant", AUDIO_STREAM_ASSISTANT, "AUDIO_STREAM_ASSISTANT",
{{AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
- AUDIO_SOURCE_DEFAULT, 0, ""}}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}}
},
{"music", AUDIO_STREAM_MUSIC, "AUDIO_STREAM_MUSIC",
{
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, 0, ""},
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_GAME, AUDIO_SOURCE_DEFAULT, 0, ""},
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANT, AUDIO_SOURCE_DEFAULT, 0, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_GAME, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANT, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
- AUDIO_SOURCE_DEFAULT, 0, ""},
- {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
+ {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""}
},
},
{"system", AUDIO_STREAM_SYSTEM, "AUDIO_STREAM_SYSTEM",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_SONIFICATION,
- AUDIO_SOURCE_DEFAULT, 0, ""}}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}}
}
},
},
@@ -106,7 +112,7 @@
{"", AUDIO_STREAM_DTMF, "AUDIO_STREAM_DTMF",
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
- AUDIO_SOURCE_DEFAULT, 0, ""}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
}
}
},
@@ -114,7 +120,8 @@
{"STRATEGY_CALL_ASSISTANT",
{
{"", AUDIO_STREAM_CALL_ASSISTANT, "AUDIO_STREAM_CALL_ASSISTANT",
- {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_CALL_ASSISTANT, AUDIO_SOURCE_DEFAULT, 0, ""}}
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_CALL_ASSISTANT, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""}}
}
},
},
@@ -136,14 +143,16 @@
{"rerouting",
{
{"", AUDIO_STREAM_REROUTING, "AUDIO_STREAM_REROUTING",
- {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VIRTUAL_SOURCE, AUDIO_SOURCE_DEFAULT, 0, ""}}
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VIRTUAL_SOURCE, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""}}
}
},
},
{"patch",
{
{"", AUDIO_STREAM_PATCH, "AUDIO_STREAM_PATCH",
- {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
+ {{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
+ AUDIO_FLAG_NONE, ""}}
}
},
}
diff --git a/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp b/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
index 87b6aaf..96cc140 100644
--- a/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
+++ b/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
@@ -69,6 +69,11 @@
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
+ // TODO (b/122931261): remove when preferred device for strategy media will be used instead of
+ // AUDIO_POLICY_FORCE_NO_BT_A2DP.
+ case AUDIO_DEVICE_OUT_HEARING_AID:
+ case AUDIO_DEVICE_OUT_BLE_HEADSET:
+ case AUDIO_DEVICE_OUT_BLE_SPEAKER:
return GROUP_BT_A2DP;
default:
return GROUP_NONE;
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index 151c7bb..060568a 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -321,10 +321,11 @@
void ProductStrategyPreferredRoutingMap::dump(android::String8* dst, int spaces) const {
dst->appendFormat("\n%*sPreferred devices per product strategy dump:", spaces, "");
for (const auto& iter : *this) {
- dst->appendFormat("\n%*sStrategy %u dev:%08x addr:%s",
+ dst->appendFormat("\n%*sStrategy %u %s",
spaces + 2, "",
(uint32_t) iter.first,
- iter.second.mType, iter.second.mAddress.c_str());
+ dumpAudioDeviceTypeAddrVector(iter.second, true /*includeSensitiveInfo*/)
+ .c_str());
}
dst->appendFormat("\n");
}
diff --git a/services/audiopolicy/engine/common/src/VolumeCurve.cpp b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
index c352578..8aa4b08 100644
--- a/services/audiopolicy/engine/common/src/VolumeCurve.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
@@ -43,10 +43,24 @@
indexInUi = volIndexMax;
}
+ // Calculate the new volume index
size_t nbCurvePoints = mCurvePoints.size();
- // the volume index in the UI is relative to the min and max volume indices for this stream
- int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
- int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
+
+ int volIdx;
+ if (volIndexMin == volIndexMax) {
+ if (indexInUi == volIndexMin) {
+ volIdx = volIndexMin;
+ } else {
+ // This would result in a divide-by-zero below
+ ALOG_ASSERT(volIndexMin != volIndexMax, "Invalid volume index range & value: 0");
+ return NAN;
+ }
+ } else {
+ // interpolate
+ // the volume index in the UI is relative to the min and max volume indices for this stream
+ int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
+ volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
+ }
// Where would this volume index been inserted in the curve point
size_t indexInUiPosition = mCurvePoints.orderOf(CurvePoint(volIdx, 0));
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 4842cb2..daf6418 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -228,7 +228,8 @@
std::string flags = getXmlAttribute(cur, "value");
ALOGV("%s flags %s", __FUNCTION__, flags.c_str());
- attributes.flags = AudioFlagConverter::maskFromString(flags, " ");
+ attributes.flags = static_cast<audio_flags_mask_t>(
+ AudioFlagConverter::maskFromString(flags, " "));
}
if (!xmlStrcmp(cur->name, (const xmlChar *)("Bundle"))) {
std::string bundleKey = getXmlAttribute(cur, "key");
diff --git a/services/audiopolicy/engine/interface/EngineInterface.h b/services/audiopolicy/engine/interface/EngineInterface.h
index dfb20b5..f64608d 100644
--- a/services/audiopolicy/engine/interface/EngineInterface.h
+++ b/services/audiopolicy/engine/interface/EngineInterface.h
@@ -34,6 +34,8 @@
using DeviceStrategyMap = std::map<product_strategy_t, DeviceVector>;
using StrategyVector = std::vector<product_strategy_t>;
using VolumeGroupVector = std::vector<volume_group_t>;
+using CapturePresetDevicesRoleMap =
+ std::map<audio_source_t, std::map<device_role_t, AudioDeviceTypeAddrVector>>;
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
@@ -293,36 +295,113 @@
virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) const = 0;
/**
- * @brief setPreferredDeviceForStrategy sets the default device to be used for a
- * strategy when available
+ * @brief setDevicesRoleForStrategy sets devices role for a strategy when available. To remove
+ * devices role, removeDevicesRoleForStrategy must be called. When devices role is set
+ * successfully, previously set devices for the same role and strategy will be removed.
* @param strategy the audio strategy whose routing will be affected
- * @param device the audio device to route to when available
- * @return BAD_VALUE if the strategy is invalid,
- * or NO_ERROR if the preferred device was set
+ * @param role the role of the devices for the strategy. All device roles are defined at
+ * system/media/audio/include/system/audio_policy.h. DEVICE_ROLE_NONE is invalid
+ * for setting.
+ * @param devices the audio devices to be set
+ * @return BAD_VALUE if the strategy or role is invalid,
+ * or NO_ERROR if the role of the devices for strategy was set
*/
- virtual status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device) = 0;
+ virtual status_t setDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
/**
- * @brief removePreferredDeviceForStrategy removes the preferred device previously set
+ * @brief removeDevicesRoleForStrategy removes the role of device(s) previously set
* for the given strategy
* @param strategy the audio strategy whose routing will be affected
- * @return BAD_VALUE if the strategy is invalid,
- * or NO_ERROR if the preferred device was removed
+ * @param role the role of the devices for strategy
+ * @return BAD_VALUE if the strategy or role is invalid,
+ * or NO_ERROR if the devices for this role was removed
*/
- virtual status_t removePreferredDeviceForStrategy(product_strategy_t strategy) = 0;
+ virtual status_t removeDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role) = 0;
/**
- * @brief getPreferredDeviceForStrategy queries which device is set as the
- * preferred device for the given strategy
+ * @brief getDevicesForRoleAndStrategy queries which devices have the specified role for the
+ * specified strategy
* @param strategy the strategy to query
- * @param device returns configured as the preferred device if one was set
- * @return BAD_VALUE if the strategy is invalid,
- * or NAME_NOT_FOUND if no preferred device was set
- * or NO_ERROR if the device parameter was initialized to the preferred device
+ * @param role the role of the devices to query
+ * @param devices returns list of devices with matching role for the specified strategy.
+ * DEVICE_ROLE_NONE is invalid as input.
+ * @return BAD_VALUE if the strategy or role is invalid,
+ * or NAME_NOT_FOUND if no device for the role and strategy was set
+ * or NO_ERROR if the devices parameter contains a list of devices
*/
- virtual status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device) const = 0;
+ virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy, device_role_t role,
+ AudioDeviceTypeAddrVector &devices) const = 0;
+
+ /**
+ * @brief setDevicesRoleForCapturePreset sets devices role for a capture preset when available.
+ * To remove devices role, removeDevicesRoleForCapturePreset must be called. Calling
+ * clearDevicesRoleForCapturePreset will remove all devices as role. When devices role is set
+ * successfully, previously set devices for the same role and capture preset will be removed.
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset. All device roles are defined at
+ * system/media/audio/include/system/audio_policy.h. DEVICE_ROLE_NONE is invalid
+ * for setting.
+ * @param devices the audio devices to be set
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NO_ERROR if the role of the devices for capture preset was set
+ */
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ /**
+ * @brief addDevicesRoleForCapturePreset adds devices role for a capture preset when available.
+ * To remove devices role, removeDevicesRoleForCapturePreset must be called. Calling
+ * clearDevicesRoleForCapturePreset will remove all devices as role.
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset. All device roles are defined at
+ * system/media/audio/include/system/audio_policy.h. DEVICE_ROLE_NONE is invalid
+ * for setting.
+ * @param devices the audio devices to be added
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NO_ERROR if the role of the devices for capture preset was added
+ */
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) = 0;
+
+ /**
+ * @brief removeDevicesRoleForCapturePreset removes the role of device(s) previously set
+ * for the given capture preset
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset
+ * @param devices the devices to be removed
+ * @return BAD_VALUE if 1) the capture preset is invalid, 2) role is invalid or 3) the list of
+ * devices to be removed are not all present as role for a capture preset
+ * or NO_ERROR if the devices for this role was removed
+ */
+ virtual status_t removeDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role, const AudioDeviceTypeAddrVector& devices) = 0;
+
+ /**
+ * @brief clearDevicesRoleForCapturePreset removes the role of all device(s) previously set
+ * for the given capture preset
+ * @param audioSource the audio capture preset whose routing will be affected
+ * @param role the role of the devices for the capture preset
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NO_ERROR if the devices for this role was removed
+ */
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) = 0;
+
+ /**
+ * @brief getDevicesForRoleAndCapturePreset queries which devices have the specified role for
+ * the specified capture preset
+ * @param audioSource the capture preset to query
+ * @param role the role of the devices to query
+ * @param devices returns list of devices with matching role for the specified capture preset.
+ * DEVICE_ROLE_NONE is invalid as input.
+ * @return BAD_VALUE if the capture preset or role is invalid,
+ * or NAME_NOT_FOUND if no device for the role and capture preset was set
+ * or NO_ERROR if the devices parameter contains a list of devices
+ */
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role, AudioDeviceTypeAddrVector &devices) const = 0;
virtual void dump(String8 *dst) const = 0;
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
index f91f8d7..f8a6fc0 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
@@ -45,7 +45,7 @@
bool InputSource::sendToHW(string & /*error*/)
{
- uint32_t applicableInputDevice;
+ audio_devices_t applicableInputDevice;
blackboardRead(&applicableInputDevice, sizeof(applicableInputDevice));
return mPolicyPluginInterface->setDeviceForInputSource(mId, applicableInputDevice);
}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
index 244f082..6c8eb65 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/ProductStrategy.h
@@ -32,7 +32,7 @@
struct Device
{
- uint32_t applicableDevice; /**< applicable device for this strategy. */
+ audio_devices_t applicableDevice; /**< applicable device for this strategy. */
char deviceAddress[mMaxStringSize]; /**< device address associated with this strategy. */
} __attribute__((packed));
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.cpp b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
index aa06ae3..f4645e6 100644
--- a/services/audiopolicy/engineconfigurable/src/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
@@ -51,7 +51,7 @@
mApplicableDevices = devices;
return NO_ERROR;
}
- devices |= AUDIO_DEVICE_BIT_IN;
+ devices = static_cast<audio_devices_t>(devices | AUDIO_DEVICE_BIT_IN);
if (!audio_is_input_device(devices)) {
ALOGE("%s: trying to set an invalid device 0x%X for input source %s",
__FUNCTION__, devices, getName().c_str());
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index b14d2bb..eccde7b 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -241,10 +241,15 @@
default: // FORCE_NONE
devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
if (!devices.isEmpty()) break;
+
+ // TODO (b/161358428): remove when preferred device
+ // for strategy phone will be used instead of AUDIO_POLICY_FORCE_FOR_COMMUNICATION
+ devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_BLE_HEADSET);
+ if (!devices.isEmpty()) break;
+
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
if (!isInCall() &&
- (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
- outputs.isA2dpSupported()) {
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
devices = availableOutputDevices.getFirstDevicesFromTypes({
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES});
@@ -267,12 +272,16 @@
case AUDIO_POLICY_FORCE_SPEAKER:
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
// A2DP speaker when forcing to speaker output
- if (!isInCall() &&
- (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
- outputs.isA2dpSupported()) {
+ if (!isInCall()) {
devices = availableOutputDevices.getDevicesFromType(
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER);
+ AUDIO_DEVICE_OUT_BLE_SPEAKER);
if (!devices.isEmpty()) break;
+
+ if ((getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
+ devices = availableOutputDevices.getDevicesFromType(
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER);
+ if (!devices.isEmpty()) break;
+ }
}
if (!isInCall()) {
devices = availableOutputDevices.getFirstDevicesFromTypes({
@@ -386,18 +395,13 @@
STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
break;
}
- // FIXME: Find a better solution to prevent routing to BT hearing aid(b/122931261).
- if ((devices2.isEmpty()) &&
- (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
- devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
- }
+
if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
}
if (devices2.isEmpty() && (getLastRemovableMediaDevices().size() > 0)) {
- if ((getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
- outputs.isA2dpSupported()) {
+ if ((getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
// Get the last connected device of wired and bluetooth a2dp
devices2 = availableOutputDevices.getFirstDevicesFromTypes(
getLastRemovableMediaDevices());
@@ -452,22 +456,26 @@
devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_TELEPHONY_TX);
break;
+ case STRATEGY_NONE:
+ // Happens when internal strategies are processed ("rerouting", "patch"...)
+ break;
+
default:
- ALOGW("getDevicesForStrategy() unknown strategy: %d", strategy);
+ ALOGW("%s unknown strategy: %d", __func__, strategy);
break;
}
if (devices.isEmpty()) {
- ALOGV("getDevicesForStrategy() no device found for strategy %d", strategy);
+ ALOGV("%s no device found for strategy %d", __func__, strategy);
sp<DeviceDescriptor> defaultOutputDevice = getApmObserver()->getDefaultOutputDevice();
if (defaultOutputDevice != nullptr) {
devices.add(defaultOutputDevice);
}
ALOGE_IF(devices.isEmpty(),
- "getDevicesForStrategy() no default device defined");
+ "%s no default device defined", __func__);
}
- ALOGVV("getDevices ForStrategy() strategy %d, device %s",
+ ALOGVV("%s strategy %d, device %s", __func__,
strategy, dumpDeviceTypes(devices.types()).c_str());
return devices;
}
@@ -514,8 +522,9 @@
if (device != nullptr) break;
}
device = availableDevices.getFirstExistingDevice({
- AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
- AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
+ AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_IN_WIRED_HEADSET,
+ AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_IN_USB_DEVICE,
+ AUDIO_DEVICE_IN_BUILTIN_MIC});
break;
case AUDIO_SOURCE_VOICE_COMMUNICATION:
@@ -539,9 +548,13 @@
FALLTHROUGH_INTENDED;
default: // FORCE_NONE
+ // TODO (b/161358428): remove AUDIO_DEVICE_IN_BLE_HEADSET from the list
+ // when preferred device for strategy phone will be used instead of
+ // AUDIO_POLICY_FORCE_FOR_COMMUNICATION.
device = availableDevices.getFirstExistingDevice({
- AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
- AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
+ AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_IN_WIRED_HEADSET,
+ AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_IN_USB_DEVICE,
+ AUDIO_DEVICE_IN_BUILTIN_MIC});
break;
case AUDIO_POLICY_FORCE_SPEAKER:
@@ -566,8 +579,9 @@
if (device != nullptr) break;
}
device = availableDevices.getFirstExistingDevice({
- AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_IN_USB_HEADSET,
- AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_IN_BUILTIN_MIC});
+ AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_IN_WIRED_HEADSET,
+ AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_IN_USB_DEVICE,
+ AUDIO_DEVICE_IN_BUILTIN_MIC});
break;
case AUDIO_SOURCE_CAMCORDER:
// For a device without built-in mic, adding usb device
@@ -631,19 +645,17 @@
// check if this strategy has a preferred device that is available,
// if yes, give priority to it
- AudioDeviceTypeAddr preferredStrategyDevice;
- const status_t status = getPreferredDeviceForStrategy(strategy, preferredStrategyDevice);
+ AudioDeviceTypeAddrVector preferredStrategyDevices;
+ const status_t status = getDevicesForRoleAndStrategy(
+ strategy, DEVICE_ROLE_PREFERRED, preferredStrategyDevices);
if (status == NO_ERROR) {
// there is a preferred device, is it available?
- sp<DeviceDescriptor> preferredAvailableDevDescr = availableOutputDevices.getDevice(
- preferredStrategyDevice.mType,
- String8(preferredStrategyDevice.mAddress.c_str()),
- AUDIO_FORMAT_DEFAULT);
- if (preferredAvailableDevDescr != nullptr) {
- ALOGVV("%s using pref device 0x%08x/%s for strategy %u",
- __func__, preferredStrategyDevice.mType,
- preferredStrategyDevice.mAddress.c_str(), strategy);
- return DeviceVector(preferredAvailableDevDescr);
+ DeviceVector preferredAvailableDevVec =
+ availableOutputDevices.getDevicesFromDeviceTypeAddrVec(preferredStrategyDevices);
+ if (preferredAvailableDevVec.size() == preferredStrategyDevices.size()) {
+ ALOGVV("%s using pref device %s for strategy %u",
+ __func__, preferredAvailableDevVec.toString().c_str(), strategy);
+ return preferredAvailableDevVec;
}
}
diff --git a/services/audiopolicy/manager/Android.bp b/services/audiopolicy/manager/Android.bp
new file mode 100644
index 0000000..5bb432f
--- /dev/null
+++ b/services/audiopolicy/manager/Android.bp
@@ -0,0 +1,32 @@
+cc_library_shared {
+ name: "libaudiopolicymanager",
+
+ srcs: [
+ "AudioPolicyFactory.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/services/audioflinger"
+ ],
+
+ shared_libs: [
+ "libaudiopolicymanagerdefault",
+ ],
+
+ static_libs: [
+ "libaudiopolicycomponents",
+ ],
+
+ header_libs: [
+ "libaudiopolicycommon",
+ "libaudiopolicyengine_interface_headers",
+ "libaudiopolicymanager_interface_headers",
+ "libaudioutils_headers",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+}
diff --git a/services/audiopolicy/manager/Android.mk b/services/audiopolicy/manager/Android.mk
deleted file mode 100644
index cae6cfa..0000000
--- a/services/audiopolicy/manager/Android.mk
+++ /dev/null
@@ -1,30 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-ifneq ($(USE_CUSTOM_AUDIO_POLICY), 1)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioPolicyFactory.cpp
-
-LOCAL_SHARED_LIBRARIES := \
- libaudiopolicymanagerdefault
-
-LOCAL_STATIC_LIBRARIES := \
- libaudiopolicycomponents
-
-LOCAL_C_INCLUDES += \
- $(call include-path-for, audio-utils)
-
-LOCAL_HEADER_LIBRARIES := \
- libaudiopolicycommon \
- libaudiopolicyengine_interface_headers \
- libaudiopolicymanager_interface_headers
-
-LOCAL_CFLAGS := -Wall -Werror
-
-LOCAL_MODULE:= libaudiopolicymanager
-
-include $(BUILD_SHARED_LIBRARY)
-
-endif #ifneq ($(USE_CUSTOM_AUDIO_POLICY), 1)
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 12c6ae5..e4b0dd1 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -49,6 +49,7 @@
#include <private/android_filesystem_config.h>
#include <system/audio.h>
#include <system/audio_config.h>
+#include <system/audio_effects/effect_hapticgenerator.h>
#include "AudioPolicyManager.h"
#include <Serializer.h>
#include "TypeConverter.h"
@@ -903,7 +904,8 @@
// Only honor audibility enforced when required. The client will be
// forced to reconnect if the forced usage changes.
if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
- dstAttr->flags &= ~AUDIO_FLAG_AUDIBILITY_ENFORCED;
+ dstAttr->flags = static_cast<audio_flags_mask_t>(
+ dstAttr->flags & ~AUDIO_FLAG_AUDIBILITY_ENFORCED);
}
return NO_ERROR;
@@ -935,7 +937,7 @@
return status;
}
if (auto it = mAllowedCapturePolicies.find(uid); it != end(mAllowedCapturePolicies)) {
- resultAttr->flags |= it->second;
+ resultAttr->flags = static_cast<audio_flags_mask_t>(resultAttr->flags | it->second);
}
*stream = mEngine->getStreamTypeForAttributes(*resultAttr);
@@ -1253,7 +1255,8 @@
// Discard haptic channel mask when forcing muting haptic channels.
audio_channel_mask_t channelMask = forceMutingHaptic
- ? (config->channel_mask & ~AUDIO_CHANNEL_HAPTIC_ALL) : config->channel_mask;
+ ? static_cast<audio_channel_mask_t>(config->channel_mask & ~AUDIO_CHANNEL_HAPTIC_ALL)
+ : config->channel_mask;
// open a direct output if required by specified parameters
//force direct flag if offload flag is set: offloading implies a direct output stream
@@ -1309,7 +1312,8 @@
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
*flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
- output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate);
+ output = selectOutput(
+ outputs, *flags, config->format, channelMask, config->sample_rate, session);
}
ALOGW_IF((output == 0), "getOutputForDevices() could not find output for stream %d, "
"sampling rate %d, format %#x, channels %#x, flags %#x",
@@ -1482,14 +1486,26 @@
}
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
- audio_output_flags_t flags,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- uint32_t samplingRate)
+ audio_output_flags_t flags,
+ audio_format_t format,
+ audio_channel_mask_t channelMask,
+ uint32_t samplingRate,
+ audio_session_t sessionId)
{
LOG_ALWAYS_FATAL_IF(!(format == AUDIO_FORMAT_INVALID || audio_is_linear_pcm(format)),
"%s called with format %#x", __func__, format);
+ // Return the output that haptic-generating attached to when 1) session id is specified,
+ // 2) haptic-generating effect exists for given session id and 3) the output that
+ // haptic-generating effect attached to is in given outputs.
+ if (sessionId != AUDIO_SESSION_NONE) {
+ audio_io_handle_t hapticGeneratingOutput = mEffects.getIoForSession(
+ sessionId, FX_IID_HAPTICGENERATOR);
+ if (outputs.indexOf(hapticGeneratingOutput) >= 0) {
+ return hapticGeneratingOutput;
+ }
+ }
+
// Flags disqualifying an output: the match must happen before calling selectOutput()
static const audio_output_flags_t kExcludedFlags = (audio_output_flags_t)
(AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
@@ -3094,16 +3110,16 @@
// Returns true if all devices types match the predicate and are supported by one HW module
bool AudioPolicyManager::areAllDevicesSupported(
- const Vector<AudioDeviceTypeAddr>& devices,
+ const AudioDeviceTypeAddrVector& devices,
std::function<bool(audio_devices_t)> predicate,
const char *context) {
for (size_t i = 0; i < devices.size(); i++) {
sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(
- devices[i].mType, devices[i].mAddress.c_str(), String8(),
+ devices[i].mType, devices[i].getAddress(), String8(),
AUDIO_FORMAT_DEFAULT, false /*allowToCreate*/, true /*matchAddress*/);
if (devDesc == nullptr || (predicate != nullptr && !predicate(devices[i].mType))) {
- ALOGE("%s: device type %#x address %s not supported or not an output device",
- context, devices[i].mType, devices[i].mAddress.c_str());
+ ALOGE("%s: device type %#x address %s not supported or not match predicate",
+ context, devices[i].mType, devices[i].getAddress());
return false;
}
}
@@ -3111,7 +3127,7 @@
}
status_t AudioPolicyManager::setUidDeviceAffinities(uid_t uid,
- const Vector<AudioDeviceTypeAddr>& devices) {
+ const AudioDeviceTypeAddrVector& devices) {
ALOGV("%s() uid=%d num devices %zu", __FUNCTION__, uid, devices.size());
if (!areAllDevicesSupported(devices, audio_is_output_device, __func__)) {
return BAD_VALUE;
@@ -3143,20 +3159,19 @@
return res;
}
-status_t AudioPolicyManager::setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device) {
- ALOGV("%s() strategy=%d device=%08x addr=%s", __FUNCTION__,
- strategy, device.mType, device.mAddress.c_str());
+status_t AudioPolicyManager::setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices) {
+ ALOGV("%s() strategy=%d role=%d %s", __func__, strategy, role,
+ dumpAudioDeviceTypeAddrVector(devices).c_str());
- Vector<AudioDeviceTypeAddr> devices;
- devices.add(device);
if (!areAllDevicesSupported(devices, audio_is_output_device, __func__)) {
return BAD_VALUE;
}
- status_t status = mEngine->setPreferredDeviceForStrategy(strategy, device);
+ status_t status = mEngine->setDevicesRoleForStrategy(strategy, role, devices);
if (status != NO_ERROR) {
- ALOGW("Engine could not set preferred device %08x %s for strategy %d",
- device.mType, device.mAddress.c_str(), strategy);
+ ALOGW("Engine could not set preferred devices %s for strategy %d role %d",
+ dumpAudioDeviceTypeAddrVector(devices).c_str(), strategy, role);
return status;
}
@@ -3192,11 +3207,12 @@
}
}
-status_t AudioPolicyManager::removePreferredDeviceForStrategy(product_strategy_t strategy)
+status_t AudioPolicyManager::removeDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role)
{
- ALOGI("%s() strategy=%d", __FUNCTION__, strategy);
+ ALOGI("%s() strategy=%d role=%d", __func__, strategy, role);
- status_t status = mEngine->removePreferredDeviceForStrategy(strategy);
+ status_t status = mEngine->removeDevicesRoleForStrategy(strategy, role);
if (status != NO_ERROR) {
ALOGW("Engine could not remove preferred device for strategy %d", strategy);
return status;
@@ -3208,14 +3224,81 @@
return NO_ERROR;
}
-status_t AudioPolicyManager::getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device) {
- return mEngine->getPreferredDeviceForStrategy(strategy, device);
+status_t AudioPolicyManager::getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices) {
+ return mEngine->getDevicesForRoleAndStrategy(strategy, role, devices);
+}
+
+status_t AudioPolicyManager::setDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices) {
+ ALOGV("%s() audioSource=%d role=%d %s", __func__, audioSource, role,
+ dumpAudioDeviceTypeAddrVector(devices).c_str());
+
+ if (!areAllDevicesSupported(devices, audio_call_is_input_device, __func__)) {
+ return BAD_VALUE;
+ }
+ status_t status = mEngine->setDevicesRoleForCapturePreset(audioSource, role, devices);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not set preferred devices %s for audio source %d role %d",
+ dumpAudioDeviceTypeAddrVector(devices).c_str(), audioSource, role);
+
+ return status;
+}
+
+status_t AudioPolicyManager::addDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices) {
+ ALOGV("%s() audioSource=%d role=%d %s", __func__, audioSource, role,
+ dumpAudioDeviceTypeAddrVector(devices).c_str());
+
+ if (!areAllDevicesSupported(devices, audio_call_is_input_device, __func__)) {
+ return BAD_VALUE;
+ }
+ status_t status = mEngine->addDevicesRoleForCapturePreset(audioSource, role, devices);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not add preferred devices %s for audio source %d role %d",
+ dumpAudioDeviceTypeAddrVector(devices).c_str(), audioSource, role);
+
+ return status;
+}
+
+status_t AudioPolicyManager::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices)
+{
+ ALOGV("%s() audioSource=%d role=%d devices=%s", __func__, audioSource, role,
+ dumpAudioDeviceTypeAddrVector(devices).c_str());
+
+ if (!areAllDevicesSupported(devices, audio_call_is_input_device, __func__)) {
+ return BAD_VALUE;
+ }
+
+ status_t status = mEngine->removeDevicesRoleForCapturePreset(
+ audioSource, role, devices);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not remove devices role (%d) for capture preset %d", role, audioSource);
+
+ return status;
+}
+
+status_t AudioPolicyManager::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role) {
+ ALOGV("%s() audioSource=%d role=%d", __func__, audioSource, role);
+
+ status_t status = mEngine->clearDevicesRoleForCapturePreset(audioSource, role);
+ ALOGW_IF(status != NO_ERROR,
+ "Engine could not clear devices role (%d) for capture preset %d", role, audioSource);
+
+ return status;
+}
+
+status_t AudioPolicyManager::getDevicesForRoleAndCapturePreset(
+ audio_source_t audioSource, device_role_t role, AudioDeviceTypeAddrVector &devices) {
+ return mEngine->getDevicesForRoleAndCapturePreset(audioSource, role, devices);
}
status_t AudioPolicyManager::setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices) {
- ALOGI("%s() userId=%d num devices %zu", __FUNCTION__, userId, devices.size());\
+ const AudioDeviceTypeAddrVector& devices) {
+ ALOGI("%s() userId=%d num devices %zu", __func__, userId, devices.size());
if (!areAllDevicesSupported(devices, audio_is_output_device, __func__)) {
return BAD_VALUE;
}
@@ -5273,7 +5356,7 @@
if (status != OK) {
continue;
}
- if (client->getPrimaryMix() != primaryMix) {
+ if (client->getPrimaryMix() != primaryMix || client->hasLostPrimaryMix()) {
invalidate = true;
if (desc->isStrategyActive(psId)) {
maxLatency = desc->latency();
@@ -5304,7 +5387,8 @@
client->flags(),
client->config().format,
client->config().channel_mask,
- client->config().sample_rate);
+ client->config().sample_rate,
+ client->session());
if (newOutput != srcOut) {
invalidate = true;
break;
@@ -6157,7 +6241,9 @@
(isBtScoVolSrc && forceUseForComm != AUDIO_POLICY_FORCE_BT_SCO))) {
ALOGV("%s cannot set volume group %d volume with force use = %d for comm", __func__,
volumeSource, forceUseForComm);
- return INVALID_OPERATION;
+ // Do not return an error here as AudioService will always set both voice call
+ // and bluetooth SCO volumes due to stream aliasing.
+ return NO_ERROR;
}
if (deviceTypes.empty()) {
deviceTypes = outputDesc->devices().types();
@@ -6165,9 +6251,8 @@
float volumeDb = computeVolume(curves, volumeSource, index, deviceTypes);
if (outputDesc->isFixedVolume(deviceTypes) ||
- // Force VoIP volume to max for bluetooth SCO
-
- ((isVoiceVolSrc || isBtScoVolSrc) &&
+ // Force VoIP volume to max for bluetooth SCO device except if muted
+ (index != 0 && (isVoiceVolSrc || isBtScoVolSrc) &&
isSingleDeviceType(deviceTypes, audio_is_bluetooth_out_sco_device))) {
volumeDb = 0.0f;
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index b588f89..217013f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -263,17 +263,42 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes);
virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes);
virtual status_t setUidDeviceAffinities(uid_t uid,
- const Vector<AudioDeviceTypeAddr>& devices);
+ const AudioDeviceTypeAddrVector& devices);
virtual status_t removeUidDeviceAffinities(uid_t uid);
virtual status_t setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices);
+ const AudioDeviceTypeAddrVector& devices);
virtual status_t removeUserIdDeviceAffinities(int userId);
- virtual status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device);
- virtual status_t removePreferredDeviceForStrategy(product_strategy_t strategy);
- virtual status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device);
+ virtual status_t setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t removeDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role);
+
+
+ virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices);
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role);
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices);
virtual status_t startAudioSource(const struct audio_port_config *source,
const audio_attributes_t *attributes,
@@ -608,7 +633,8 @@
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
audio_format_t format = AUDIO_FORMAT_INVALID,
audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE,
- uint32_t samplingRate = 0);
+ uint32_t samplingRate = 0,
+ audio_session_t sessionId = AUDIO_SESSION_NONE);
// samplingRate, format, channelMask are in/out and so may be modified
sp<IOProfile> getInputProfile(const sp<DeviceDescriptor> & device,
uint32_t& samplingRate,
@@ -938,7 +964,7 @@
sp<AudioPatch> *patchDescPtr);
bool areAllDevicesSupported(
- const Vector<AudioDeviceTypeAddr>& devices,
+ const AudioDeviceTypeAddrVector& devices,
std::function<bool(audio_devices_t)> predicate,
const char* context);
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
new file mode 100644
index 0000000..8a7a1b2
--- /dev/null
+++ b/services/audiopolicy/service/Android.bp
@@ -0,0 +1,55 @@
+cc_library_shared {
+ name: "libaudiopolicyservice",
+
+ srcs: [
+ "AudioPolicyClientImpl.cpp",
+ "AudioPolicyEffects.cpp",
+ "AudioPolicyInterfaceImpl.cpp",
+ "AudioPolicyService.cpp",
+ "CaptureStateNotifier.cpp",
+ ],
+
+ include_dirs: [
+ "frameworks/av/services/audioflinger"
+ ],
+
+ shared_libs: [
+ "libaudioclient",
+ "libaudiofoundation",
+ "libaudiopolicymanager",
+ "libaudioutils",
+ "libbinder",
+ "libcutils",
+ "libeffectsconfig",
+ "libhardware_legacy",
+ "liblog",
+ "libmedia_helper",
+ "libmediametrics",
+ "libmediautils",
+ "libsensorprivacy",
+ "libutils",
+ "capture_state_listener-aidl-cpp",
+ ],
+
+ static_libs: [
+ "libaudiopolicycomponents",
+ ],
+
+ header_libs: [
+ "libaudiopolicycommon",
+ "libaudiopolicyengine_interface_headers",
+ "libaudiopolicymanager_interface_headers",
+ "libaudioutils_headers",
+ ],
+
+ cflags: [
+ "-fvisibility=hidden",
+ "-Werror",
+ "-Wall",
+ "-Wthread-safety",
+ ],
+
+ export_shared_lib_headers: [
+ "libsensorprivacy",
+ ],
+}
diff --git a/services/audiopolicy/service/Android.mk b/services/audiopolicy/service/Android.mk
deleted file mode 100644
index 680b077..0000000
--- a/services/audiopolicy/service/Android.mk
+++ /dev/null
@@ -1,50 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES:= \
- AudioPolicyService.cpp \
- AudioPolicyEffects.cpp \
- AudioPolicyInterfaceImpl.cpp \
- AudioPolicyClientImpl.cpp \
- CaptureStateNotifier.cpp
-
-LOCAL_C_INCLUDES := \
- frameworks/av/services/audioflinger \
- $(call include-path-for, audio-utils)
-
-LOCAL_HEADER_LIBRARIES := \
- libaudiopolicycommon \
- libaudiopolicyengine_interface_headers \
- libaudiopolicymanager_interface_headers
-
-LOCAL_SHARED_LIBRARIES := \
- libcutils \
- libutils \
- liblog \
- libbinder \
- libaudioclient \
- libaudioutils \
- libaudiofoundation \
- libhardware_legacy \
- libaudiopolicymanager \
- libmedia_helper \
- libmediametrics \
- libmediautils \
- libeffectsconfig \
- libsensorprivacy \
- capture_state_listener-aidl-cpp
-
-LOCAL_EXPORT_SHARED_LIBRARY_HEADERS := \
- libsensorprivacy
-
-LOCAL_STATIC_LIBRARIES := \
- libaudiopolicycomponents
-
-LOCAL_MODULE:= libaudiopolicyservice
-
-LOCAL_CFLAGS += -fvisibility=hidden
-LOCAL_CFLAGS += -Wall -Werror -Wthread-safety
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index df27f6e..df8e4c5 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -244,11 +244,12 @@
uid = callingUid;
}
if (!mPackageManager.allowPlaybackCapture(uid)) {
- attr->flags |= AUDIO_FLAG_NO_MEDIA_PROJECTION;
+ attr->flags = static_cast<audio_flags_mask_t>(attr->flags | AUDIO_FLAG_NO_MEDIA_PROJECTION);
}
if (((attr->flags & (AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE)) != 0)
&& !bypassInterruptionPolicyAllowed(pid, uid)) {
- attr->flags &= ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE);
+ attr->flags = static_cast<audio_flags_mask_t>(
+ attr->flags & ~(AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY|AUDIO_FLAG_BYPASS_MUTE));
}
AutoCallerClear acc;
AudioPolicyInterface::output_type_t outputType;
@@ -572,8 +573,7 @@
}
// check calling permissions
- if (!(startRecording(client->opPackageName, client->pid, client->uid,
- client->attributes.source == AUDIO_SOURCE_HOTWORD)
+ if (!(startRecording(client->opPackageName, client->pid, client->uid)
|| client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
__func__, client->uid, client->pid);
@@ -661,8 +661,7 @@
client->active = false;
client->startTimeNs = 0;
updateUidStates_l();
- finishRecording(client->opPackageName, client->uid,
- client->attributes.source == AUDIO_SOURCE_HOTWORD);
+ finishRecording(client->opPackageName, client->uid);
}
return status;
@@ -688,8 +687,7 @@
updateUidStates_l();
// finish the recording app op
- finishRecording(client->opPackageName, client->uid,
- client->attributes.source == AUDIO_SOURCE_HOTWORD);
+ finishRecording(client->opPackageName, client->uid);
AutoCallerClear acc;
return mAudioPolicyManager->stopInput(portId);
}
@@ -1260,7 +1258,7 @@
}
status_t AudioPolicyService::setUidDeviceAffinities(uid_t uid,
- const Vector<AudioDeviceTypeAddr>& devices) {
+ const AudioDeviceTypeAddrVector& devices) {
Mutex::Autolock _l(mLock);
if(!modifyAudioRoutingAllowed()) {
return PERMISSION_DENIED;
@@ -1285,7 +1283,7 @@
}
status_t AudioPolicyService::setUserIdDeviceAffinities(int userId,
- const Vector<AudioDeviceTypeAddr>& devices) {
+ const AudioDeviceTypeAddrVector& devices) {
Mutex::Autolock _l(mLock);
if(!modifyAudioRoutingAllowed()) {
return PERMISSION_DENIED;
@@ -1497,33 +1495,36 @@
return mAudioPolicyManager->isCallScreenModeSupported();
}
-status_t AudioPolicyService::setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device)
+status_t AudioPolicyService::setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mAudioPolicyManager->setPreferredDeviceForStrategy(strategy, device);
+ return mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices);
}
-status_t AudioPolicyService::removePreferredDeviceForStrategy(product_strategy_t strategy)
+status_t AudioPolicyService::removeDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mAudioPolicyManager->removePreferredDeviceForStrategy(strategy);
+ return mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role);
}
-status_t AudioPolicyService::getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device)
+status_t AudioPolicyService::getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mAudioPolicyManager->getPreferredDeviceForStrategy(strategy, device);
+ return mAudioPolicyManager->getDevicesForRoleAndStrategy(strategy, role, devices);
}
status_t AudioPolicyService::registerSoundTriggerCaptureStateListener(
@@ -1534,4 +1535,55 @@
return NO_ERROR;
}
+status_t AudioPolicyService::setDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->setDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioPolicyService::addDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector &devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->addDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioPolicyService::removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role, const AudioDeviceTypeAddrVector& devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->removeDevicesRoleForCapturePreset(audioSource, role, devices);
+}
+
+status_t AudioPolicyService::clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->clearDevicesRoleForCapturePreset(audioSource, role);
+}
+
+status_t AudioPolicyService::getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices)
+{
+ if (mAudioPolicyManager == nullptr) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices);
+}
+
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 869a963..0b218c2 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -226,19 +226,41 @@
virtual status_t registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration);
- virtual status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
+ virtual status_t setUidDeviceAffinities(uid_t uid, const AudioDeviceTypeAddrVector& devices);
virtual status_t removeUidDeviceAffinities(uid_t uid);
- virtual status_t setPreferredDeviceForStrategy(product_strategy_t strategy,
- const AudioDeviceTypeAddr &device);
+ virtual status_t setDevicesRoleForStrategy(product_strategy_t strategy,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
- virtual status_t removePreferredDeviceForStrategy(product_strategy_t strategy);
+ virtual status_t removeDevicesRoleForStrategy(product_strategy_t strategy, device_role_t role);
+ virtual status_t getDevicesForRoleAndStrategy(product_strategy_t strategy,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices);
- virtual status_t getPreferredDeviceForStrategy(product_strategy_t strategy,
- AudioDeviceTypeAddr &device);
- virtual status_t setUserIdDeviceAffinities(int userId, const Vector<AudioDeviceTypeAddr>& devices);
+ virtual status_t setDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t addDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ const AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t removeDevicesRoleForCapturePreset(
+ audio_source_t audioSource, device_role_t role,
+ const AudioDeviceTypeAddrVector& devices);
+
+ virtual status_t clearDevicesRoleForCapturePreset(audio_source_t audioSource,
+ device_role_t role);
+
+ virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
+ device_role_t role,
+ AudioDeviceTypeAddrVector &devices);
+
+ virtual status_t setUserIdDeviceAffinities(int userId,
+ const AudioDeviceTypeAddrVector& devices);
virtual status_t removeUserIdDeviceAffinities(int userId);
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index efdb241..ca03e1f 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -18,7 +18,10 @@
"libxml2",
],
- static_libs: ["libaudiopolicycomponents"],
+ static_libs: [
+ "libaudiopolicycomponents",
+ "libgmock"
+ ],
header_libs: [
"libaudiopolicycommon",
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index a0074bc..ca2164b 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -20,6 +20,7 @@
#include <unistd.h>
#include <gtest/gtest.h>
+#include <gmock/gmock.h>
#define LOG_TAG "APM_Test"
#include <Serializer.h>
@@ -36,6 +37,7 @@
#include "AudioPolicyTestManager.h"
using namespace android;
+using testing::UnorderedElementsAre;
TEST(AudioPolicyManagerTestInit, EngineFailure) {
AudioPolicyTestClient client;
@@ -87,7 +89,7 @@
void getOutputForAttr(
audio_port_handle_t *selectedDeviceId,
audio_format_t format,
- int channelMask,
+ audio_channel_mask_t channelMask,
int sampleRate,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
audio_io_handle_t *output = nullptr,
@@ -98,7 +100,7 @@
audio_unique_id_t riid,
audio_port_handle_t *selectedDeviceId,
audio_format_t format,
- int channelMask,
+ audio_channel_mask_t channelMask,
int sampleRate,
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
audio_port_handle_t *portId = nullptr);
@@ -164,7 +166,7 @@
void AudioPolicyManagerTest::getOutputForAttr(
audio_port_handle_t *selectedDeviceId,
audio_format_t format,
- int channelMask,
+ audio_channel_mask_t channelMask,
int sampleRate,
audio_output_flags_t flags,
audio_io_handle_t *output,
@@ -194,7 +196,7 @@
audio_unique_id_t riid,
audio_port_handle_t *selectedDeviceId,
audio_format_t format,
- int channelMask,
+ audio_channel_mask_t channelMask,
int sampleRate,
audio_input_flags_t flags,
audio_port_handle_t *portId) {
@@ -707,7 +709,8 @@
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
audio_source_t source = AUDIO_SOURCE_REMOTE_SUBMIX;
- audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, source, 0, ""};
+ audio_attributes_t attr = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, source, AUDIO_FLAG_NONE, ""};
std::string tags = "addr=" + mMixAddress;
strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
getInputForAttr(attr, mTracker->getRiid(), &selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT,
@@ -757,9 +760,9 @@
AudioPolicyManagerTestDPPlaybackReRouting,
testing::Values(
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
- AUDIO_SOURCE_DEFAULT, 0, ""}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
)
);
@@ -768,47 +771,47 @@
AudioPolicyManagerTestDPPlaybackReRouting,
testing::Values(
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
- AUDIO_USAGE_ASSISTANCE_SONIFICATION,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_USAGE_ASSISTANCE_SONIFICATION,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VIRTUAL_SOURCE,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
- AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, "addr=remote_submix_media"}
)
);
@@ -817,41 +820,41 @@
AudioPolicyManagerTestDPPlaybackReRouting,
testing::Values(
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
AUDIO_USAGE_ASSISTANCE_SONIFICATION,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
- AUDIO_SOURCE_DEFAULT, 0, ""},
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_SPEECH, AUDIO_USAGE_ASSISTANT,
- AUDIO_SOURCE_DEFAULT, 0, ""}
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""}
)
);
@@ -892,7 +895,8 @@
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
audio_usage_t usage = AUDIO_USAGE_VIRTUAL_SOURCE;
- audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, usage, AUDIO_SOURCE_DEFAULT, 0, ""};
+ audio_attributes_t attr =
+ {AUDIO_CONTENT_TYPE_UNKNOWN, usage, AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
std::string tags = std::string("addr=") + mMixAddress;
strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
@@ -941,17 +945,19 @@
AudioPolicyManagerTestDPMixRecordInjection,
testing::Values(
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_CAMCORDER, 0, ""},
+ AUDIO_SOURCE_CAMCORDER, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_CAMCORDER, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_CAMCORDER, AUDIO_FLAG_NONE,
+ "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_MIC, 0, "addr=remote_submix_media"},
+ AUDIO_SOURCE_MIC, AUDIO_FLAG_NONE,
+ "addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_MIC, 0, ""},
+ AUDIO_SOURCE_MIC, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_VOICE_COMMUNICATION, 0, ""},
+ AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_VOICE_COMMUNICATION, 0,
+ AUDIO_SOURCE_VOICE_COMMUNICATION, AUDIO_FLAG_NONE,
"addr=remote_submix_media"}
)
);
@@ -962,14 +968,15 @@
AudioPolicyManagerTestDPMixRecordInjection,
testing::Values(
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_VOICE_RECOGNITION, 0, ""},
+ AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_HOTWORD, 0, ""},
+ AUDIO_SOURCE_HOTWORD, AUDIO_FLAG_NONE, ""},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_VOICE_RECOGNITION, 0,
+ AUDIO_SOURCE_VOICE_RECOGNITION, AUDIO_FLAG_NONE,
"addr=remote_submix_media"},
(audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
- AUDIO_SOURCE_HOTWORD, 0, "addr=remote_submix_media"}
+ AUDIO_SOURCE_HOTWORD, AUDIO_FLAG_NONE,
+ "addr=remote_submix_media"}
)
);
@@ -1188,3 +1195,109 @@
EXPECT_GT(mClient->getAudioPortListUpdateCount(), prevAudioPortListUpdateCount);
EXPECT_GT(mManager->getAudioPortGeneration(), prevAudioPortGeneration);
}
+
+using DevicesRoleForCapturePresetParam = std::tuple<audio_source_t, device_role_t>;
+
+class AudioPolicyManagerDevicesRoleForCapturePresetTest
+ : public AudioPolicyManagerTestWithConfigurationFile,
+ public testing::WithParamInterface<DevicesRoleForCapturePresetParam> {
+protected:
+ // The `inputDevice` and `inputDevice2` indicate the audio devices type to be used for setting
+ // device role. They must be declared in the test_audio_policy_configuration.xml
+ AudioDeviceTypeAddr inputDevice = AudioDeviceTypeAddr(AUDIO_DEVICE_IN_BUILTIN_MIC, "");
+ AudioDeviceTypeAddr inputDevice2 = AudioDeviceTypeAddr(AUDIO_DEVICE_IN_HDMI, "");
+};
+
+TEST_P(AudioPolicyManagerDevicesRoleForCapturePresetTest, DevicesRoleForCapturePreset) {
+ const audio_source_t audioSource = std::get<0>(GetParam());
+ const device_role_t role = std::get<1>(GetParam());
+
+ // Test invalid device when setting
+ const AudioDeviceTypeAddr outputDevice(AUDIO_DEVICE_OUT_SPEAKER, "");
+ const AudioDeviceTypeAddrVector outputDevices = {outputDevice};
+ ASSERT_EQ(BAD_VALUE,
+ mManager->setDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+ ASSERT_EQ(BAD_VALUE,
+ mManager->addDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+ AudioDeviceTypeAddrVector devices;
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ ASSERT_TRUE(devices.empty());
+ ASSERT_EQ(BAD_VALUE,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, outputDevices));
+
+ // Without setting, call get/remove/clear must fail
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, devices));
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->clearDevicesRoleForCapturePreset(audioSource, role));
+
+ // Test set/get devices role
+ const AudioDeviceTypeAddrVector inputDevices = {inputDevice};
+ ASSERT_EQ(NO_ERROR,
+ mManager->setDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice));
+
+ // Test setting will change the previously set devices
+ const AudioDeviceTypeAddrVector inputDevices2 = {inputDevice2};
+ ASSERT_EQ(NO_ERROR,
+ mManager->setDevicesRoleForCapturePreset(audioSource, role, inputDevices2));
+ devices.clear();
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice2));
+
+ // Test add devices
+ ASSERT_EQ(NO_ERROR,
+ mManager->addDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ devices.clear();
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice, inputDevice2));
+
+ // Test remove devices
+ ASSERT_EQ(NO_ERROR,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+ devices.clear();
+ ASSERT_EQ(NO_ERROR, mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+ EXPECT_THAT(devices, UnorderedElementsAre(inputDevice2));
+
+ // Test remove devices that are not set as the device role
+ ASSERT_EQ(BAD_VALUE,
+ mManager->removeDevicesRoleForCapturePreset(audioSource, role, inputDevices));
+
+ // Test clear devices
+ ASSERT_EQ(NO_ERROR,
+ mManager->clearDevicesRoleForCapturePreset(audioSource, role));
+ devices.clear();
+ ASSERT_EQ(NAME_NOT_FOUND,
+ mManager->getDevicesForRoleAndCapturePreset(audioSource, role, devices));
+}
+
+INSTANTIATE_TEST_CASE_P(
+ DevicesRoleForCapturePresetOperation,
+ AudioPolicyManagerDevicesRoleForCapturePresetTest,
+ testing::Values(
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_MIC, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_UPLINK,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_DOWNLINK,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_CALL, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_CAMCORDER, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_RECOGNITION,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_COMMUNICATION,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_REMOTE_SUBMIX,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_UNPROCESSED, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_VOICE_PERFORMANCE,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_ECHO_REFERENCE,
+ DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_FM_TUNER, DEVICE_ROLE_PREFERRED}),
+ DevicesRoleForCapturePresetParam({AUDIO_SOURCE_HOTWORD, DEVICE_ROLE_PREFERRED})
+ )
+ );
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 501d922..4a36865 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -30,7 +30,6 @@
"common/CameraProviderManager.cpp",
"common/DepthPhotoProcessor.cpp",
"common/FrameProcessorBase.cpp",
- "api1/CameraClient.cpp",
"api1/Camera2Client.cpp",
"api1/client2/Parameters.cpp",
"api1/client2/FrameProcessor.cpp",
@@ -46,7 +45,6 @@
"api2/DepthCompositeStream.cpp",
"api2/HeicEncoderInfoManager.cpp",
"api2/HeicCompositeStream.cpp",
- "device1/CameraHardwareInterface.cpp",
"device3/BufferUtils.cpp",
"device3/Camera3Device.cpp",
"device3/Camera3OfflineSession.cpp",
@@ -54,7 +52,7 @@
"device3/Camera3IOStreamBase.cpp",
"device3/Camera3InputStream.cpp",
"device3/Camera3OutputStream.cpp",
- "device3/Camera3DummyStream.cpp",
+ "device3/Camera3FakeStream.cpp",
"device3/Camera3SharedOutputStream.cpp",
"device3/StatusTracker.cpp",
"device3/Camera3BufferManager.cpp",
@@ -122,7 +120,6 @@
"android.hardware.camera.provider@2.4",
"android.hardware.camera.provider@2.5",
"android.hardware.camera.provider@2.6",
- "android.hardware.camera.device@1.0",
"android.hardware.camera.device@3.2",
"android.hardware.camera.device@3.3",
"android.hardware.camera.device@3.4",
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index e629cdd..ccdd9e5 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -59,9 +59,8 @@
if (mProviderManager->supportSetTorchMode(cameraId.string())) {
mFlashControl = new ProviderFlashControl(mProviderManager);
} else {
- // Only HAL1 devices do not support setTorchMode
- mFlashControl =
- new CameraHardwareInterfaceFlashControl(mProviderManager, mCallbacks);
+ ALOGE("Flashlight control not supported by this device!");
+ return NO_INIT;
}
return OK;
@@ -309,271 +308,4 @@
}
// ProviderFlashControl implementation ends
-/////////////////////////////////////////////////////////////////////
-// CameraHardwareInterfaceFlashControl implementation begins
-// Flash control for camera module <= v2.3 and camera HAL v1
-/////////////////////////////////////////////////////////////////////
-
-CameraHardwareInterfaceFlashControl::CameraHardwareInterfaceFlashControl(
- sp<CameraProviderManager> manager,
- CameraProviderManager::StatusListener* callbacks) :
- mProviderManager(manager),
- mCallbacks(callbacks),
- mTorchEnabled(false) {
-}
-
-CameraHardwareInterfaceFlashControl::~CameraHardwareInterfaceFlashControl() {
- disconnectCameraDevice();
-
- mSurface.clear();
- mSurfaceTexture.clear();
- mProducer.clear();
- mConsumer.clear();
-
- if (mTorchEnabled) {
- if (mCallbacks) {
- ALOGV("%s: notify the framework that torch was turned off",
- __FUNCTION__);
- mCallbacks->onTorchStatusChanged(mCameraId, TorchModeStatus::AVAILABLE_OFF);
- }
- }
-}
-
-status_t CameraHardwareInterfaceFlashControl::setTorchMode(
- const String8& cameraId, bool enabled) {
- Mutex::Autolock l(mLock);
-
- // pre-check
- status_t res;
- if (enabled) {
- bool hasFlash = false;
- // Check if it has a flash unit and leave camera device open.
- res = hasFlashUnitLocked(cameraId, &hasFlash, /*keepDeviceOpen*/true);
- // invalid camera?
- if (res) {
- // hasFlashUnitLocked() returns BAD_INDEX if mDevice is connected to
- // another camera device.
- return res == BAD_INDEX ? BAD_INDEX : -EINVAL;
- }
- // no flash unit?
- if (!hasFlash) {
- // Disconnect camera device if it has no flash.
- disconnectCameraDevice();
- return -ENOSYS;
- }
- } else if (mDevice == NULL || cameraId != mCameraId) {
- // disabling the torch mode of an un-opened or different device.
- return OK;
- } else {
- // disabling the torch mode of currently opened device
- disconnectCameraDevice();
- mTorchEnabled = false;
- mCallbacks->onTorchStatusChanged(cameraId, TorchModeStatus::AVAILABLE_OFF);
- return OK;
- }
-
- res = startPreviewAndTorch();
- if (res) {
- return res;
- }
-
- mTorchEnabled = true;
- mCallbacks->onTorchStatusChanged(cameraId, TorchModeStatus::AVAILABLE_ON);
- return OK;
-}
-
-status_t CameraHardwareInterfaceFlashControl::hasFlashUnit(
- const String8& cameraId, bool *hasFlash) {
- Mutex::Autolock l(mLock);
- // Close device after checking if it has a flash unit.
- return hasFlashUnitLocked(cameraId, hasFlash, /*keepDeviceOpen*/false);
-}
-
-status_t CameraHardwareInterfaceFlashControl::hasFlashUnitLocked(
- const String8& cameraId, bool *hasFlash, bool keepDeviceOpen) {
- bool closeCameraDevice = false;
-
- if (!hasFlash) {
- return BAD_VALUE;
- }
-
- status_t res;
- if (mDevice == NULL) {
- // Connect to camera device to query if it has a flash unit.
- res = connectCameraDevice(cameraId);
- if (res) {
- return res;
- }
- // Close camera device only when it is just opened and the caller doesn't want to keep
- // the camera device open.
- closeCameraDevice = !keepDeviceOpen;
- }
-
- if (cameraId != mCameraId) {
- return BAD_INDEX;
- }
-
- const char *flashMode =
- mParameters.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES);
- if (flashMode && strstr(flashMode, CameraParameters::FLASH_MODE_TORCH)) {
- *hasFlash = true;
- } else {
- *hasFlash = false;
- }
-
- if (closeCameraDevice) {
- res = disconnectCameraDevice();
- if (res != OK) {
- ALOGE("%s: Failed to disconnect camera device. %s (%d)", __FUNCTION__,
- strerror(-res), res);
- return res;
- }
- }
-
- return OK;
-}
-
-status_t CameraHardwareInterfaceFlashControl::startPreviewAndTorch() {
- status_t res = OK;
- res = mDevice->startPreview();
- if (res) {
- ALOGE("%s: start preview failed. %s (%d)", __FUNCTION__,
- strerror(-res), res);
- return res;
- }
-
- mParameters.set(CameraParameters::KEY_FLASH_MODE,
- CameraParameters::FLASH_MODE_TORCH);
-
- return mDevice->setParameters(mParameters);
-}
-
-status_t CameraHardwareInterfaceFlashControl::getSmallestSurfaceSize(
- int32_t *width, int32_t *height) {
- if (!width || !height) {
- return BAD_VALUE;
- }
-
- int32_t w = INT32_MAX;
- int32_t h = 1;
- Vector<Size> sizes;
-
- mParameters.getSupportedPreviewSizes(sizes);
- for (size_t i = 0; i < sizes.size(); i++) {
- Size s = sizes[i];
- if (w * h > s.width * s.height) {
- w = s.width;
- h = s.height;
- }
- }
-
- if (w == INT32_MAX) {
- return NAME_NOT_FOUND;
- }
-
- *width = w;
- *height = h;
-
- return OK;
-}
-
-status_t CameraHardwareInterfaceFlashControl::initializePreviewWindow(
- const sp<CameraHardwareInterface>& device, int32_t width, int32_t height) {
- status_t res;
- BufferQueue::createBufferQueue(&mProducer, &mConsumer);
-
- mSurfaceTexture = new GLConsumer(mConsumer, 0, GLConsumer::TEXTURE_EXTERNAL,
- true, true);
- if (mSurfaceTexture == NULL) {
- return NO_MEMORY;
- }
-
- int32_t format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- res = mSurfaceTexture->setDefaultBufferSize(width, height);
- if (res) {
- return res;
- }
- res = mSurfaceTexture->setDefaultBufferFormat(format);
- if (res) {
- return res;
- }
-
- mSurface = new Surface(mProducer, /*useAsync*/ true);
- if (mSurface == NULL) {
- return NO_MEMORY;
- }
-
- res = native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_CAMERA);
- if (res) {
- ALOGE("%s: Unable to connect to native window", __FUNCTION__);
- return res;
- }
-
- return device->setPreviewWindow(mSurface);
-}
-
-status_t CameraHardwareInterfaceFlashControl::connectCameraDevice(
- const String8& cameraId) {
- sp<CameraHardwareInterface> device =
- new CameraHardwareInterface(cameraId.string());
-
- status_t res = device->initialize(mProviderManager);
- if (res) {
- ALOGE("%s: initializing camera %s failed", __FUNCTION__,
- cameraId.string());
- return res;
- }
-
- // need to set __get_memory in set_callbacks().
- device->setCallbacks(NULL, NULL, NULL, NULL, NULL);
-
- mParameters = device->getParameters();
-
- int32_t width, height;
- res = getSmallestSurfaceSize(&width, &height);
- if (res) {
- ALOGE("%s: failed to get smallest surface size for camera %s",
- __FUNCTION__, cameraId.string());
- return res;
- }
-
- res = initializePreviewWindow(device, width, height);
- if (res) {
- ALOGE("%s: failed to initialize preview window for camera %s",
- __FUNCTION__, cameraId.string());
- return res;
- }
-
- mCameraId = cameraId;
- mDevice = device;
- return OK;
-}
-
-status_t CameraHardwareInterfaceFlashControl::disconnectCameraDevice() {
- if (mDevice == NULL) {
- return OK;
- }
-
- if (mParameters.get(CameraParameters::KEY_FLASH_MODE)) {
- // There is a flash, turn if off.
- // (If there isn't one, leave the parameter null)
- mParameters.set(CameraParameters::KEY_FLASH_MODE,
- CameraParameters::FLASH_MODE_OFF);
- mDevice->setParameters(mParameters);
- }
- mDevice->stopPreview();
- status_t res = native_window_api_disconnect(mSurface.get(),
- NATIVE_WINDOW_API_CAMERA);
- if (res) {
- ALOGW("%s: native_window_api_disconnect failed: %s (%d)",
- __FUNCTION__, strerror(-res), res);
- }
- mDevice->setPreviewWindow(NULL);
- mDevice->release();
- mDevice = NULL;
-
- return OK;
-}
-// CameraHardwareInterfaceFlashControl implementation ends
-
}
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
index 1baaba2..b97fa5f 100644
--- a/services/camera/libcameraservice/CameraFlashlight.h
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -23,8 +23,6 @@
#include <utils/SortedVector.h>
#include "common/CameraProviderManager.h"
#include "common/CameraDeviceBase.h"
-#include "device1/CameraHardwareInterface.h"
-
namespace android {
@@ -124,59 +122,6 @@
Mutex mLock;
};
-/**
- * Flash control for camera module <= v2.3 and camera HAL v1
- */
-class CameraHardwareInterfaceFlashControl : public FlashControlBase {
- public:
- CameraHardwareInterfaceFlashControl(
- sp<CameraProviderManager> manager,
- CameraProviderManager::StatusListener* callbacks);
- virtual ~CameraHardwareInterfaceFlashControl();
-
- // FlashControlBase
- status_t setTorchMode(const String8& cameraId, bool enabled);
- status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
-
- private:
- // connect to a camera device
- status_t connectCameraDevice(const String8& cameraId);
-
- // disconnect and free mDevice
- status_t disconnectCameraDevice();
-
- // initialize the preview window
- status_t initializePreviewWindow(const sp<CameraHardwareInterface>& device,
- int32_t width, int32_t height);
-
- // start preview and enable torch
- status_t startPreviewAndTorch();
-
- // get the smallest surface
- status_t getSmallestSurfaceSize(int32_t *width, int32_t *height);
-
- // protected by mLock
- // If this function opens camera device in order to check if it has a flash unit, the
- // camera device will remain open if keepDeviceOpen is true and the camera device will be
- // closed if keepDeviceOpen is false. If camera device is already open when calling this
- // function, keepDeviceOpen is ignored.
- status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash, bool keepDeviceOpen);
-
- sp<CameraProviderManager> mProviderManager;
- CameraProviderManager::StatusListener* mCallbacks;
- sp<CameraHardwareInterface> mDevice;
- String8 mCameraId;
- CameraParameters mParameters;
- bool mTorchEnabled;
-
- sp<IGraphicBufferProducer> mProducer;
- sp<IGraphicBufferConsumer> mConsumer;
- sp<GLConsumer> mSurfaceTexture;
- sp<Surface> mSurface;
-
- Mutex mLock;
-};
-
} // namespace android
#endif
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 3d9998a..138e429 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -70,7 +70,6 @@
#include <system/camera.h>
#include "CameraService.h"
-#include "api1/CameraClient.h"
#include "api1/Camera2Client.h"
#include "api2/CameraDeviceClient.h"
#include "utils/CameraTraces.h"
@@ -679,9 +678,15 @@
status_t res = mCameraProviderManager->getCameraCharacteristics(
String8(cameraId).string(), cameraInfo);
if (res != OK) {
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera "
- "characteristics for device %s: %s (%d)", String8(cameraId).string(),
- strerror(-res), res);
+ if (res == NAME_NOT_FOUND) {
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Unable to retrieve camera "
+ "characteristics for unknown device %s: %s (%d)", String8(cameraId).string(),
+ strerror(-res), res);
+ } else {
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera "
+ "characteristics for device %s: %s (%d)", String8(cameraId).string(),
+ strerror(-res), res);
+ }
}
SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
if (getSystemCameraKind(String8(cameraId), &deviceKind) != OK) {
@@ -802,35 +807,26 @@
Status CameraService::makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName,
- const std::optional<String16>& featureId, const String8& cameraId, int api1CameraId,
- int facing, int clientPid, uid_t clientUid, int servicePid, int halVersion,
+ const std::optional<String16>& featureId, const String8& cameraId,
+ int api1CameraId, int facing, int clientPid, uid_t clientUid, int servicePid,
int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client) {
- if (halVersion < 0 || halVersion == deviceVersion) {
- // Default path: HAL version is unspecified by caller, create CameraClient
- // based on device version reported by the HAL.
- switch(deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- if (effectiveApiLevel == API_1) { // Camera1 API route
- sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, featureId,
- api1CameraId, facing, clientPid, clientUid,
- getpid());
- } else { // Camera2 API route
- ALOGW("Camera using old HAL version: %d", deviceVersion);
- return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
- "Camera device \"%s\" HAL version %d does not support camera2 API",
- cameraId.string(), deviceVersion);
- }
+ // Create CameraClient based on device version reported by the HAL.
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ ALOGE("Camera using old HAL version: %d", deviceVersion);
+ return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
+ "Camera device \"%s\" HAL version %d no longer supported",
+ cameraId.string(), deviceVersion);
break;
- case CAMERA_DEVICE_API_VERSION_3_0:
- case CAMERA_DEVICE_API_VERSION_3_1:
- case CAMERA_DEVICE_API_VERSION_3_2:
- case CAMERA_DEVICE_API_VERSION_3_3:
- case CAMERA_DEVICE_API_VERSION_3_4:
- case CAMERA_DEVICE_API_VERSION_3_5:
- case CAMERA_DEVICE_API_VERSION_3_6:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ case CAMERA_DEVICE_API_VERSION_3_1:
+ case CAMERA_DEVICE_API_VERSION_3_2:
+ case CAMERA_DEVICE_API_VERSION_3_3:
+ case CAMERA_DEVICE_API_VERSION_3_4:
+ case CAMERA_DEVICE_API_VERSION_3_5:
+ case CAMERA_DEVICE_API_VERSION_3_6:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
*client = new Camera2Client(cameraService, tmp, packageName, featureId,
@@ -844,32 +840,12 @@
cameraId, facing, clientPid, clientUid, servicePid);
}
break;
- default:
+ default:
// Should not be reachable
ALOGE("Unknown camera device HAL version: %d", deviceVersion);
return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
"Camera device \"%s\" has unknown HAL version %d",
cameraId.string(), deviceVersion);
- }
- } else {
- // A particular HAL version is requested by caller. Create CameraClient
- // based on the requested HAL version.
- if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 &&
- halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
- // Only support higher HAL version device opened as HAL1.0 device.
- sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, featureId,
- api1CameraId, facing, clientPid, clientUid,
- servicePid);
- } else {
- // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
- ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
- " opened as HAL %x device", halVersion, deviceVersion,
- CAMERA_DEVICE_API_VERSION_1_0);
- return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
- "Camera device \"%s\" (HAL version %d) cannot be opened as HAL version %d",
- cameraId.string(), deviceVersion, halVersion);
- }
}
return Status::ok();
}
@@ -957,7 +933,6 @@
sp<Client> tmp = nullptr;
if (!(ret = connectHelper<ICameraClient,Client>(
sp<ICameraClient>{nullptr}, id, cameraId,
- static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED),
internalPackageName, {}, uid, USE_CALLING_PID,
API_1, /*shimUpdateOnly*/ true, /*out*/ tmp)
).isOk()) {
@@ -1476,34 +1451,7 @@
String8 id = cameraIdIntToStr(api1CameraId);
sp<Client> client = nullptr;
ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId,
- CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, {},
- clientUid, clientPid, API_1, /*shimUpdateOnly*/ false, /*out*/client);
-
- if(!ret.isOk()) {
- logRejected(id, CameraThreadState::getCallingPid(), String8(clientPackageName),
- ret.toString8());
- return ret;
- }
-
- *device = client;
- return ret;
-}
-
-Status CameraService::connectLegacy(
- const sp<ICameraClient>& cameraClient,
- int api1CameraId, int halVersion,
- const String16& clientPackageName,
- int clientUid,
- /*out*/
- sp<ICamera>* device) {
-
- ATRACE_CALL();
- String8 id = cameraIdIntToStr(api1CameraId);
-
- Status ret = Status::ok();
- sp<Client> client = nullptr;
- ret = connectHelper<ICameraClient,Client>(cameraClient, id, api1CameraId, halVersion,
- clientPackageName, {}, clientUid, USE_CALLING_PID, API_1,
+ clientPackageName, {}, clientUid, clientPid, API_1,
/*shimUpdateOnly*/ false, /*out*/client);
if(!ret.isOk()) {
@@ -1545,8 +1493,9 @@
int cUid = CameraThreadState::getCallingUid();
SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
if (getSystemCameraKind(cameraId, &systemCameraKind) != OK) {
- ALOGE("%s: Invalid camera id %s, ", __FUNCTION__, cameraId.c_str());
- return true;
+ // This isn't a known camera ID, so it's not a system camera
+ ALOGV("%s: Unknown camera id %s, ", __FUNCTION__, cameraId.c_str());
+ return false;
}
// (1) Cameraserver trying to connect, accept.
@@ -1595,8 +1544,7 @@
clientPackageNameAdj = String16(vendorClient.c_str());
}
ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
- /*api1CameraId*/-1,
- CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageNameAdj, clientFeatureId,
+ /*api1CameraId*/-1, clientPackageNameAdj, clientFeatureId,
clientUid, USE_CALLING_PID, API_2, /*shimUpdateOnly*/ false, /*out*/client);
if(!ret.isOk()) {
@@ -1611,7 +1559,7 @@
template<class CALLBACK, class CLIENT>
Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
- int api1CameraId, int halVersion, const String16& clientPackageName,
+ int api1CameraId, const String16& clientPackageName,
const std::optional<String16>& clientFeatureId, int clientUid, int clientPid,
apiLevel effectiveApiLevel, bool shimUpdateOnly,
/*out*/sp<CLIENT>& device) {
@@ -1621,9 +1569,8 @@
int originalClientPid = 0;
- ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
+ ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) and "
"Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
- (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
static_cast<int>(effectiveApiLevel));
sp<CLIENT> client = nullptr;
@@ -1703,7 +1650,7 @@
if(!(ret = makeClient(this, cameraCb, clientPackageName, clientFeatureId,
cameraId, api1CameraId, facing,
clientPid, clientUid, getpid(),
- halVersion, deviceVersion, effectiveApiLevel,
+ deviceVersion, effectiveApiLevel,
/*out*/&tmp)).isOk()) {
return ret;
}
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 6771718..6f37e9f 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -71,7 +71,6 @@
public virtual CameraProviderManager::StatusListener
{
friend class BinderService<CameraService>;
- friend class CameraClient;
friend class CameraOfflineSessionClient;
public:
class Client;
@@ -135,12 +134,6 @@
/*out*/
sp<hardware::ICamera>* device);
- virtual binder::Status connectLegacy(const sp<hardware::ICameraClient>& cameraClient,
- int32_t cameraId, int32_t halVersion,
- const String16& clientPackageName, int32_t clientUid,
- /*out*/
- sp<hardware::ICamera>* device);
-
virtual binder::Status connectDevice(
const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, const String16& cameraId,
const String16& clientPackageName, const std::optional<String16>& clientFeatureId,
@@ -729,7 +722,7 @@
// Single implementation shared between the various connect calls
template<class CALLBACK, class CLIENT>
binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
- int api1CameraId, int halVersion, const String16& clientPackageName,
+ int api1CameraId, const String16& clientPackageName,
const std::optional<String16>& clientFeatureId, int clientUid, int clientPid,
apiLevel effectiveApiLevel, bool shimUpdateOnly, /*out*/sp<CLIENT>& device);
@@ -1065,7 +1058,7 @@
static binder::Status makeClient(const sp<CameraService>& cameraService,
const sp<IInterface>& cameraCb, const String16& packageName,
const std::optional<String16>& featureId, const String8& cameraId, int api1CameraId,
- int facing, int clientPid, uid_t clientUid, int servicePid, int halVersion,
+ int facing, int clientPid, uid_t clientUid, int servicePid,
int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client);
diff --git a/services/camera/libcameraservice/TEST_MAPPING b/services/camera/libcameraservice/TEST_MAPPING
index 6fdac68..ca6cc58 100644
--- a/services/camera/libcameraservice/TEST_MAPPING
+++ b/services/camera/libcameraservice/TEST_MAPPING
@@ -1,7 +1,12 @@
{
"presubmit": [
{
- "name": "cameraservice_test"
+ "name": "cameraservice_test"
+ }
+ ],
+ "imports": [
+ {
+ "path": "frameworks/av/camera"
}
]
}
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
deleted file mode 100644
index b860ceb..0000000
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ /dev/null
@@ -1,1208 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "CameraClient"
-//#define LOG_NDEBUG 0
-
-#include <cutils/atomic.h>
-#include <cutils/properties.h>
-#include <gui/Surface.h>
-#include <media/hardware/HardwareAPI.h>
-
-#include "api1/CameraClient.h"
-#include "device1/CameraHardwareInterface.h"
-#include "CameraService.h"
-#include "utils/CameraThreadState.h"
-
-namespace android {
-
-#define LOG1(...) ALOGD_IF(gLogLevel >= 1, __VA_ARGS__);
-#define LOG2(...) ALOGD_IF(gLogLevel >= 2, __VA_ARGS__);
-
-CameraClient::CameraClient(const sp<CameraService>& cameraService,
- const sp<hardware::ICameraClient>& cameraClient,
- const String16& clientPackageName, const std::optional<String16>& clientFeatureId,
- int cameraId, int cameraFacing,
- int clientPid, int clientUid,
- int servicePid):
- Client(cameraService, cameraClient, clientPackageName, clientFeatureId,
- String8::format("%d", cameraId), cameraId, cameraFacing, clientPid,
- clientUid, servicePid)
-{
- int callingPid = CameraThreadState::getCallingPid();
- LOG1("CameraClient::CameraClient E (pid %d, id %d)", callingPid, cameraId);
-
- mHardware = NULL;
- mMsgEnabled = 0;
- mSurface = 0;
- mPreviewWindow = 0;
- mDestructionStarted = false;
-
- // Callback is disabled by default
- mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
- mOrientation = getOrientation(0, mCameraFacing == CAMERA_FACING_FRONT);
- mPlayShutterSound = true;
- LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
-}
-
-status_t CameraClient::initialize(sp<CameraProviderManager> manager,
- const String8& /*monitorTags*/) {
- int callingPid = CameraThreadState::getCallingPid();
- status_t res;
-
- LOG1("CameraClient::initialize E (pid %d, id %d)", callingPid, mCameraId);
-
- // Verify ops permissions
- res = startCameraOps();
- if (res != OK) {
- return res;
- }
-
- char camera_device_name[10];
- snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
-
- mHardware = new CameraHardwareInterface(camera_device_name);
- res = mHardware->initialize(manager);
- if (res != OK) {
- ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
- mHardware.clear();
- return res;
- }
-
- mHardware->setCallbacks(notifyCallback,
- dataCallback,
- dataCallbackTimestamp,
- handleCallbackTimestampBatch,
- (void *)(uintptr_t)mCameraId);
-
- // Enable zoom, error, focus, and metadata messages by default
- enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
- CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);
-
- LOG1("CameraClient::initialize X (pid %d, id %d)", callingPid, mCameraId);
- return OK;
-}
-
-
-// tear down the client
-CameraClient::~CameraClient() {
- mDestructionStarted = true;
- int callingPid = CameraThreadState::getCallingPid();
- LOG1("CameraClient::~CameraClient E (pid %d, this %p)", callingPid, this);
-
- disconnect();
- LOG1("CameraClient::~CameraClient X (pid %d, this %p)", callingPid, this);
-}
-
-status_t CameraClient::dump(int fd, const Vector<String16>& args) {
- return BasicClient::dump(fd, args);
-}
-
-status_t CameraClient::dumpClient(int fd, const Vector<String16>& args) {
- const size_t SIZE = 256;
- char buffer[SIZE];
-
- size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) with UID %d\n",
- mCameraId,
- (getRemoteCallback() != NULL ?
- IInterface::asBinder(getRemoteCallback()).get() : NULL),
- mClientUid);
- len = (len > SIZE - 1) ? SIZE - 1 : len;
- write(fd, buffer, len);
-
- len = snprintf(buffer, SIZE, "Latest set parameters:\n");
- len = (len > SIZE - 1) ? SIZE - 1 : len;
- write(fd, buffer, len);
-
- mLatestSetParameters.dump(fd, args);
-
- const char *enddump = "\n\n";
- write(fd, enddump, strlen(enddump));
-
- sp<CameraHardwareInterface> hardware = mHardware;
- if (hardware != nullptr) {
- return hardware->dump(fd, args);
- }
- ALOGI("%s: camera device closed already, skip dumping", __FUNCTION__);
- return OK;
-}
-
-// ----------------------------------------------------------------------------
-
-status_t CameraClient::checkPid() const {
- int callingPid = CameraThreadState::getCallingPid();
- if (callingPid == mClientPid) return NO_ERROR;
-
- ALOGW("attempt to use a locked camera from a different process"
- " (old pid %d, new pid %d)", mClientPid, callingPid);
- return EBUSY;
-}
-
-status_t CameraClient::checkPidAndHardware() const {
- if (mHardware == 0) {
- ALOGE("attempt to use a camera after disconnect() (pid %d)",
- CameraThreadState::getCallingPid());
- return INVALID_OPERATION;
- }
- status_t result = checkPid();
- if (result != NO_ERROR) return result;
- return NO_ERROR;
-}
-
-status_t CameraClient::lock() {
- int callingPid = CameraThreadState::getCallingPid();
- LOG1("lock (pid %d)", callingPid);
- Mutex::Autolock lock(mLock);
-
- // lock camera to this client if the the camera is unlocked
- if (mClientPid == 0) {
- mClientPid = callingPid;
- return NO_ERROR;
- }
-
- // returns NO_ERROR if the client already owns the camera, EBUSY otherwise
- return checkPid();
-}
-
-status_t CameraClient::unlock() {
- int callingPid = CameraThreadState::getCallingPid();
- LOG1("unlock (pid %d)", callingPid);
- Mutex::Autolock lock(mLock);
-
- // allow anyone to use camera (after they lock the camera)
- status_t result = checkPid();
- if (result == NO_ERROR) {
- if (mHardware->recordingEnabled()) {
- ALOGE("Not allowed to unlock camera during recording.");
- return INVALID_OPERATION;
- }
- mClientPid = 0;
- LOG1("clear mRemoteCallback (pid %d)", callingPid);
- // we need to remove the reference to ICameraClient so that when the app
- // goes away, the reference count goes to 0.
- mRemoteCallback.clear();
- }
- return result;
-}
-
-// connect a new client to the camera
-status_t CameraClient::connect(const sp<hardware::ICameraClient>& client) {
- int callingPid = CameraThreadState::getCallingPid();
- LOG1("connect E (pid %d)", callingPid);
- Mutex::Autolock lock(mLock);
-
- if (mClientPid != 0 && checkPid() != NO_ERROR) {
- ALOGW("Tried to connect to a locked camera (old pid %d, new pid %d)",
- mClientPid, callingPid);
- return EBUSY;
- }
-
- if (mRemoteCallback != 0 &&
- (IInterface::asBinder(client) == IInterface::asBinder(mRemoteCallback))) {
- LOG1("Connect to the same client");
- return NO_ERROR;
- }
-
- mPreviewCallbackFlag = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
- mClientPid = callingPid;
- mRemoteCallback = client;
-
- LOG1("connect X (pid %d)", callingPid);
- return NO_ERROR;
-}
-
-static void disconnectWindow(const sp<ANativeWindow>& window) {
- if (window != 0) {
- status_t result = native_window_api_disconnect(window.get(),
- NATIVE_WINDOW_API_CAMERA);
- if (result != NO_ERROR) {
- ALOGW("native_window_api_disconnect failed: %s (%d)", strerror(-result),
- result);
- }
- }
-}
-
-binder::Status CameraClient::disconnect() {
- int callingPid = CameraThreadState::getCallingPid();
- LOG1("disconnect E (pid %d)", callingPid);
- Mutex::Autolock lock(mLock);
-
- binder::Status res = binder::Status::ok();
- // Allow both client and the cameraserver to disconnect at all times
- if (callingPid != mClientPid && callingPid != mServicePid) {
- ALOGW("different client - don't disconnect");
- return res;
- }
-
- // Make sure disconnect() is done once and once only, whether it is called
- // from the user directly, or called by the destructor.
- if (mHardware == 0) return res;
-
- LOG1("hardware teardown");
- // Before destroying mHardware, we must make sure it's in the
- // idle state.
- // Turn off all messages.
- disableMsgType(CAMERA_MSG_ALL_MSGS);
- mHardware->stopPreview();
- sCameraService->updateProxyDeviceState(
- hardware::ICameraServiceProxy::CAMERA_STATE_IDLE,
- mCameraIdStr, mCameraFacing, mClientPackageName,
- hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
- mHardware->cancelPicture();
- // Release the hardware resources.
- mHardware->release();
-
- // Release the held ANativeWindow resources.
- if (mPreviewWindow != 0) {
- disconnectWindow(mPreviewWindow);
- mPreviewWindow = 0;
- mHardware->setPreviewWindow(mPreviewWindow);
- }
- mHardware.clear();
-
- CameraService::Client::disconnect();
-
- LOG1("disconnect X (pid %d)", callingPid);
-
- return res;
-}
-
-// ----------------------------------------------------------------------------
-
-status_t CameraClient::setPreviewWindow(const sp<IBinder>& binder,
- const sp<ANativeWindow>& window) {
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- // return if no change in surface.
- if (binder == mSurface) {
- return NO_ERROR;
- }
-
- if (window != 0) {
- result = native_window_api_connect(window.get(), NATIVE_WINDOW_API_CAMERA);
- if (result != NO_ERROR) {
- ALOGE("native_window_api_connect failed: %s (%d)", strerror(-result),
- result);
- return result;
- }
- }
-
- // If preview has been already started, register preview buffers now.
- if (mHardware->previewEnabled()) {
- if (window != 0) {
- mHardware->setPreviewScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- mHardware->setPreviewTransform(mOrientation);
- result = mHardware->setPreviewWindow(window);
- }
- }
-
- if (result == NO_ERROR) {
- // Everything has succeeded. Disconnect the old window and remember the
- // new window.
- disconnectWindow(mPreviewWindow);
- mSurface = binder;
- mPreviewWindow = window;
- } else {
- // Something went wrong after we connected to the new window, so
- // disconnect here.
- disconnectWindow(window);
- }
-
- return result;
-}
-
-// set the buffer consumer that the preview will use
-status_t CameraClient::setPreviewTarget(
- const sp<IGraphicBufferProducer>& bufferProducer) {
- LOG1("setPreviewTarget(%p) (pid %d)", bufferProducer.get(),
- CameraThreadState::getCallingPid());
-
- sp<IBinder> binder;
- sp<ANativeWindow> window;
- if (bufferProducer != 0) {
- binder = IInterface::asBinder(bufferProducer);
- // Using controlledByApp flag to ensure that the buffer queue remains in
- // async mode for the old camera API, where many applications depend
- // on that behavior.
- window = new Surface(bufferProducer, /*controlledByApp*/ true);
- }
- return setPreviewWindow(binder, window);
-}
-
-// set the preview callback flag to affect how the received frames from
-// preview are handled.
-void CameraClient::setPreviewCallbackFlag(int callback_flag) {
- LOG1("setPreviewCallbackFlag(%d) (pid %d)", callback_flag, CameraThreadState::getCallingPid());
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return;
-
- mPreviewCallbackFlag = callback_flag;
- if (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) {
- enableMsgType(CAMERA_MSG_PREVIEW_FRAME);
- } else {
- disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
- }
-}
-
-status_t CameraClient::setPreviewCallbackTarget(
- const sp<IGraphicBufferProducer>& callbackProducer) {
- (void)callbackProducer;
- ALOGE("%s: Unimplemented!", __FUNCTION__);
- return INVALID_OPERATION;
-}
-
-// start preview mode
-status_t CameraClient::startPreview() {
- LOG1("startPreview (pid %d)", CameraThreadState::getCallingPid());
- return startCameraMode(CAMERA_PREVIEW_MODE);
-}
-
-// start recording mode
-status_t CameraClient::startRecording() {
- LOG1("startRecording (pid %d)", CameraThreadState::getCallingPid());
- return startCameraMode(CAMERA_RECORDING_MODE);
-}
-
-// start preview or recording
-status_t CameraClient::startCameraMode(camera_mode mode) {
- LOG1("startCameraMode(%d)", mode);
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- switch(mode) {
- case CAMERA_PREVIEW_MODE:
- if (mSurface == 0 && mPreviewWindow == 0) {
- LOG1("mSurface is not set yet.");
- // still able to start preview in this case.
- }
- return startPreviewMode();
- case CAMERA_RECORDING_MODE:
- if (mSurface == 0 && mPreviewWindow == 0) {
- ALOGE("mSurface or mPreviewWindow must be set before startRecordingMode.");
- return INVALID_OPERATION;
- }
- return startRecordingMode();
- default:
- return UNKNOWN_ERROR;
- }
-}
-
-status_t CameraClient::startPreviewMode() {
- LOG1("startPreviewMode");
- status_t result = NO_ERROR;
-
- // if preview has been enabled, nothing needs to be done
- if (mHardware->previewEnabled()) {
- return NO_ERROR;
- }
-
- if (mPreviewWindow != 0) {
- mHardware->setPreviewScalingMode(
- NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- mHardware->setPreviewTransform(mOrientation);
- }
- mHardware->setPreviewWindow(mPreviewWindow);
- result = mHardware->startPreview();
- if (result == NO_ERROR) {
- sCameraService->updateProxyDeviceState(
- hardware::ICameraServiceProxy::CAMERA_STATE_ACTIVE,
- mCameraIdStr, mCameraFacing, mClientPackageName,
- hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
- }
- return result;
-}
-
-status_t CameraClient::startRecordingMode() {
- LOG1("startRecordingMode");
- status_t result = NO_ERROR;
-
- // if recording has been enabled, nothing needs to be done
- if (mHardware->recordingEnabled()) {
- return NO_ERROR;
- }
-
- // if preview has not been started, start preview first
- if (!mHardware->previewEnabled()) {
- result = startPreviewMode();
- if (result != NO_ERROR) {
- return result;
- }
- }
-
- // start recording mode
- enableMsgType(CAMERA_MSG_VIDEO_FRAME);
- sCameraService->playSound(CameraService::SOUND_RECORDING_START);
- result = mHardware->startRecording();
- if (result != NO_ERROR) {
- ALOGE("mHardware->startRecording() failed with status %d", result);
- }
- return result;
-}
-
-// stop preview mode
-void CameraClient::stopPreview() {
- LOG1("stopPreview (pid %d)", CameraThreadState::getCallingPid());
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return;
-
-
- disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
- mHardware->stopPreview();
- sCameraService->updateProxyDeviceState(
- hardware::ICameraServiceProxy::CAMERA_STATE_IDLE,
- mCameraIdStr, mCameraFacing, mClientPackageName,
- hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
- mPreviewBuffer.clear();
-}
-
-// stop recording mode
-void CameraClient::stopRecording() {
- LOG1("stopRecording (pid %d)", CameraThreadState::getCallingPid());
- {
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return;
-
- disableMsgType(CAMERA_MSG_VIDEO_FRAME);
- mHardware->stopRecording();
- sCameraService->playSound(CameraService::SOUND_RECORDING_STOP);
-
- mPreviewBuffer.clear();
- }
-
- {
- Mutex::Autolock l(mAvailableCallbackBuffersLock);
- if (!mAvailableCallbackBuffers.empty()) {
- mAvailableCallbackBuffers.clear();
- }
- }
-}
-
-// release a recording frame
-void CameraClient::releaseRecordingFrame(const sp<IMemory>& mem) {
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return;
- if (mem == nullptr) {
- android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26164272",
- CameraThreadState::getCallingUid(), nullptr, 0);
- return;
- }
-
- mHardware->releaseRecordingFrame(mem);
-}
-
-void CameraClient::releaseRecordingFrameHandle(native_handle_t *handle) {
- if (handle == nullptr) return;
- Mutex::Autolock lock(mLock);
- sp<IMemory> dataPtr;
- {
- Mutex::Autolock l(mAvailableCallbackBuffersLock);
- if (!mAvailableCallbackBuffers.empty()) {
- dataPtr = mAvailableCallbackBuffers.back();
- mAvailableCallbackBuffers.pop_back();
- }
- }
-
- if (dataPtr == nullptr) {
- ALOGE("%s: %d: No callback buffer available. Dropping a native handle.", __FUNCTION__,
- __LINE__);
- native_handle_close(handle);
- native_handle_delete(handle);
- return;
- } else if (dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
- ALOGE("%s: %d: Callback buffer size doesn't match VideoNativeHandleMetadata", __FUNCTION__,
- __LINE__);
- native_handle_close(handle);
- native_handle_delete(handle);
- return;
- }
-
- if (mHardware != nullptr) {
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
- metadata->eType = kMetadataBufferTypeNativeHandleSource;
- metadata->pHandle = handle;
- mHardware->releaseRecordingFrame(dataPtr);
- }
-}
-
-void CameraClient::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
- Mutex::Autolock lock(mLock);
- bool disconnected = (mHardware == nullptr);
- size_t n = handles.size();
- std::vector<sp<IMemory>> frames;
- if (!disconnected) {
- frames.reserve(n);
- }
- bool error = false;
- for (auto& handle : handles) {
- sp<IMemory> dataPtr;
- {
- Mutex::Autolock l(mAvailableCallbackBuffersLock);
- if (!mAvailableCallbackBuffers.empty()) {
- dataPtr = mAvailableCallbackBuffers.back();
- mAvailableCallbackBuffers.pop_back();
- }
- }
-
- if (dataPtr == nullptr) {
- ALOGE("%s: %d: No callback buffer available. Dropping frames.", __FUNCTION__,
- __LINE__);
- error = true;
- break;
- } else if (dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
- ALOGE("%s: %d: Callback buffer must be VideoNativeHandleMetadata", __FUNCTION__,
- __LINE__);
- error = true;
- break;
- }
-
- if (!disconnected) {
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
- metadata->eType = kMetadataBufferTypeNativeHandleSource;
- metadata->pHandle = handle;
- frames.push_back(dataPtr);
- }
- }
-
- if (error) {
- for (auto& handle : handles) {
- native_handle_close(handle);
- native_handle_delete(handle);
- }
- } else if (!disconnected) {
- mHardware->releaseRecordingFrameBatch(frames);
- }
- return;
-}
-
-status_t CameraClient::setVideoBufferMode(int32_t videoBufferMode) {
- LOG1("setVideoBufferMode: %d", videoBufferMode);
- bool enableMetadataInBuffers = false;
-
- if (videoBufferMode == VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA) {
- enableMetadataInBuffers = true;
- } else if (videoBufferMode != VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV) {
- ALOGE("%s: %d: videoBufferMode %d is not supported.", __FUNCTION__, __LINE__,
- videoBufferMode);
- return BAD_VALUE;
- }
-
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) {
- return UNKNOWN_ERROR;
- }
-
- return mHardware->storeMetaDataInBuffers(enableMetadataInBuffers);
-}
-
-bool CameraClient::previewEnabled() {
- LOG1("previewEnabled (pid %d)", CameraThreadState::getCallingPid());
-
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return false;
- return mHardware->previewEnabled();
-}
-
-bool CameraClient::recordingEnabled() {
- LOG1("recordingEnabled (pid %d)", CameraThreadState::getCallingPid());
-
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) return false;
- return mHardware->recordingEnabled();
-}
-
-status_t CameraClient::autoFocus() {
- LOG1("autoFocus (pid %d)", CameraThreadState::getCallingPid());
-
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- return mHardware->autoFocus();
-}
-
-status_t CameraClient::cancelAutoFocus() {
- LOG1("cancelAutoFocus (pid %d)", CameraThreadState::getCallingPid());
-
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- return mHardware->cancelAutoFocus();
-}
-
-// take a picture - image is returned in callback
-status_t CameraClient::takePicture(int msgType) {
- LOG1("takePicture (pid %d): 0x%x", CameraThreadState::getCallingPid(), msgType);
-
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- if ((msgType & CAMERA_MSG_RAW_IMAGE) &&
- (msgType & CAMERA_MSG_RAW_IMAGE_NOTIFY)) {
- ALOGE("CAMERA_MSG_RAW_IMAGE and CAMERA_MSG_RAW_IMAGE_NOTIFY"
- " cannot be both enabled");
- return BAD_VALUE;
- }
-
- // We only accept picture related message types
- // and ignore other types of messages for takePicture().
- int picMsgType = msgType
- & (CAMERA_MSG_SHUTTER |
- CAMERA_MSG_POSTVIEW_FRAME |
- CAMERA_MSG_RAW_IMAGE |
- CAMERA_MSG_RAW_IMAGE_NOTIFY |
- CAMERA_MSG_COMPRESSED_IMAGE);
-
- enableMsgType(picMsgType);
-
- return mHardware->takePicture();
-}
-
-// set preview/capture parameters - key/value pairs
-status_t CameraClient::setParameters(const String8& params) {
- LOG1("setParameters (pid %d) (%s)", CameraThreadState::getCallingPid(), params.string());
-
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- mLatestSetParameters = CameraParameters(params);
- CameraParameters p(params);
- return mHardware->setParameters(p);
-}
-
-// get preview/capture parameters - key/value pairs
-String8 CameraClient::getParameters() const {
- Mutex::Autolock lock(mLock);
- // The camera service can unconditionally get the parameters at all times
- if (CameraThreadState::getCallingPid() != mServicePid && checkPidAndHardware() != NO_ERROR) {
- return String8();
- }
-
- String8 params(mHardware->getParameters().flatten());
- LOG1("getParameters (pid %d) (%s)", CameraThreadState::getCallingPid(), params.string());
- return params;
-}
-
-// enable shutter sound
-status_t CameraClient::enableShutterSound(bool enable) {
- LOG1("enableShutterSound (pid %d)", CameraThreadState::getCallingPid());
-
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- if (enable) {
- mPlayShutterSound = true;
- return OK;
- }
-
- mPlayShutterSound = false;
- return OK;
-}
-
-status_t CameraClient::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
- LOG1("sendCommand (pid %d)", CameraThreadState::getCallingPid());
- int orientation;
- Mutex::Autolock lock(mLock);
- status_t result = checkPidAndHardware();
- if (result != NO_ERROR) return result;
-
- if (cmd == CAMERA_CMD_SET_DISPLAY_ORIENTATION) {
- // Mirror the preview if the camera is front-facing.
- orientation = getOrientation(arg1, mCameraFacing == CAMERA_FACING_FRONT);
- if (orientation == -1) return BAD_VALUE;
-
- if (mOrientation != orientation) {
- mOrientation = orientation;
- if (mPreviewWindow != 0) {
- mHardware->setPreviewTransform(mOrientation);
- }
- }
- return OK;
- } else if (cmd == CAMERA_CMD_ENABLE_SHUTTER_SOUND) {
- switch (arg1) {
- case 0:
- return enableShutterSound(false);
- case 1:
- return enableShutterSound(true);
- default:
- return BAD_VALUE;
- }
- return OK;
- } else if (cmd == CAMERA_CMD_PLAY_RECORDING_SOUND) {
- sCameraService->playSound(CameraService::SOUND_RECORDING_START);
- } else if (cmd == CAMERA_CMD_SET_VIDEO_BUFFER_COUNT) {
- // Silently ignore this command
- return INVALID_OPERATION;
- } else if (cmd == CAMERA_CMD_PING) {
- // If mHardware is 0, checkPidAndHardware will return error.
- return OK;
- }
-
- return mHardware->sendCommand(cmd, arg1, arg2);
-}
-
-// ----------------------------------------------------------------------------
-
-void CameraClient::enableMsgType(int32_t msgType) {
- android_atomic_or(msgType, &mMsgEnabled);
- mHardware->enableMsgType(msgType);
-}
-
-void CameraClient::disableMsgType(int32_t msgType) {
- android_atomic_and(~msgType, &mMsgEnabled);
- mHardware->disableMsgType(msgType);
-}
-
-#define CHECK_MESSAGE_INTERVAL 10 // 10ms
-bool CameraClient::lockIfMessageWanted(int32_t msgType) {
- int sleepCount = 0;
- while (mMsgEnabled & msgType) {
- if (mLock.tryLock() == NO_ERROR) {
- if (sleepCount > 0) {
- LOG1("lockIfMessageWanted(%d): waited for %d ms",
- msgType, sleepCount * CHECK_MESSAGE_INTERVAL);
- }
-
- // If messages are no longer enabled after acquiring lock, release and drop message
- if ((mMsgEnabled & msgType) == 0) {
- mLock.unlock();
- break;
- }
-
- return true;
- }
- if (sleepCount++ == 0) {
- LOG1("lockIfMessageWanted(%d): enter sleep", msgType);
- }
- usleep(CHECK_MESSAGE_INTERVAL * 1000);
- }
- ALOGW("lockIfMessageWanted(%d): dropped unwanted message", msgType);
- return false;
-}
-
-sp<CameraClient> CameraClient::getClientFromCookie(void* user) {
- String8 cameraId = String8::format("%d", (int)(intptr_t) user);
- auto clientDescriptor = sCameraService->mActiveClientManager.get(cameraId);
- if (clientDescriptor != nullptr) {
- return sp<CameraClient>{
- static_cast<CameraClient*>(clientDescriptor->getValue().get())};
- }
- return sp<CameraClient>{nullptr};
-}
-
-// Callback messages can be dispatched to internal handlers or pass to our
-// client's callback functions, depending on the message type.
-//
-// notifyCallback:
-// CAMERA_MSG_SHUTTER handleShutter
-// (others) c->notifyCallback
-// dataCallback:
-// CAMERA_MSG_PREVIEW_FRAME handlePreviewData
-// CAMERA_MSG_POSTVIEW_FRAME handlePostview
-// CAMERA_MSG_RAW_IMAGE handleRawPicture
-// CAMERA_MSG_COMPRESSED_IMAGE handleCompressedPicture
-// (others) c->dataCallback
-// dataCallbackTimestamp
-// (others) c->dataCallbackTimestamp
-
-void CameraClient::notifyCallback(int32_t msgType, int32_t ext1,
- int32_t ext2, void* user) {
- LOG2("notifyCallback(%d)", msgType);
-
- sp<CameraClient> client = getClientFromCookie(user);
- if (client.get() == nullptr) return;
-
- if (!client->lockIfMessageWanted(msgType)) return;
-
- switch (msgType) {
- case CAMERA_MSG_SHUTTER:
- // ext1 is the dimension of the yuv picture.
- client->handleShutter();
- break;
- default:
- client->handleGenericNotify(msgType, ext1, ext2);
- break;
- }
-}
-
-void CameraClient::dataCallback(int32_t msgType,
- const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
- LOG2("dataCallback(%d)", msgType);
-
- sp<CameraClient> client = getClientFromCookie(user);
- if (client.get() == nullptr) return;
-
- if (!client->lockIfMessageWanted(msgType)) return;
- if (dataPtr == 0 && metadata == NULL) {
- ALOGE("Null data returned in data callback");
- client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
- return;
- }
-
- switch (msgType & ~CAMERA_MSG_PREVIEW_METADATA) {
- case CAMERA_MSG_PREVIEW_FRAME:
- client->handlePreviewData(msgType, dataPtr, metadata);
- break;
- case CAMERA_MSG_POSTVIEW_FRAME:
- client->handlePostview(dataPtr);
- break;
- case CAMERA_MSG_RAW_IMAGE:
- client->handleRawPicture(dataPtr);
- break;
- case CAMERA_MSG_COMPRESSED_IMAGE:
- client->handleCompressedPicture(dataPtr);
- break;
- default:
- client->handleGenericData(msgType, dataPtr, metadata);
- break;
- }
-}
-
-void CameraClient::dataCallbackTimestamp(nsecs_t timestamp,
- int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
- LOG2("dataCallbackTimestamp(%d)", msgType);
-
- sp<CameraClient> client = getClientFromCookie(user);
- if (client.get() == nullptr) return;
-
- if (!client->lockIfMessageWanted(msgType)) return;
-
- if (dataPtr == 0) {
- ALOGE("Null data returned in data with timestamp callback");
- client->handleGenericNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
- return;
- }
-
- client->handleGenericDataTimestamp(timestamp, msgType, dataPtr);
-}
-
-void CameraClient::handleCallbackTimestampBatch(
- int32_t msgType, const std::vector<HandleTimestampMessage>& msgs, void* user) {
- LOG2("dataCallbackTimestampBatch");
- sp<CameraClient> client = getClientFromCookie(user);
- if (client.get() == nullptr) return;
- if (!client->lockIfMessageWanted(msgType)) return;
-
- sp<hardware::ICameraClient> c = client->mRemoteCallback;
- client->mLock.unlock();
- if (c != 0 && msgs.size() > 0) {
- size_t n = msgs.size();
- std::vector<nsecs_t> timestamps;
- std::vector<native_handle_t*> handles;
- timestamps.reserve(n);
- handles.reserve(n);
- for (auto& msg : msgs) {
- native_handle_t* handle = nullptr;
- if (msg.dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
- ALOGE("%s: dataPtr does not contain VideoNativeHandleMetadata!", __FUNCTION__);
- return;
- }
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata *metadata =
- (VideoNativeHandleMetadata*)(msg.dataPtr->unsecurePointer());
- if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
- handle = metadata->pHandle;
- }
-
- if (handle == nullptr) {
- ALOGE("%s: VideoNativeHandleMetadata type mismatch or null handle passed!",
- __FUNCTION__);
- return;
- }
- {
- Mutex::Autolock l(client->mAvailableCallbackBuffersLock);
- client->mAvailableCallbackBuffers.push_back(msg.dataPtr);
- }
- timestamps.push_back(msg.timestamp);
- handles.push_back(handle);
- }
- c->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
- }
-}
-
-// snapshot taken callback
-void CameraClient::handleShutter(void) {
- if (mPlayShutterSound) {
- sCameraService->playSound(CameraService::SOUND_SHUTTER);
- }
-
- sp<hardware::ICameraClient> c = mRemoteCallback;
- if (c != 0) {
- mLock.unlock();
- c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0);
- if (!lockIfMessageWanted(CAMERA_MSG_SHUTTER)) return;
- }
- disableMsgType(CAMERA_MSG_SHUTTER);
-
- // Shutters only happen in response to takePicture, so mark device as
- // idle now, until preview is restarted
- sCameraService->updateProxyDeviceState(
- hardware::ICameraServiceProxy::CAMERA_STATE_IDLE,
- mCameraIdStr, mCameraFacing, mClientPackageName,
- hardware::ICameraServiceProxy::CAMERA_API_LEVEL_1);
-
- mLock.unlock();
-}
-
-// preview callback - frame buffer update
-void CameraClient::handlePreviewData(int32_t msgType,
- const sp<IMemory>& mem,
- camera_frame_metadata_t *metadata) {
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
-
- // local copy of the callback flags
- int flags = mPreviewCallbackFlag;
-
- // is callback enabled?
- if (!(flags & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK)) {
- // If the enable bit is off, the copy-out and one-shot bits are ignored
- LOG2("frame callback is disabled");
- mLock.unlock();
- return;
- }
-
- // hold a strong pointer to the client
- sp<hardware::ICameraClient> c = mRemoteCallback;
-
- // clear callback flags if no client or one-shot mode
- if (c == 0 || (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK)) {
- LOG2("Disable preview callback");
- mPreviewCallbackFlag &= ~(CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK |
- CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK |
- CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK);
- disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
- }
-
- if (c != 0) {
- // Is the received frame copied out or not?
- if (flags & CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK) {
- LOG2("frame is copied");
- copyFrameAndPostCopiedFrame(msgType, c, heap, offset, size, metadata);
- } else {
- LOG2("frame is forwarded");
- mLock.unlock();
- c->dataCallback(msgType, mem, metadata);
- }
- } else {
- mLock.unlock();
- }
-}
-
-// picture callback - postview image ready
-void CameraClient::handlePostview(const sp<IMemory>& mem) {
- disableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
-
- sp<hardware::ICameraClient> c = mRemoteCallback;
- mLock.unlock();
- if (c != 0) {
- c->dataCallback(CAMERA_MSG_POSTVIEW_FRAME, mem, NULL);
- }
-}
-
-// picture callback - raw image ready
-void CameraClient::handleRawPicture(const sp<IMemory>& mem) {
- disableMsgType(CAMERA_MSG_RAW_IMAGE);
-
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
-
- sp<hardware::ICameraClient> c = mRemoteCallback;
- mLock.unlock();
- if (c != 0) {
- c->dataCallback(CAMERA_MSG_RAW_IMAGE, mem, NULL);
- }
-}
-
-// picture callback - compressed picture ready
-void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
- disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
-
- sp<hardware::ICameraClient> c = mRemoteCallback;
- mLock.unlock();
- if (c != 0) {
- c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
- }
-}
-
-
-void CameraClient::handleGenericNotify(int32_t msgType,
- int32_t ext1, int32_t ext2) {
- sp<hardware::ICameraClient> c = mRemoteCallback;
- mLock.unlock();
- if (c != 0) {
- c->notifyCallback(msgType, ext1, ext2);
- }
-}
-
-void CameraClient::handleGenericData(int32_t msgType,
- const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata) {
- sp<hardware::ICameraClient> c = mRemoteCallback;
- mLock.unlock();
- if (c != 0) {
- c->dataCallback(msgType, dataPtr, metadata);
- }
-}
-
-void CameraClient::handleGenericDataTimestamp(nsecs_t timestamp,
- int32_t msgType, const sp<IMemory>& dataPtr) {
- sp<hardware::ICameraClient> c = mRemoteCallback;
- mLock.unlock();
- if (c != 0 && dataPtr != nullptr) {
- native_handle_t* handle = nullptr;
-
- // Check if dataPtr contains a VideoNativeHandleMetadata.
- if (dataPtr->size() == sizeof(VideoNativeHandleMetadata)) {
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata *metadata =
- (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
- if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
- handle = metadata->pHandle;
- }
- }
-
- // If dataPtr contains a native handle, send it via recordingFrameHandleCallbackTimestamp.
- if (handle != nullptr) {
- {
- Mutex::Autolock l(mAvailableCallbackBuffersLock);
- mAvailableCallbackBuffers.push_back(dataPtr);
- }
- c->recordingFrameHandleCallbackTimestamp(timestamp, handle);
- } else {
- c->dataCallbackTimestamp(timestamp, msgType, dataPtr);
- }
- }
-}
-
-void CameraClient::copyFrameAndPostCopiedFrame(
- int32_t msgType, const sp<hardware::ICameraClient>& client,
- const sp<IMemoryHeap>& heap, size_t offset, size_t size,
- camera_frame_metadata_t *metadata) {
- LOG2("copyFrameAndPostCopiedFrame");
- // It is necessary to copy out of pmem before sending this to
- // the callback. For efficiency, reuse the same MemoryHeapBase
- // provided it's big enough. Don't allocate the memory or
- // perform the copy if there's no callback.
- // hold the preview lock while we grab a reference to the preview buffer
- sp<MemoryHeapBase> previewBuffer;
-
- if (mPreviewBuffer == 0) {
- mPreviewBuffer = new MemoryHeapBase(size, 0, NULL);
- } else if (size > mPreviewBuffer->virtualSize()) {
- mPreviewBuffer.clear();
- mPreviewBuffer = new MemoryHeapBase(size, 0, NULL);
- }
- if (mPreviewBuffer == 0) {
- ALOGE("failed to allocate space for preview buffer");
- mLock.unlock();
- return;
- }
- previewBuffer = mPreviewBuffer;
-
- void* previewBufferBase = previewBuffer->base();
- void* heapBase = heap->base();
-
- if (heapBase == MAP_FAILED) {
- ALOGE("%s: Failed to mmap heap for preview frame.", __FUNCTION__);
- mLock.unlock();
- return;
- } else if (previewBufferBase == MAP_FAILED) {
- ALOGE("%s: Failed to mmap preview buffer for preview frame.", __FUNCTION__);
- mLock.unlock();
- return;
- }
-
- memcpy(previewBufferBase, (uint8_t *) heapBase + offset, size);
-
- sp<MemoryBase> frame = new MemoryBase(previewBuffer, 0, size);
- if (frame == 0) {
- ALOGE("failed to allocate space for frame callback");
- mLock.unlock();
- return;
- }
-
- mLock.unlock();
- client->dataCallback(msgType, frame, metadata);
-}
-
-int CameraClient::getOrientation(int degrees, bool mirror) {
- if (!mirror) {
- if (degrees == 0) return 0;
- else if (degrees == 90) return HAL_TRANSFORM_ROT_90;
- else if (degrees == 180) return HAL_TRANSFORM_ROT_180;
- else if (degrees == 270) return HAL_TRANSFORM_ROT_270;
- } else { // Do mirror (horizontal flip)
- if (degrees == 0) { // FLIP_H and ROT_0
- return HAL_TRANSFORM_FLIP_H;
- } else if (degrees == 90) { // FLIP_H and ROT_90
- return HAL_TRANSFORM_FLIP_H | HAL_TRANSFORM_ROT_90;
- } else if (degrees == 180) { // FLIP_H and ROT_180
- return HAL_TRANSFORM_FLIP_V;
- } else if (degrees == 270) { // FLIP_H and ROT_270
- return HAL_TRANSFORM_FLIP_V | HAL_TRANSFORM_ROT_90;
- }
- }
- ALOGE("Invalid setDisplayOrientation degrees=%d", degrees);
- return -1;
-}
-
-status_t CameraClient::setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer) {
- (void)bufferProducer;
- ALOGE("%s: %d: CameraClient doesn't support setting a video target.", __FUNCTION__, __LINE__);
- return INVALID_OPERATION;
-}
-
-status_t CameraClient::setAudioRestriction(int mode) {
- if (!isValidAudioRestriction(mode)) {
- ALOGE("%s: invalid audio restriction mode %d", __FUNCTION__, mode);
- return BAD_VALUE;
- }
-
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) {
- return INVALID_OPERATION;
- }
- return BasicClient::setAudioRestriction(mode);
-}
-
-int32_t CameraClient::getGlobalAudioRestriction() {
- Mutex::Autolock lock(mLock);
- if (checkPidAndHardware() != NO_ERROR) {
- return INVALID_OPERATION;
- }
- return BasicClient::getServiceAudioRestriction();
-}
-
-// API1->Device1 does not support this feature
-status_t CameraClient::setRotateAndCropOverride(uint8_t /*rotateAndCrop*/) {
- return OK;
-}
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
deleted file mode 100644
index aacb00e..0000000
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_CAMERACLIENT_H
-#define ANDROID_SERVERS_CAMERA_CAMERACLIENT_H
-
-#include "CameraService.h"
-
-namespace android {
-
-class MemoryHeapBase;
-class CameraHardwareInterface;
-
-/**
- * Interface between android.hardware.Camera API and Camera HAL device for version
- * CAMERA_DEVICE_API_VERSION_1_0.
- */
-
-class CameraClient : public CameraService::Client
-{
-public:
- // ICamera interface (see ICamera for details)
- virtual binder::Status disconnect();
- virtual status_t connect(const sp<hardware::ICameraClient>& client);
- virtual status_t lock();
- virtual status_t unlock();
- virtual status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer);
- virtual void setPreviewCallbackFlag(int flag);
- virtual status_t setPreviewCallbackTarget(
- const sp<IGraphicBufferProducer>& callbackProducer);
- virtual status_t startPreview();
- virtual void stopPreview();
- virtual bool previewEnabled();
- virtual status_t setVideoBufferMode(int32_t videoBufferMode);
- virtual status_t startRecording();
- virtual void stopRecording();
- virtual bool recordingEnabled();
- virtual void releaseRecordingFrame(const sp<IMemory>& mem);
- virtual void releaseRecordingFrameHandle(native_handle_t *handle);
- virtual void releaseRecordingFrameHandleBatch(
- const std::vector<native_handle_t*>& handles);
- virtual status_t autoFocus();
- virtual status_t cancelAutoFocus();
- virtual status_t takePicture(int msgType);
- virtual status_t setParameters(const String8& params);
- virtual String8 getParameters() const;
- virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
- virtual status_t setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
- virtual status_t setAudioRestriction(int mode);
- virtual int32_t getGlobalAudioRestriction();
-
- virtual status_t setRotateAndCropOverride(uint8_t override);
-
- // Interface used by CameraService
- CameraClient(const sp<CameraService>& cameraService,
- const sp<hardware::ICameraClient>& cameraClient,
- const String16& clientPackageName,
- const std::optional<String16>& clientFeatureId,
- int cameraId,
- int cameraFacing,
- int clientPid,
- int clientUid,
- int servicePid);
- ~CameraClient();
-
- virtual status_t initialize(sp<CameraProviderManager> manager,
- const String8& monitorTags) override;
-
- virtual status_t dump(int fd, const Vector<String16>& args);
-
- virtual status_t dumpClient(int fd, const Vector<String16>& args);
-
-private:
-
- // check whether the calling process matches mClientPid.
- status_t checkPid() const;
- status_t checkPidAndHardware() const; // also check mHardware != 0
-
- // these are internal functions used to set up preview buffers
- status_t registerPreviewBuffers();
-
- // camera operation mode
- enum camera_mode {
- CAMERA_PREVIEW_MODE = 0, // frame automatically released
- CAMERA_RECORDING_MODE = 1, // frame has to be explicitly released by releaseRecordingFrame()
- };
- // these are internal functions used for preview/recording
- status_t startCameraMode(camera_mode mode);
- status_t startPreviewMode();
- status_t startRecordingMode();
-
- // internal function used by sendCommand to enable/disable shutter sound.
- status_t enableShutterSound(bool enable);
-
- static sp<CameraClient> getClientFromCookie(void* user);
-
- // these are static callback functions
- static void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2, void* user);
- static void dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
- camera_frame_metadata_t *metadata, void* user);
- static void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr, void* user);
- static void handleCallbackTimestampBatch(
- int32_t msgType, const std::vector<HandleTimestampMessage>&, void* user);
- // handlers for messages
- void handleShutter(void);
- void handlePreviewData(int32_t msgType, const sp<IMemory>& mem,
- camera_frame_metadata_t *metadata);
- void handlePostview(const sp<IMemory>& mem);
- void handleRawPicture(const sp<IMemory>& mem);
- void handleCompressedPicture(const sp<IMemory>& mem);
- void handleGenericNotify(int32_t msgType, int32_t ext1, int32_t ext2);
- void handleGenericData(int32_t msgType, const sp<IMemory>& dataPtr,
- camera_frame_metadata_t *metadata);
- void handleGenericDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
-
- void copyFrameAndPostCopiedFrame(
- int32_t msgType,
- const sp<hardware::ICameraClient>& client,
- const sp<IMemoryHeap>& heap,
- size_t offset, size_t size,
- camera_frame_metadata_t *metadata);
-
- int getOrientation(int orientation, bool mirror);
-
- status_t setPreviewWindow(
- const sp<IBinder>& binder,
- const sp<ANativeWindow>& window);
-
-
- // these are initialized in the constructor.
- sp<CameraHardwareInterface> mHardware; // cleared after disconnect()
- int mPreviewCallbackFlag;
- int mOrientation; // Current display orientation
- bool mPlayShutterSound;
- bool mLegacyMode; // camera2 api legacy mode?
-
- // Ensures atomicity among the public methods
- mutable Mutex mLock;
- // This is a binder of Surface or Surface.
- sp<IBinder> mSurface;
- sp<ANativeWindow> mPreviewWindow;
-
- // If the user want us to return a copy of the preview frame (instead
- // of the original one), we allocate mPreviewBuffer and reuse it if possible.
- sp<MemoryHeapBase> mPreviewBuffer;
-
- // Debugging information
- CameraParameters mLatestSetParameters;
-
- // mAvailableCallbackBuffers stores sp<IMemory> that HAL uses to send VideoNativeHandleMetadata.
- // It will be used to send VideoNativeHandleMetadata back to HAL when camera receives the
- // native handle from releaseRecordingFrameHandle.
- Mutex mAvailableCallbackBuffersLock;
- std::vector<sp<IMemory>> mAvailableCallbackBuffers;
-
- // We need to avoid the deadlock when the incoming command thread and
- // the CameraHardwareInterface callback thread both want to grab mLock.
- // An extra flag is used to tell the callback thread that it should stop
- // trying to deliver the callback messages if the client is not
- // interested in it anymore. For example, if the client is calling
- // stopPreview(), the preview frame messages do not need to be delivered
- // anymore.
-
- // This function takes the same parameter as the enableMsgType() and
- // disableMsgType() functions in CameraHardwareInterface.
- void enableMsgType(int32_t msgType);
- void disableMsgType(int32_t msgType);
- volatile int32_t mMsgEnabled;
-
- // This function keeps trying to grab mLock, or give up if the message
- // is found to be disabled. It returns true if mLock is grabbed.
- bool lockIfMessageWanted(int32_t msgType);
-};
-
-}
-
-#endif
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index dbc863b..d543cab 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -3253,6 +3253,8 @@
status_t Parameters::calculatePictureFovs(float *horizFov, float *vertFov)
const {
+ // For external camera, use FOVs = (-1.0, -1.0) as default values. Calculate
+ // FOVs only if there is sufficient information.
if (fastInfo.isExternalCamera) {
if (horizFov != NULL) {
*horizFov = -1.0;
@@ -3260,16 +3262,29 @@
if (vertFov != NULL) {
*vertFov = -1.0;
}
- return OK;
}
camera_metadata_ro_entry_t sensorSize =
staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
- if (!sensorSize.count) return NO_INIT;
+ if (!sensorSize.count) {
+ // It is non-fatal for external cameras since it has default values.
+ if (fastInfo.isExternalCamera) {
+ return OK;
+ } else {
+ return NO_INIT;
+ }
+ }
camera_metadata_ro_entry_t pixelArraySize =
staticInfo(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, 2, 2);
- if (!pixelArraySize.count) return NO_INIT;
+ if (!pixelArraySize.count) {
+ // It is non-fatal for external cameras since it has default values.
+ if (fastInfo.isExternalCamera) {
+ return OK;
+ } else {
+ return NO_INIT;
+ }
+ }
float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
fastInfo.arrayHeight;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 022d686..e80838b 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -21,6 +21,7 @@
#include <cutils/properties.h>
#include <utils/CameraThreadState.h>
#include <utils/Log.h>
+#include <utils/SessionConfigurationUtils.h>
#include <utils/Trace.h>
#include <gui/Surface.h>
#include <camera/camera2/CaptureRequest.h>
@@ -492,7 +493,8 @@
return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
}
- res = checkOperatingMode(operatingMode, mDevice->info(), mCameraIdStr);
+ res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -550,247 +552,6 @@
return res;
}
-binder::Status CameraDeviceClient::checkSurfaceType(size_t numBufferProducers,
- bool deferredConsumer, int surfaceType) {
- if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
- ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
- __FUNCTION__, numBufferProducers, MAX_SURFACES_PER_STREAM);
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
- } else if ((numBufferProducers == 0) && (!deferredConsumer)) {
- ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "No valid consumers.");
- }
-
- bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
- (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
-
- if (deferredConsumer && !validSurfaceType) {
- ALOGE("%s: Target surface has invalid surfaceType = %d.", __FUNCTION__, surfaceType);
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
- }
-
- return binder::Status::ok();
-}
-
-binder::Status CameraDeviceClient::checkPhysicalCameraId(
- const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
- const String8 &logicalCameraId) {
- if (physicalCameraId.size() == 0) {
- return binder::Status::ok();
- }
- if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(),
- physicalCameraId.string()) == physicalCameraIds.end()) {
- String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
- logicalCameraId.string(), physicalCameraId.string());
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- return binder::Status::ok();
-}
-
-binder::Status CameraDeviceClient::checkOperatingMode(int operatingMode,
- const CameraMetadata &staticInfo, const String8 &cameraId) {
- if (operatingMode < 0) {
- String8 msg = String8::format(
- "Camera %s: Invalid operating mode %d requested", cameraId.string(), operatingMode);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- msg.string());
- }
-
- bool isConstrainedHighSpeed = (operatingMode == ICameraDeviceUser::CONSTRAINED_HIGH_SPEED_MODE);
- if (isConstrainedHighSpeed) {
- camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
- bool isConstrainedHighSpeedSupported = false;
- for(size_t i = 0; i < entry.count; ++i) {
- uint8_t capability = entry.data.u8[i];
- if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
- isConstrainedHighSpeedSupported = true;
- break;
- }
- }
- if (!isConstrainedHighSpeedSupported) {
- String8 msg = String8::format(
- "Camera %s: Try to create a constrained high speed configuration on a device"
- " that doesn't support it.", cameraId.string());
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- msg.string());
- }
- }
-
- return binder::Status::ok();
-}
-
-void CameraDeviceClient::mapStreamInfo(const OutputStreamInfo &streamInfo,
- camera3_stream_rotation_t rotation, String8 physicalId,
- hardware::camera::device::V3_4::Stream *stream /*out*/) {
- if (stream == nullptr) {
- return;
- }
-
- stream->v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
- stream->v3_2.width = streamInfo.width;
- stream->v3_2.height = streamInfo.height;
- stream->v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
- auto u = streamInfo.consumerUsage;
- camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
- stream->v3_2.usage = Camera3Device::mapToConsumerUsage(u);
- stream->v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
- stream->v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
- stream->v3_2.id = -1; // Invalid stream id
- stream->physicalCameraId = std::string(physicalId.string());
- stream->bufferSize = 0;
-}
-
-binder::Status
-CameraDeviceClient::convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
- const String8 &logicalCameraId, const CameraMetadata &deviceInfo,
- metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
- hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration,
- bool *unsupported) {
- auto operatingMode = sessionConfiguration.getOperatingMode();
- binder::Status res = checkOperatingMode(operatingMode, deviceInfo, logicalCameraId);
- if (!res.isOk()) {
- return res;
- }
-
- if (unsupported == nullptr) {
- String8 msg("unsupported nullptr");
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- *unsupported = false;
- auto ret = Camera3Device::mapToStreamConfigurationMode(
- static_cast<camera3_stream_configuration_mode_t> (operatingMode),
- /*out*/ &streamConfiguration.operationMode);
- if (ret != OK) {
- String8 msg = String8::format(
- "Camera %s: Failed mapping operating mode %d requested: %s (%d)",
- logicalCameraId.string(), operatingMode, strerror(-ret), ret);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- msg.string());
- }
-
- bool isInputValid = (sessionConfiguration.getInputWidth() > 0) &&
- (sessionConfiguration.getInputHeight() > 0) &&
- (sessionConfiguration.getInputFormat() > 0);
- auto outputConfigs = sessionConfiguration.getOutputConfigurations();
- size_t streamCount = outputConfigs.size();
- streamCount = isInputValid ? streamCount + 1 : streamCount;
- streamConfiguration.streams.resize(streamCount);
- size_t streamIdx = 0;
- if (isInputValid) {
- streamConfiguration.streams[streamIdx++] = {{/*streamId*/0,
- hardware::camera::device::V3_2::StreamType::INPUT,
- static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
- static_cast<uint32_t> (sessionConfiguration.getInputHeight()),
- Camera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
- /*usage*/ 0, HAL_DATASPACE_UNKNOWN,
- hardware::camera::device::V3_2::StreamRotation::ROTATION_0},
- /*physicalId*/ nullptr, /*bufferSize*/0};
- }
-
- for (const auto &it : outputConfigs) {
- const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
- it.getGraphicBufferProducers();
- bool deferredConsumer = it.isDeferred();
- String8 physicalCameraId = String8(it.getPhysicalCameraId());
- size_t numBufferProducers = bufferProducers.size();
- bool isStreamInfoValid = false;
- OutputStreamInfo streamInfo;
-
- res = checkSurfaceType(numBufferProducers, deferredConsumer, it.getSurfaceType());
- if (!res.isOk()) {
- return res;
- }
- res = checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
- logicalCameraId);
- if (!res.isOk()) {
- return res;
- }
-
- if (deferredConsumer) {
- streamInfo.width = it.getWidth();
- streamInfo.height = it.getHeight();
- streamInfo.format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- streamInfo.dataSpace = android_dataspace_t::HAL_DATASPACE_UNKNOWN;
- auto surfaceType = it.getSurfaceType();
- streamInfo.consumerUsage = GraphicBuffer::USAGE_HW_TEXTURE;
- if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
- streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
- }
- mapStreamInfo(streamInfo, CAMERA3_STREAM_ROTATION_0, physicalCameraId,
- &streamConfiguration.streams[streamIdx++]);
- isStreamInfoValid = true;
-
- if (numBufferProducers == 0) {
- continue;
- }
- }
-
- for (auto& bufferProducer : bufferProducers) {
- sp<Surface> surface;
- const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId);
- res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
- logicalCameraId,
- physicalCameraId.size() > 0 ? physicalDeviceInfo : deviceInfo );
-
- if (!res.isOk())
- return res;
-
- if (!isStreamInfoValid) {
- bool isDepthCompositeStream =
- camera3::DepthCompositeStream::isDepthCompositeStream(surface);
- bool isHeicCompositeStream =
- camera3::HeicCompositeStream::isHeicCompositeStream(surface);
- if (isDepthCompositeStream || isHeicCompositeStream) {
- // We need to take in to account that composite streams can have
- // additional internal camera streams.
- std::vector<OutputStreamInfo> compositeStreams;
- if (isDepthCompositeStream) {
- ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
- deviceInfo, &compositeStreams);
- } else {
- ret = camera3::HeicCompositeStream::getCompositeStreamInfo(streamInfo,
- deviceInfo, &compositeStreams);
- }
- if (ret != OK) {
- String8 msg = String8::format(
- "Camera %s: Failed adding composite streams: %s (%d)",
- logicalCameraId.string(), strerror(-ret), ret);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
-
- if (compositeStreams.size() == 0) {
- // No internal streams means composite stream not
- // supported.
- *unsupported = true;
- return binder::Status::ok();
- } else if (compositeStreams.size() > 1) {
- streamCount += compositeStreams.size() - 1;
- streamConfiguration.streams.resize(streamCount);
- }
-
- for (const auto& compositeStream : compositeStreams) {
- mapStreamInfo(compositeStream,
- static_cast<camera3_stream_rotation_t> (it.getRotation()),
- physicalCameraId, &streamConfiguration.streams[streamIdx++]);
- }
- } else {
- mapStreamInfo(streamInfo,
- static_cast<camera3_stream_rotation_t> (it.getRotation()),
- physicalCameraId, &streamConfiguration.streams[streamIdx++]);
- }
- isStreamInfoValid = true;
- }
- }
- }
- return binder::Status::ok();
-}
-
binder::Status CameraDeviceClient::isSessionConfigurationSupported(
const SessionConfiguration& sessionConfiguration, bool *status /*out*/) {
ATRACE_CALL();
@@ -806,7 +567,8 @@
}
auto operatingMode = sessionConfiguration.getOperatingMode();
- res = checkOperatingMode(operatingMode, mDevice->info(), mCameraIdStr);
+ res = SessionConfigurationUtils::checkOperatingMode(operatingMode, mDevice->info(),
+ mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -821,8 +583,9 @@
metadataGetter getMetadata = [this](const String8 &id) {return mDevice->infoPhysical(id);};
std::vector<std::string> physicalCameraIds;
mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = convertToHALStreamCombination(sessionConfiguration, mCameraIdStr,
- mDevice->info(), getMetadata, physicalCameraIds, streamConfiguration, &earlyExit);
+ res = SessionConfigurationUtils::convertToHALStreamCombination(sessionConfiguration,
+ mCameraIdStr, mDevice->info(), getMetadata, physicalCameraIds, streamConfiguration,
+ &earlyExit);
if (!res.isOk()) {
return res;
}
@@ -970,7 +733,7 @@
String8 physicalCameraId = String8(outputConfiguration.getPhysicalCameraId());
bool deferredConsumerOnly = deferredConsumer && numBufferProducers == 0;
- res = checkSurfaceType(numBufferProducers, deferredConsumer,
+ res = SessionConfigurationUtils::checkSurfaceType(numBufferProducers, deferredConsumer,
outputConfiguration.getSurfaceType());
if (!res.isOk()) {
return res;
@@ -981,7 +744,8 @@
}
std::vector<std::string> physicalCameraIds;
mProviderManager->isLogicalCamera(mCameraIdStr.string(), &physicalCameraIds);
- res = checkPhysicalCameraId(physicalCameraIds, physicalCameraId, mCameraIdStr);
+ res = SessionConfigurationUtils::checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
+ mCameraIdStr);
if (!res.isOk()) {
return res;
}
@@ -1009,8 +773,8 @@
}
sp<Surface> surface;
- res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
- mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
+ res = SessionConfigurationUtils::createSurfaceFromGbp(streamInfo, isStreamInfoValid,
+ surface, bufferProducer, mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
if (!res.isOk())
return res;
@@ -1313,8 +1077,9 @@
for (size_t i = 0; i < newOutputsMap.size(); i++) {
OutputStreamInfo outInfo;
sp<Surface> surface;
- res = createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false, surface,
- newOutputsMap.valueAt(i), mCameraIdStr, mDevice->infoPhysical(physicalCameraId));
+ res = SessionConfigurationUtils::createSurfaceFromGbp(outInfo, /*isStreamInfoValid*/ false,
+ surface, newOutputsMap.valueAt(i), mCameraIdStr,
+ mDevice->infoPhysical(physicalCameraId));
if (!res.isOk())
return res;
@@ -1364,226 +1129,6 @@
return res;
}
-bool CameraDeviceClient::isPublicFormat(int32_t format)
-{
- switch(format) {
- case HAL_PIXEL_FORMAT_RGBA_8888:
- case HAL_PIXEL_FORMAT_RGBX_8888:
- case HAL_PIXEL_FORMAT_RGB_888:
- case HAL_PIXEL_FORMAT_RGB_565:
- case HAL_PIXEL_FORMAT_BGRA_8888:
- case HAL_PIXEL_FORMAT_YV12:
- case HAL_PIXEL_FORMAT_Y8:
- case HAL_PIXEL_FORMAT_Y16:
- case HAL_PIXEL_FORMAT_RAW16:
- case HAL_PIXEL_FORMAT_RAW10:
- case HAL_PIXEL_FORMAT_RAW12:
- case HAL_PIXEL_FORMAT_RAW_OPAQUE:
- case HAL_PIXEL_FORMAT_BLOB:
- case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
- case HAL_PIXEL_FORMAT_YCbCr_420_888:
- case HAL_PIXEL_FORMAT_YCbCr_422_SP:
- case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- case HAL_PIXEL_FORMAT_YCbCr_422_I:
- return true;
- default:
- return false;
- }
-}
-
-binder::Status CameraDeviceClient::createSurfaceFromGbp(
- OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
- const String8 &cameraId, const CameraMetadata &physicalCameraMetadata) {
-
- // bufferProducer must be non-null
- if (gbp == nullptr) {
- String8 msg = String8::format("Camera %s: Surface is NULL", cameraId.string());
- ALOGW("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- // HACK b/10949105
- // Query consumer usage bits to set async operation mode for
- // GLConsumer using controlledByApp parameter.
- bool useAsync = false;
- uint64_t consumerUsage = 0;
- status_t err;
- if ((err = gbp->getConsumerUsage(&consumerUsage)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
- ALOGW("%s: Camera %s with consumer usage flag: %" PRIu64 ": Forcing asynchronous mode for stream",
- __FUNCTION__, cameraId.string(), consumerUsage);
- useAsync = true;
- }
-
- uint64_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
- GRALLOC_USAGE_RENDERSCRIPT;
- uint64_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
- GraphicBuffer::USAGE_HW_TEXTURE |
- GraphicBuffer::USAGE_HW_COMPOSER;
- bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
- (consumerUsage & allowedFlags) != 0;
-
- surface = new Surface(gbp, useAsync);
- ANativeWindow *anw = surface.get();
-
- int width, height, format;
- android_dataspace dataSpace;
- if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
- if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
- reinterpret_cast<int*>(&dataSpace))) != OK) {
- String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
- cameraId.string(), strerror(-err), err);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
- }
-
- // FIXME: remove this override since the default format should be
- // IMPLEMENTATION_DEFINED. b/9487482 & b/35317944
- if ((format >= HAL_PIXEL_FORMAT_RGBA_8888 && format <= HAL_PIXEL_FORMAT_BGRA_8888) &&
- ((consumerUsage & GRALLOC_USAGE_HW_MASK) &&
- ((consumerUsage & GRALLOC_USAGE_SW_READ_MASK) == 0))) {
- ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
- __FUNCTION__, cameraId.string(), format);
- format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- }
- // Round dimensions to the nearest dimensions available for this format
- if (flexibleConsumer && isPublicFormat(format) &&
- !CameraDeviceClient::roundBufferDimensionNearest(width, height,
- format, dataSpace, physicalCameraMetadata, /*out*/&width, /*out*/&height)) {
- String8 msg = String8::format("Camera %s: No supported stream configurations with "
- "format %#x defined, failed to create output stream",
- cameraId.string(), format);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
-
- if (!isStreamInfoValid) {
- streamInfo.width = width;
- streamInfo.height = height;
- streamInfo.format = format;
- streamInfo.dataSpace = dataSpace;
- streamInfo.consumerUsage = consumerUsage;
- return binder::Status::ok();
- }
- if (width != streamInfo.width) {
- String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
- cameraId.string(), width, streamInfo.width);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- if (height != streamInfo.height) {
- String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
- cameraId.string(), height, streamInfo.height);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- if (format != streamInfo.format) {
- String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
- cameraId.string(), format, streamInfo.format);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
- if (dataSpace != streamInfo.dataSpace) {
- String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
- cameraId.string(), dataSpace, streamInfo.dataSpace);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- //At the native side, there isn't a way to check whether 2 surfaces come from the same
- //surface class type. Use usage flag to approximate the comparison.
- if (consumerUsage != streamInfo.consumerUsage) {
- String8 msg = String8::format(
- "Camera %s:Surface usage flag doesn't match %" PRIu64 " vs %" PRIu64 "",
- cameraId.string(), consumerUsage, streamInfo.consumerUsage);
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
- }
- }
- return binder::Status::ok();
-}
-
-bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
- int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
- /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
-
- camera_metadata_ro_entry streamConfigs =
- (dataSpace == HAL_DATASPACE_DEPTH) ?
- info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
- (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) ?
- info.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS) :
- info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
-
- int32_t bestWidth = -1;
- int32_t bestHeight = -1;
-
- // Iterate through listed stream configurations and find the one with the smallest euclidean
- // distance from the given dimensions for the given format.
- for (size_t i = 0; i < streamConfigs.count; i += 4) {
- int32_t fmt = streamConfigs.data.i32[i];
- int32_t w = streamConfigs.data.i32[i + 1];
- int32_t h = streamConfigs.data.i32[i + 2];
-
- // Ignore input/output type for now
- if (fmt == format) {
- if (w == width && h == height) {
- bestWidth = width;
- bestHeight = height;
- break;
- } else if (w <= ROUNDING_WIDTH_CAP && (bestWidth == -1 ||
- CameraDeviceClient::euclidDistSquare(w, h, width, height) <
- CameraDeviceClient::euclidDistSquare(bestWidth, bestHeight, width, height))) {
- bestWidth = w;
- bestHeight = h;
- }
- }
- }
-
- if (bestWidth == -1) {
- // Return false if no configurations for this format were listed
- return false;
- }
-
- // Set the outputs to the closet width/height
- if (outWidth != NULL) {
- *outWidth = bestWidth;
- }
- if (outHeight != NULL) {
- *outHeight = bestHeight;
- }
-
- // Return true if at least one configuration for this format was listed
- return true;
-}
-
-int64_t CameraDeviceClient::euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
- int64_t d0 = x0 - x1;
- int64_t d1 = y0 - y1;
- return d0 * d0 + d1 * d1;
-}
-
// Create a request object from a template.
binder::Status CameraDeviceClient::createDefaultRequest(int templateId,
/*out*/
@@ -1896,8 +1441,9 @@
}
sp<Surface> surface;
- res = createSurfaceFromGbp(mStreamInfoMap[streamId], true /*isStreamInfoValid*/,
- surface, bufferProducer, mCameraIdStr, mDevice->infoPhysical(physicalId));
+ res = SessionConfigurationUtils::createSurfaceFromGbp(mStreamInfoMap[streamId],
+ true /*isStreamInfoValid*/, surface, bufferProducer, mCameraIdStr,
+ mDevice->infoPhysical(physicalId));
if (!res.isOk())
return res;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 5cd16ee..2807aee 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -204,16 +204,6 @@
virtual void notifyRequestQueueEmpty();
virtual void notifyRepeatingRequestError(long lastFrameNumber);
- // utility function to convert AIDL SessionConfiguration to HIDL
- // streamConfiguration. Also checks for validity of SessionConfiguration and
- // returns a non-ok binder::Status if the passed in session configuration
- // isn't valid.
- static binder::Status
- convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
- const String8 &cameraId, const CameraMetadata &deviceInfo,
- metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
- hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration,
- bool *earlyExit);
/**
* Interface used by independent components of CameraDeviceClient.
*/
@@ -266,18 +256,8 @@
/** Utility members */
binder::Status checkPidStatus(const char* checkLocation);
- static binder::Status checkOperatingMode(int operatingMode, const CameraMetadata &staticInfo,
- const String8 &cameraId);
- static binder::Status checkSurfaceType(size_t numBufferProducers, bool deferredConsumer,
- int surfaceType);
- static void mapStreamInfo(const OutputStreamInfo &streamInfo,
- camera3_stream_rotation_t rotation, String8 physicalId,
- hardware::camera::device::V3_4::Stream *stream /*out*/);
bool enforceRequestPermissions(CameraMetadata& metadata);
- // Find the square of the euclidean distance between two points
- static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
-
// Create an output stream with surface deferred for future.
binder::Status createDeferredSurfaceStreamLocked(
const hardware::camera2::params::OutputConfiguration &outputConfiguration,
@@ -288,33 +268,11 @@
// cases.
binder::Status setStreamTransformLocked(int streamId);
- // Find the closest dimensions for a given format in available stream configurations with
- // a width <= ROUNDING_WIDTH_CAP
- static const int32_t ROUNDING_WIDTH_CAP = 1920;
- static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
- android_dataspace dataSpace, const CameraMetadata& info,
- /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
-
- //check if format is not custom format
- static bool isPublicFormat(int32_t format);
-
- // Create a Surface from an IGraphicBufferProducer. Returns error if
- // IGraphicBufferProducer's property doesn't match with streamInfo
- static binder::Status createSurfaceFromGbp(OutputStreamInfo& streamInfo, bool isStreamInfoValid,
- sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp, const String8 &cameraId,
- const CameraMetadata &physicalCameraMetadata);
-
-
// Utility method to insert the surface into SurfaceMap
binder::Status insertGbpLocked(const sp<IGraphicBufferProducer>& gbp,
/*out*/SurfaceMap* surfaceMap, /*out*/Vector<int32_t>* streamIds,
/*out*/int32_t* currentStreamId);
- // Check that the physicalCameraId passed in is spported by the camera
- // device.
- static binder::Status checkPhysicalCameraId(const std::vector<std::string> &physicalCameraIds,
- const String8 &physicalCameraId, const String8 &logicalCameraId);
-
// IGraphicsBufferProducer binder -> Stream ID + Surface ID for output streams
KeyedVector<sp<IBinder>, StreamSurfaceId> mStreamMap;
@@ -346,7 +304,6 @@
KeyedVector<sp<IBinder>, sp<CompositeStream>> mCompositeStreamMap;
- static const int32_t MAX_SURFACES_PER_STREAM = 4;
sp<CameraProviderManager> mProviderManager;
};
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index a63f402..4fe5adf 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -510,7 +510,8 @@
sp<camera3::StatusTracker> statusTracker = mStatusTracker.promote();
if (statusTracker != nullptr) {
- mStatusId = statusTracker->addComponent();
+ std::string name = std::string("HeicStream ") + std::to_string(getStreamId());
+ mStatusId = statusTracker->addComponent(name);
}
run("HeicCompositeStreamProc");
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 876d70d..e9dcb01 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -417,46 +417,6 @@
return mapToStatusT(status);
}
-status_t CameraProviderManager::openSession(const std::string &id,
- const sp<device::V1_0::ICameraDeviceCallback>& callback,
- /*out*/
- sp<device::V1_0::ICameraDevice> *session) {
-
- std::lock_guard<std::mutex> lock(mInterfaceMutex);
-
- auto deviceInfo = findDeviceInfoLocked(id,
- /*minVersion*/ {1,0}, /*maxVersion*/ {2,0});
- if (deviceInfo == nullptr) return NAME_NOT_FOUND;
-
- auto *deviceInfo1 = static_cast<ProviderInfo::DeviceInfo1*>(deviceInfo);
- sp<ProviderInfo> parentProvider = deviceInfo->mParentProvider.promote();
- if (parentProvider == nullptr) {
- return DEAD_OBJECT;
- }
- const sp<provider::V2_4::ICameraProvider> provider = parentProvider->startProviderInterface();
- if (provider == nullptr) {
- return DEAD_OBJECT;
- }
- saveRef(DeviceMode::CAMERA, id, provider);
-
- auto interface = deviceInfo1->startDeviceInterface<
- CameraProviderManager::ProviderInfo::DeviceInfo1::InterfaceT>();
- if (interface == nullptr) {
- return DEAD_OBJECT;
- }
- hardware::Return<Status> status = interface->open(callback);
- if (!status.isOk()) {
- removeRef(DeviceMode::CAMERA, id);
- ALOGE("%s: Transaction error opening a session for camera device %s: %s",
- __FUNCTION__, id.c_str(), status.description().c_str());
- return DEAD_OBJECT;
- }
- if (status == Status::OK) {
- *session = interface;
- }
- return mapToStatusT(status);
-}
-
void CameraProviderManager::saveRef(DeviceMode usageType, const std::string &cameraId,
sp<provider::V2_4::ICameraProvider> provider) {
if (!kEnableLazyHal) {
@@ -1567,9 +1527,9 @@
std::unique_ptr<DeviceInfo> deviceInfo;
switch (major) {
case 1:
- deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, mProviderTagid,
- id, minor);
- break;
+ ALOGE("%s: Device %s: Unsupported HIDL device HAL major version %d:", __FUNCTION__,
+ name.c_str(), major);
+ return BAD_VALUE;
case 3:
deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, mProviderTagid,
id, minor);
@@ -2113,35 +2073,6 @@
}
template<>
-sp<device::V1_0::ICameraDevice>
-CameraProviderManager::ProviderInfo::startDeviceInterface
- <device::V1_0::ICameraDevice>(const std::string &name) {
- Status status;
- sp<device::V1_0::ICameraDevice> cameraInterface;
- hardware::Return<void> ret;
- const sp<provider::V2_4::ICameraProvider> interface = startProviderInterface();
- if (interface == nullptr) {
- return nullptr;
- }
- ret = interface->getCameraDeviceInterface_V1_x(name, [&status, &cameraInterface](
- Status s, sp<device::V1_0::ICameraDevice> interface) {
- status = s;
- cameraInterface = interface;
- });
- if (!ret.isOk()) {
- ALOGE("%s: Transaction error trying to obtain interface for camera device %s: %s",
- __FUNCTION__, name.c_str(), ret.description().c_str());
- return nullptr;
- }
- if (status != Status::OK) {
- ALOGE("%s: Unable to obtain interface for camera device %s: %s", __FUNCTION__,
- name.c_str(), statusToString(status));
- return nullptr;
- }
- return cameraInterface;
-}
-
-template<>
sp<device::V3_2::ICameraDevice>
CameraProviderManager::ProviderInfo::startDeviceInterface
<device::V3_2::ICameraDevice>(const std::string &name) {
@@ -2194,126 +2125,6 @@
return mapToStatusT(s);
}
-CameraProviderManager::ProviderInfo::DeviceInfo1::DeviceInfo1(const std::string& name,
- const metadata_vendor_id_t tagId, const std::string &id,
- uint16_t minorVersion,
- const CameraResourceCost& resourceCost,
- sp<ProviderInfo> parentProvider,
- const std::vector<std::string>& publicCameraIds,
- sp<InterfaceT> interface) :
- DeviceInfo(name, tagId, id, hardware::hidl_version{1, minorVersion},
- publicCameraIds, resourceCost, parentProvider) {
- // Get default parameters and initialize flash unit availability
- // Requires powering on the camera device
- hardware::Return<Status> status = interface->open(nullptr);
- if (!status.isOk()) {
- ALOGE("%s: Transaction error opening camera device %s to check for a flash unit: %s",
- __FUNCTION__, id.c_str(), status.description().c_str());
- return;
- }
- if (status != Status::OK) {
- ALOGE("%s: Unable to open camera device %s to check for a flash unit: %s", __FUNCTION__,
- id.c_str(), CameraProviderManager::statusToString(status));
- return;
- }
- hardware::Return<void> ret;
- ret = interface->getParameters([this](const hardware::hidl_string& parms) {
- mDefaultParameters.unflatten(String8(parms.c_str()));
- });
- if (!ret.isOk()) {
- ALOGE("%s: Transaction error reading camera device %s params to check for a flash unit: %s",
- __FUNCTION__, id.c_str(), status.description().c_str());
- return;
- }
- const char *flashMode =
- mDefaultParameters.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES);
- if (flashMode && strstr(flashMode, CameraParameters::FLASH_MODE_TORCH)) {
- mHasFlashUnit = true;
- }
-
- status_t res = cacheCameraInfo(interface);
- if (res != OK) {
- ALOGE("%s: Could not cache CameraInfo", __FUNCTION__);
- return;
- }
-
- ret = interface->close();
- if (!ret.isOk()) {
- ALOGE("%s: Transaction error closing camera device %s after check for a flash unit: %s",
- __FUNCTION__, id.c_str(), status.description().c_str());
- }
-
- if (!kEnableLazyHal) {
- // Save HAL reference indefinitely
- mSavedInterface = interface;
- }
-}
-
-CameraProviderManager::ProviderInfo::DeviceInfo1::~DeviceInfo1() {}
-
-status_t CameraProviderManager::ProviderInfo::DeviceInfo1::setTorchMode(bool enabled) {
- return setTorchModeForDevice<InterfaceT>(enabled);
-}
-
-status_t CameraProviderManager::ProviderInfo::DeviceInfo1::getCameraInfo(
- hardware::CameraInfo *info) const {
- if (info == nullptr) return BAD_VALUE;
- *info = mInfo;
- return OK;
-}
-
-status_t CameraProviderManager::ProviderInfo::DeviceInfo1::cacheCameraInfo(
- sp<CameraProviderManager::ProviderInfo::DeviceInfo1::InterfaceT> interface) {
- Status status;
- device::V1_0::CameraInfo cInfo;
- hardware::Return<void> ret;
- ret = interface->getCameraInfo([&status, &cInfo](Status s, device::V1_0::CameraInfo camInfo) {
- status = s;
- cInfo = camInfo;
- });
- if (!ret.isOk()) {
- ALOGE("%s: Transaction error reading camera info from device %s: %s",
- __FUNCTION__, mId.c_str(), ret.description().c_str());
- return DEAD_OBJECT;
- }
- if (status != Status::OK) {
- return mapToStatusT(status);
- }
-
- switch(cInfo.facing) {
- case device::V1_0::CameraFacing::BACK:
- mInfo.facing = hardware::CAMERA_FACING_BACK;
- break;
- case device::V1_0::CameraFacing::EXTERNAL:
- // Map external to front for legacy API
- case device::V1_0::CameraFacing::FRONT:
- mInfo.facing = hardware::CAMERA_FACING_FRONT;
- break;
- default:
- ALOGW("%s: Device %s: Unknown camera facing: %d",
- __FUNCTION__, mId.c_str(), cInfo.facing);
- mInfo.facing = hardware::CAMERA_FACING_BACK;
- }
- mInfo.orientation = cInfo.orientation;
-
- return OK;
-}
-
-status_t CameraProviderManager::ProviderInfo::DeviceInfo1::dumpState(int fd) {
- native_handle_t* handle = native_handle_create(1,0);
- handle->data[0] = fd;
- const sp<InterfaceT> interface = startDeviceInterface<InterfaceT>();
- if (interface == nullptr) {
- return DEAD_OBJECT;
- }
- hardware::Return<Status> s = interface->dumpState(handle);
- native_handle_delete(handle);
- if (!s.isOk()) {
- return INVALID_OPERATION;
- }
- return mapToStatusT(s);
-}
-
CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
const metadata_vendor_id_t tagId, const std::string &id,
uint16_t minorVersion,
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index a0e5f8f..8727e7f 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -270,11 +270,6 @@
/*out*/
sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session);
- status_t openSession(const std::string &id,
- const sp<hardware::camera::device::V1_0::ICameraDeviceCallback>& callback,
- /*out*/
- sp<hardware::camera::device::V1_0::ICameraDevice> *session);
-
/**
* Save the ICameraProvider while it is being used by a camera or torch client
*/
@@ -521,27 +516,6 @@
// physical camera IDs.
std::vector<std::string> mProviderPublicCameraIds;
- // HALv1-specific camera fields, including the actual device interface
- struct DeviceInfo1 : public DeviceInfo {
- typedef hardware::camera::device::V1_0::ICameraDevice InterfaceT;
-
- virtual status_t setTorchMode(bool enabled) override;
- virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
- //In case of Device1Info assume that we are always API1 compatible
- virtual bool isAPI1Compatible() const override { return true; }
- virtual status_t dumpState(int fd) override;
- DeviceInfo1(const std::string& name, const metadata_vendor_id_t tagId,
- const std::string &id, uint16_t minorVersion,
- const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
- sp<ProviderInfo> parentProvider,
- const std::vector<std::string>& publicCameraIds,
- sp<InterfaceT> interface);
- virtual ~DeviceInfo1();
- private:
- CameraParameters2 mDefaultParameters;
- status_t cacheCameraInfo(sp<InterfaceT> interface);
- };
-
// HALv3-specific camera fields, including the actual device interface
struct DeviceInfo3 : public DeviceInfo {
typedef hardware::camera::device::V3_2::ICameraDevice InterfaceT;
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
deleted file mode 100644
index 62ef681..0000000
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#define LOG_TAG "CameraHardwareInterface"
-//#define LOG_NDEBUG 0
-
-#include <inttypes.h>
-#include <media/hardware/HardwareAPI.h> // For VideoNativeHandleMetadata
-#include "CameraHardwareInterface.h"
-
-namespace android {
-
-using namespace hardware::camera::device::V1_0;
-using namespace hardware::camera::common::V1_0;
-using hardware::hidl_handle;
-
-CameraHardwareInterface::~CameraHardwareInterface()
-{
- ALOGI("Destroying camera %s", mName.string());
- if (mHidlDevice != nullptr) {
- mHidlDevice->close();
- mHidlDevice.clear();
- cleanupCirculatingBuffers();
- }
-}
-
-status_t CameraHardwareInterface::initialize(sp<CameraProviderManager> manager) {
- ALOGI("Opening camera %s", mName.string());
-
- status_t ret = manager->openSession(mName.string(), this, &mHidlDevice);
- if (ret != OK) {
- ALOGE("%s: openSession failed! %s (%d)", __FUNCTION__, strerror(-ret), ret);
- }
- return ret;
-}
-
-status_t CameraHardwareInterface::setPreviewScalingMode(int scalingMode)
-{
- int rc = OK;
- mPreviewScalingMode = scalingMode;
- if (mPreviewWindow != nullptr) {
- rc = native_window_set_scaling_mode(mPreviewWindow.get(),
- scalingMode);
- }
- return rc;
-}
-
-status_t CameraHardwareInterface::setPreviewTransform(int transform) {
- int rc = OK;
- mPreviewTransform = transform;
- if (mPreviewWindow != nullptr) {
- rc = native_window_set_buffers_transform(mPreviewWindow.get(),
- mPreviewTransform);
- }
- return rc;
-}
-
-/**
- * Implementation of android::hardware::camera::device::V1_0::ICameraDeviceCallback
- */
-hardware::Return<void> CameraHardwareInterface::notifyCallback(
- NotifyCallbackMsg msgType, int32_t ext1, int32_t ext2) {
- sNotifyCb((int32_t) msgType, ext1, ext2, (void*) this);
- return hardware::Void();
-}
-
-hardware::Return<uint32_t> CameraHardwareInterface::registerMemory(
- const hardware::hidl_handle& descriptor,
- uint32_t bufferSize, uint32_t bufferCount) {
- if (descriptor->numFds != 1) {
- ALOGE("%s: camera memory descriptor has numFds %d (expect 1)",
- __FUNCTION__, descriptor->numFds);
- return 0;
- }
- if (descriptor->data[0] < 0) {
- ALOGE("%s: camera memory descriptor has FD %d (expect >= 0)",
- __FUNCTION__, descriptor->data[0]);
- return 0;
- }
-
- camera_memory_t* mem = sGetMemory(descriptor->data[0], bufferSize, bufferCount, this);
- sp<CameraHeapMemory> camMem(static_cast<CameraHeapMemory *>(mem->handle));
- int memPoolId = camMem->mHeap->getHeapID();
- if (memPoolId < 0) {
- ALOGE("%s: CameraHeapMemory has FD %d (expect >= 0)", __FUNCTION__, memPoolId);
- return 0;
- }
- std::lock_guard<std::mutex> lock(mHidlMemPoolMapLock);
- mHidlMemPoolMap.insert(std::make_pair(memPoolId, mem));
- return memPoolId;
-}
-
-hardware::Return<void> CameraHardwareInterface::unregisterMemory(uint32_t memId) {
- camera_memory_t* mem = nullptr;
- {
- std::lock_guard<std::mutex> lock(mHidlMemPoolMapLock);
- if (mHidlMemPoolMap.count(memId) == 0) {
- ALOGE("%s: memory pool ID %d not found", __FUNCTION__, memId);
- return hardware::Void();
- }
- mem = mHidlMemPoolMap.at(memId);
- mHidlMemPoolMap.erase(memId);
- }
- sPutMemory(mem);
- return hardware::Void();
-}
-
-hardware::Return<void> CameraHardwareInterface::dataCallback(
- DataCallbackMsg msgType, uint32_t data, uint32_t bufferIndex,
- const hardware::camera::device::V1_0::CameraFrameMetadata& metadata) {
- camera_memory_t* mem = nullptr;
- {
- std::lock_guard<std::mutex> lock(mHidlMemPoolMapLock);
- if (mHidlMemPoolMap.count(data) == 0) {
- ALOGE("%s: memory pool ID %d not found", __FUNCTION__, data);
- return hardware::Void();
- }
- mem = mHidlMemPoolMap.at(data);
- }
- camera_frame_metadata_t md;
- md.number_of_faces = metadata.faces.size();
- md.faces = (camera_face_t*) metadata.faces.data();
- sDataCb((int32_t) msgType, mem, bufferIndex, &md, this);
- return hardware::Void();
-}
-
-hardware::Return<void> CameraHardwareInterface::dataCallbackTimestamp(
- DataCallbackMsg msgType, uint32_t data,
- uint32_t bufferIndex, int64_t timestamp) {
- camera_memory_t* mem = nullptr;
- {
- std::lock_guard<std::mutex> lock(mHidlMemPoolMapLock);
- if (mHidlMemPoolMap.count(data) == 0) {
- ALOGE("%s: memory pool ID %d not found", __FUNCTION__, data);
- return hardware::Void();
- }
- mem = mHidlMemPoolMap.at(data);
- }
- sDataCbTimestamp(timestamp, (int32_t) msgType, mem, bufferIndex, this);
- return hardware::Void();
-}
-
-hardware::Return<void> CameraHardwareInterface::handleCallbackTimestamp(
- DataCallbackMsg msgType, const hidl_handle& frameData, uint32_t data,
- uint32_t bufferIndex, int64_t timestamp) {
- camera_memory_t* mem = nullptr;
- {
- std::lock_guard<std::mutex> lock(mHidlMemPoolMapLock);
- if (mHidlMemPoolMap.count(data) == 0) {
- ALOGE("%s: memory pool ID %d not found", __FUNCTION__, data);
- return hardware::Void();
- }
- mem = mHidlMemPoolMap.at(data);
- }
- sp<CameraHeapMemory> heapMem(static_cast<CameraHeapMemory *>(mem->handle));
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
- heapMem->mBuffers[bufferIndex]->unsecurePointer();
- md->pHandle = const_cast<native_handle_t*>(frameData.getNativeHandle());
- sDataCbTimestamp(timestamp, (int32_t) msgType, mem, bufferIndex, this);
- return hardware::Void();
-}
-
-hardware::Return<void> CameraHardwareInterface::handleCallbackTimestampBatch(
- DataCallbackMsg msgType,
- const hardware::hidl_vec<hardware::camera::device::V1_0::HandleTimestampMessage>& messages) {
- std::vector<android::HandleTimestampMessage> msgs;
- msgs.reserve(messages.size());
- {
- std::lock_guard<std::mutex> lock(mHidlMemPoolMapLock);
- for (const auto& hidl_msg : messages) {
- if (mHidlMemPoolMap.count(hidl_msg.data) == 0) {
- ALOGE("%s: memory pool ID %d not found", __FUNCTION__, hidl_msg.data);
- return hardware::Void();
- }
- sp<CameraHeapMemory> mem(
- static_cast<CameraHeapMemory *>(mHidlMemPoolMap.at(hidl_msg.data)->handle));
-
- if (hidl_msg.bufferIndex >= mem->mNumBufs) {
- ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
- hidl_msg.bufferIndex, mem->mNumBufs);
- return hardware::Void();
- }
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
- mem->mBuffers[hidl_msg.bufferIndex]->unsecurePointer();
- md->pHandle = const_cast<native_handle_t*>(hidl_msg.frameData.getNativeHandle());
-
- msgs.push_back({hidl_msg.timestamp, mem->mBuffers[hidl_msg.bufferIndex]});
- }
- }
- mDataCbTimestampBatch((int32_t) msgType, msgs, mCbUser);
- return hardware::Void();
-}
-
-std::pair<bool, uint64_t> CameraHardwareInterface::getBufferId(
- ANativeWindowBuffer* anb) {
- std::lock_guard<std::mutex> lock(mBufferIdMapLock);
-
- buffer_handle_t& buf = anb->handle;
- auto it = mBufferIdMap.find(buf);
- if (it == mBufferIdMap.end()) {
- uint64_t bufId = mNextBufferId++;
- mBufferIdMap[buf] = bufId;
- mReversedBufMap[bufId] = anb;
- return std::make_pair(true, bufId);
- } else {
- return std::make_pair(false, it->second);
- }
-}
-
-void CameraHardwareInterface::cleanupCirculatingBuffers() {
- std::lock_guard<std::mutex> lock(mBufferIdMapLock);
- mBufferIdMap.clear();
- mReversedBufMap.clear();
-}
-
-hardware::Return<void>
-CameraHardwareInterface::dequeueBuffer(dequeueBuffer_cb _hidl_cb) {
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return hardware::Void();
- }
- ANativeWindowBuffer* anb;
- int rc = native_window_dequeue_buffer_and_wait(a, &anb);
- Status s = Status::INTERNAL_ERROR;
- uint64_t bufferId = 0;
- uint32_t stride = 0;
- hidl_handle buf = nullptr;
- if (rc == OK) {
- s = Status::OK;
- auto pair = getBufferId(anb);
- buf = (pair.first) ? anb->handle : nullptr;
- bufferId = pair.second;
- stride = anb->stride;
- }
-
- _hidl_cb(s, bufferId, buf, stride);
- return hardware::Void();
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::enqueueBuffer(uint64_t bufferId) {
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return Status::INTERNAL_ERROR;
- }
- if (mReversedBufMap.count(bufferId) == 0) {
- ALOGE("%s: bufferId %" PRIu64 " not found", __FUNCTION__, bufferId);
- return Status::ILLEGAL_ARGUMENT;
- }
- int rc = a->queueBuffer(a, mReversedBufMap.at(bufferId), -1);
- if (rc == 0) {
- return Status::OK;
- }
- return Status::INTERNAL_ERROR;
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::cancelBuffer(uint64_t bufferId) {
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return Status::INTERNAL_ERROR;
- }
- if (mReversedBufMap.count(bufferId) == 0) {
- ALOGE("%s: bufferId %" PRIu64 " not found", __FUNCTION__, bufferId);
- return Status::ILLEGAL_ARGUMENT;
- }
- int rc = a->cancelBuffer(a, mReversedBufMap.at(bufferId), -1);
- if (rc == 0) {
- return Status::OK;
- }
- return Status::INTERNAL_ERROR;
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::setBufferCount(uint32_t count) {
- ANativeWindow *a = mPreviewWindow.get();
- if (a != nullptr) {
- // Workaround for b/27039775
- // Previously, setting the buffer count would reset the buffer
- // queue's flag that allows for all buffers to be dequeued on the
- // producer side, instead of just the producer's declared max count,
- // if no filled buffers have yet been queued by the producer. This
- // reset no longer happens, but some HALs depend on this behavior,
- // so it needs to be maintained for HAL backwards compatibility.
- // Simulate the prior behavior by disconnecting/reconnecting to the
- // window and setting the values again. This has the drawback of
- // actually causing memory reallocation, which may not have happened
- // in the past.
- native_window_api_disconnect(a, NATIVE_WINDOW_API_CAMERA);
- native_window_api_connect(a, NATIVE_WINDOW_API_CAMERA);
- if (mPreviewScalingMode != NOT_SET) {
- native_window_set_scaling_mode(a, mPreviewScalingMode);
- }
- if (mPreviewTransform != NOT_SET) {
- native_window_set_buffers_transform(a, mPreviewTransform);
- }
- if (mPreviewWidth != NOT_SET) {
- native_window_set_buffers_dimensions(a,
- mPreviewWidth, mPreviewHeight);
- native_window_set_buffers_format(a, mPreviewFormat);
- }
- if (mPreviewUsage != 0) {
- native_window_set_usage(a, mPreviewUsage);
- }
- if (mPreviewSwapInterval != NOT_SET) {
- a->setSwapInterval(a, mPreviewSwapInterval);
- }
- if (mPreviewCrop.left != NOT_SET) {
- native_window_set_crop(a, &(mPreviewCrop));
- }
- }
- int rc = native_window_set_buffer_count(a, count);
- if (rc == OK) {
- cleanupCirculatingBuffers();
- return Status::OK;
- }
- return Status::INTERNAL_ERROR;
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::setBuffersGeometry(
- uint32_t w, uint32_t h, hardware::graphics::common::V1_0::PixelFormat format) {
- Status s = Status::INTERNAL_ERROR;
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return s;
- }
- mPreviewWidth = w;
- mPreviewHeight = h;
- mPreviewFormat = (int) format;
- int rc = native_window_set_buffers_dimensions(a, w, h);
- if (rc == OK) {
- rc = native_window_set_buffers_format(a, mPreviewFormat);
- }
- if (rc == OK) {
- cleanupCirculatingBuffers();
- s = Status::OK;
- }
- return s;
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::setCrop(int32_t left, int32_t top, int32_t right, int32_t bottom) {
- Status s = Status::INTERNAL_ERROR;
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return s;
- }
- mPreviewCrop.left = left;
- mPreviewCrop.top = top;
- mPreviewCrop.right = right;
- mPreviewCrop.bottom = bottom;
- int rc = native_window_set_crop(a, &mPreviewCrop);
- if (rc == OK) {
- s = Status::OK;
- }
- return s;
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::setUsage(hardware::graphics::common::V1_0::BufferUsage usage) {
- Status s = Status::INTERNAL_ERROR;
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return s;
- }
- mPreviewUsage = static_cast<uint64_t> (usage);
- int rc = native_window_set_usage(a, mPreviewUsage);
- if (rc == OK) {
- cleanupCirculatingBuffers();
- s = Status::OK;
- }
- return s;
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::setSwapInterval(int32_t interval) {
- Status s = Status::INTERNAL_ERROR;
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return s;
- }
- mPreviewSwapInterval = interval;
- int rc = a->setSwapInterval(a, interval);
- if (rc == OK) {
- s = Status::OK;
- }
- return s;
-}
-
-hardware::Return<void>
-CameraHardwareInterface::getMinUndequeuedBufferCount(getMinUndequeuedBufferCount_cb _hidl_cb) {
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return hardware::Void();
- }
- int count = 0;
- int rc = a->query(a, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &count);
- Status s = Status::INTERNAL_ERROR;
- if (rc == OK) {
- s = Status::OK;
- }
- _hidl_cb(s, count);
- return hardware::Void();
-}
-
-hardware::Return<Status>
-CameraHardwareInterface::setTimestamp(int64_t timestamp) {
- Status s = Status::INTERNAL_ERROR;
- ANativeWindow *a = mPreviewWindow.get();
- if (a == nullptr) {
- ALOGE("%s: preview window is null", __FUNCTION__);
- return s;
- }
- int rc = native_window_set_buffers_timestamp(a, timestamp);
- if (rc == OK) {
- s = Status::OK;
- }
- return s;
-}
-
-status_t CameraHardwareInterface::setPreviewWindow(const sp<ANativeWindow>& buf)
-{
- ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- mPreviewWindow = buf;
- if (buf != nullptr) {
- if (mPreviewScalingMode != NOT_SET) {
- setPreviewScalingMode(mPreviewScalingMode);
- }
- if (mPreviewTransform != NOT_SET) {
- setPreviewTransform(mPreviewTransform);
- }
- }
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->setPreviewWindow(buf.get() ? this : nullptr));
- }
- return INVALID_OPERATION;
-}
-
-void CameraHardwareInterface::setCallbacks(notify_callback notify_cb,
- data_callback data_cb,
- data_callback_timestamp data_cb_timestamp,
- data_callback_timestamp_batch data_cb_timestamp_batch,
- void* user)
-{
- mNotifyCb = notify_cb;
- mDataCb = data_cb;
- mDataCbTimestamp = data_cb_timestamp;
- mDataCbTimestampBatch = data_cb_timestamp_batch;
- mCbUser = user;
-
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
-}
-
-void CameraHardwareInterface::enableMsgType(int32_t msgType)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- mHidlDevice->enableMsgType(msgType);
- }
-}
-
-void CameraHardwareInterface::disableMsgType(int32_t msgType)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- mHidlDevice->disableMsgType(msgType);
- }
-}
-
-int CameraHardwareInterface::msgTypeEnabled(int32_t msgType)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return mHidlDevice->msgTypeEnabled(msgType);
- }
- return false;
-}
-
-status_t CameraHardwareInterface::startPreview()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->startPreview());
- }
- return INVALID_OPERATION;
-}
-
-void CameraHardwareInterface::stopPreview()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- mHidlDevice->stopPreview();
- }
-}
-
-int CameraHardwareInterface::previewEnabled()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return mHidlDevice->previewEnabled();
- }
- return false;
-}
-
-status_t CameraHardwareInterface::storeMetaDataInBuffers(int enable)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->storeMetaDataInBuffers(enable));
- }
- return enable ? INVALID_OPERATION: OK;
-}
-
-status_t CameraHardwareInterface::startRecording()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->startRecording());
- }
- return INVALID_OPERATION;
-}
-
-/**
- * Stop a previously started recording.
- */
-void CameraHardwareInterface::stopRecording()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- mHidlDevice->stopRecording();
- }
-}
-
-/**
- * Returns true if recording is enabled.
- */
-int CameraHardwareInterface::recordingEnabled()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return mHidlDevice->recordingEnabled();
- }
- return false;
-}
-
-void CameraHardwareInterface::releaseRecordingFrame(const sp<IMemory>& mem)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
- int heapId = heap->getHeapID();
- int bufferIndex = offset / size;
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- if (size == sizeof(VideoNativeHandleMetadata)) {
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->unsecurePointer();
- // Caching the handle here because md->pHandle will be subject to HAL's edit
- native_handle_t* nh = md->pHandle;
- hidl_handle frame = nh;
- mHidlDevice->releaseRecordingFrameHandle(heapId, bufferIndex, frame);
- native_handle_close(nh);
- native_handle_delete(nh);
- } else {
- mHidlDevice->releaseRecordingFrame(heapId, bufferIndex);
- }
- }
-}
-
-void CameraHardwareInterface::releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- size_t n = frames.size();
- std::vector<VideoFrameMessage> msgs;
- msgs.reserve(n);
- for (auto& mem : frames) {
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- ssize_t offset;
- size_t size;
- sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
- if (size == sizeof(VideoNativeHandleMetadata)) {
- uint32_t heapId = heap->getHeapID();
- uint32_t bufferIndex = offset / size;
- // TODO: Using unsecurePointer() has some associated security pitfalls
- // (see declaration for details).
- // Either document why it is safe in this case or address the
- // issue (e.g. by copying).
- VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->unsecurePointer();
- // Caching the handle here because md->pHandle will be subject to HAL's edit
- native_handle_t* nh = md->pHandle;
- VideoFrameMessage msg;
- msgs.push_back({nh, heapId, bufferIndex});
- } else {
- ALOGE("%s only supports VideoNativeHandleMetadata mode", __FUNCTION__);
- return;
- }
- }
- }
-
- mHidlDevice->releaseRecordingFrameHandleBatch(msgs);
-
- for (auto& msg : msgs) {
- native_handle_t* nh = const_cast<native_handle_t*>(msg.frameData.getNativeHandle());
- native_handle_close(nh);
- native_handle_delete(nh);
- }
-}
-
-status_t CameraHardwareInterface::autoFocus()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->autoFocus());
- }
- return INVALID_OPERATION;
-}
-
-status_t CameraHardwareInterface::cancelAutoFocus()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->cancelAutoFocus());
- }
- return INVALID_OPERATION;
-}
-
-status_t CameraHardwareInterface::takePicture()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->takePicture());
- }
- return INVALID_OPERATION;
-}
-
-status_t CameraHardwareInterface::cancelPicture()
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->cancelPicture());
- }
- return INVALID_OPERATION;
-}
-
-status_t CameraHardwareInterface::setParameters(const CameraParameters ¶ms)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->setParameters(params.flatten().string()));
- }
- return INVALID_OPERATION;
-}
-
-CameraParameters CameraHardwareInterface::getParameters() const
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- CameraParameters parms;
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- hardware::hidl_string outParam;
- mHidlDevice->getParameters(
- [&outParam](const auto& outStr) {
- outParam = outStr;
- });
- String8 tmp(outParam.c_str());
- parms.unflatten(tmp);
- }
- return parms;
-}
-
-status_t CameraHardwareInterface::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- return CameraProviderManager::mapToStatusT(
- mHidlDevice->sendCommand((CommandType) cmd, arg1, arg2));
- }
- return INVALID_OPERATION;
-}
-
-/**
- * Release the hardware resources owned by this object. Note that this is
- * *not* done in the destructor.
- */
-void CameraHardwareInterface::release() {
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- mHidlDevice->close();
- mHidlDevice.clear();
- }
-}
-
-/**
- * Dump state of the camera hardware
- */
-status_t CameraHardwareInterface::dump(int fd, const Vector<String16>& /*args*/) const
-{
- ALOGV("%s(%s)", __FUNCTION__, mName.string());
- if (CC_LIKELY(mHidlDevice != nullptr)) {
- native_handle_t* handle = native_handle_create(1,0);
- handle->data[0] = fd;
- Status s = mHidlDevice->dumpState(handle);
- native_handle_delete(handle);
- return CameraProviderManager::mapToStatusT(s);
- }
- return OK; // It's fine if the HAL doesn't implement dump()
-}
-
-void CameraHardwareInterface::sNotifyCb(int32_t msg_type, int32_t ext1,
- int32_t ext2, void *user)
-{
- ALOGV("%s", __FUNCTION__);
- CameraHardwareInterface *object =
- static_cast<CameraHardwareInterface *>(user);
- object->mNotifyCb(msg_type, ext1, ext2, object->mCbUser);
-}
-
-void CameraHardwareInterface::sDataCb(int32_t msg_type,
- const camera_memory_t *data, unsigned int index,
- camera_frame_metadata_t *metadata,
- void *user)
-{
- ALOGV("%s", __FUNCTION__);
- CameraHardwareInterface *object =
- static_cast<CameraHardwareInterface *>(user);
- sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
- if (index >= mem->mNumBufs) {
- ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
- index, mem->mNumBufs);
- return;
- }
- object->mDataCb(msg_type, mem->mBuffers[index], metadata, object->mCbUser);
-}
-
-void CameraHardwareInterface::sDataCbTimestamp(nsecs_t timestamp, int32_t msg_type,
- const camera_memory_t *data, unsigned index,
- void *user)
-{
- ALOGV("%s", __FUNCTION__);
- CameraHardwareInterface *object =
- static_cast<CameraHardwareInterface *>(user);
- // Start refcounting the heap object from here on. When the clients
- // drop all references, it will be destroyed (as well as the enclosed
- // MemoryHeapBase.
- sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
- if (index >= mem->mNumBufs) {
- ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
- index, mem->mNumBufs);
- return;
- }
- object->mDataCbTimestamp(timestamp, msg_type, mem->mBuffers[index], object->mCbUser);
-}
-
-camera_memory_t* CameraHardwareInterface::sGetMemory(
- int fd, size_t buf_size, uint_t num_bufs,
- void *user __attribute__((unused)))
-{
- CameraHeapMemory *mem;
- if (fd < 0) {
- mem = new CameraHeapMemory(buf_size, num_bufs);
- } else {
- mem = new CameraHeapMemory(fd, buf_size, num_bufs);
- }
- mem->incStrong(mem);
- return &mem->handle;
-}
-
-void CameraHardwareInterface::sPutMemory(camera_memory_t *data)
-{
- if (!data) {
- return;
- }
-
- CameraHeapMemory *mem = static_cast<CameraHeapMemory *>(data->handle);
- mem->decStrong(mem);
-}
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
deleted file mode 100644
index e519b04..0000000
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_CAMERA_HARDWARE_INTERFACE_H
-#define ANDROID_HARDWARE_CAMERA_HARDWARE_INTERFACE_H
-
-#include <unordered_map>
-#include <binder/IMemory.h>
-#include <binder/MemoryBase.h>
-#include <binder/MemoryHeapBase.h>
-#include <utils/RefBase.h>
-#include <ui/GraphicBuffer.h>
-#include <camera/Camera.h>
-#include <camera/CameraParameters.h>
-#include <system/window.h>
-#include <hardware/camera.h>
-
-#include <common/CameraProviderManager.h>
-
-namespace android {
-
-typedef void (*notify_callback)(int32_t msgType,
- int32_t ext1,
- int32_t ext2,
- void* user);
-
-typedef void (*data_callback)(int32_t msgType,
- const sp<IMemory> &dataPtr,
- camera_frame_metadata_t *metadata,
- void* user);
-
-typedef void (*data_callback_timestamp)(nsecs_t timestamp,
- int32_t msgType,
- const sp<IMemory> &dataPtr,
- void *user);
-
-struct HandleTimestampMessage {
- nsecs_t timestamp;
- const sp<IMemory> dataPtr;
-};
-
-typedef void (*data_callback_timestamp_batch)(
- int32_t msgType,
- const std::vector<HandleTimestampMessage>&, void* user);
-
-/**
- * CameraHardwareInterface.h defines the interface to the
- * camera hardware abstraction layer, used for setting and getting
- * parameters, live previewing, and taking pictures. It is used for
- * HAL devices with version CAMERA_DEVICE_API_VERSION_1_0 only.
- *
- * It is a referenced counted interface with RefBase as its base class.
- * CameraService calls openCameraHardware() to retrieve a strong pointer to the
- * instance of this interface and may be called multiple times. The
- * following steps describe a typical sequence:
- *
- * -# After CameraService calls openCameraHardware(), getParameters() and
- * setParameters() are used to initialize the camera instance.
- * -# startPreview() is called.
- *
- * Prior to taking a picture, CameraService often calls autofocus(). When auto
- * focusing has completed, the camera instance sends a CAMERA_MSG_FOCUS notification,
- * which informs the application whether focusing was successful. The camera instance
- * only sends this message once and it is up to the application to call autoFocus()
- * again if refocusing is desired.
- *
- * CameraService calls takePicture() to request the camera instance take a
- * picture. At this point, if a shutter, postview, raw, and/or compressed
- * callback is desired, the corresponding message must be enabled. Any memory
- * provided in a data callback must be copied if it's needed after returning.
- */
-
-class CameraHardwareInterface :
- public virtual RefBase,
- public virtual hardware::camera::device::V1_0::ICameraDeviceCallback,
- public virtual hardware::camera::device::V1_0::ICameraDevicePreviewCallback {
-
-public:
- explicit CameraHardwareInterface(const char *name):
- mHidlDevice(nullptr),
- mName(name),
- mPreviewScalingMode(NOT_SET),
- mPreviewTransform(NOT_SET),
- mPreviewWidth(NOT_SET),
- mPreviewHeight(NOT_SET),
- mPreviewFormat(NOT_SET),
- mPreviewUsage(0),
- mPreviewSwapInterval(NOT_SET),
- mPreviewCrop{NOT_SET,NOT_SET,NOT_SET,NOT_SET}
- {
- }
-
- ~CameraHardwareInterface();
-
- status_t initialize(sp<CameraProviderManager> manager);
-
- /** Set the ANativeWindow to which preview frames are sent */
- status_t setPreviewWindow(const sp<ANativeWindow>& buf);
-
- status_t setPreviewScalingMode(int scalingMode);
-
- status_t setPreviewTransform(int transform);
-
- /** Set the notification and data callbacks */
- void setCallbacks(notify_callback notify_cb,
- data_callback data_cb,
- data_callback_timestamp data_cb_timestamp,
- data_callback_timestamp_batch data_cb_timestamp_batch,
- void* user);
-
- /**
- * The following three functions all take a msgtype,
- * which is a bitmask of the messages defined in
- * include/ui/Camera.h
- */
-
- /**
- * Enable a message, or set of messages.
- */
- void enableMsgType(int32_t msgType);
-
- /**
- * Disable a message, or a set of messages.
- *
- * Once received a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME), camera hal
- * should not rely on its client to call releaseRecordingFrame() to release
- * video recording frames sent out by the cameral hal before and after the
- * disableMsgType(CAMERA_MSG_VIDEO_FRAME) call. Camera hal clients must not
- * modify/access any video recording frame after calling
- * disableMsgType(CAMERA_MSG_VIDEO_FRAME).
- */
- void disableMsgType(int32_t msgType);
-
- /**
- * Query whether a message, or a set of messages, is enabled.
- * Note that this is operates as an AND, if any of the messages
- * queried are off, this will return false.
- */
- int msgTypeEnabled(int32_t msgType);
-
- /**
- * Start preview mode.
- */
- status_t startPreview();
-
- /**
- * Stop a previously started preview.
- */
- void stopPreview();
-
- /**
- * Returns true if preview is enabled.
- */
- int previewEnabled();
-
- /**
- * Request the camera hal to store meta data or real YUV data in
- * the video buffers send out via CAMERA_MSG_VIDEO_FRRAME for a
- * recording session. If it is not called, the default camera
- * hal behavior is to store real YUV data in the video buffers.
- *
- * This method should be called before startRecording() in order
- * to be effective.
- *
- * If meta data is stored in the video buffers, it is up to the
- * receiver of the video buffers to interpret the contents and
- * to find the actual frame data with the help of the meta data
- * in the buffer. How this is done is outside of the scope of
- * this method.
- *
- * Some camera hal may not support storing meta data in the video
- * buffers, but all camera hal should support storing real YUV data
- * in the video buffers. If the camera hal does not support storing
- * the meta data in the video buffers when it is requested to do
- * do, INVALID_OPERATION must be returned. It is very useful for
- * the camera hal to pass meta data rather than the actual frame
- * data directly to the video encoder, since the amount of the
- * uncompressed frame data can be very large if video size is large.
- *
- * @param enable if true to instruct the camera hal to store
- * meta data in the video buffers; false to instruct
- * the camera hal to store real YUV data in the video
- * buffers.
- *
- * @return OK on success.
- */
-
- status_t storeMetaDataInBuffers(int enable);
-
- /**
- * Start record mode. When a record image is available a CAMERA_MSG_VIDEO_FRAME
- * message is sent with the corresponding frame. Every record frame must be released
- * by a cameral hal client via releaseRecordingFrame() before the client calls
- * disableMsgType(CAMERA_MSG_VIDEO_FRAME). After the client calls
- * disableMsgType(CAMERA_MSG_VIDEO_FRAME), it is camera hal's responsibility
- * to manage the life-cycle of the video recording frames, and the client must
- * not modify/access any video recording frames.
- */
- status_t startRecording();
-
- /**
- * Stop a previously started recording.
- */
- void stopRecording();
-
- /**
- * Returns true if recording is enabled.
- */
- int recordingEnabled();
-
- /**
- * Release a record frame previously returned by CAMERA_MSG_VIDEO_FRAME.
- *
- * It is camera hal client's responsibility to release video recording
- * frames sent out by the camera hal before the camera hal receives
- * a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME). After it receives
- * the call to disableMsgType(CAMERA_MSG_VIDEO_FRAME), it is camera hal's
- * responsibility of managing the life-cycle of the video recording
- * frames.
- */
- void releaseRecordingFrame(const sp<IMemory>& mem);
-
- /**
- * Release a batch of recording frames previously returned by
- * CAMERA_MSG_VIDEO_FRAME. This method only supports frames that are
- * stored as VideoNativeHandleMetadata.
- *
- * It is camera hal client's responsibility to release video recording
- * frames sent out by the camera hal before the camera hal receives
- * a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME). After it receives
- * the call to disableMsgType(CAMERA_MSG_VIDEO_FRAME), it is camera hal's
- * responsibility of managing the life-cycle of the video recording
- * frames.
- */
- void releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames);
-
- /**
- * Start auto focus, the notification callback routine is called
- * with CAMERA_MSG_FOCUS once when focusing is complete. autoFocus()
- * will be called again if another auto focus is needed.
- */
- status_t autoFocus();
-
- /**
- * Cancels auto-focus function. If the auto-focus is still in progress,
- * this function will cancel it. Whether the auto-focus is in progress
- * or not, this function will return the focus position to the default.
- * If the camera does not support auto-focus, this is a no-op.
- */
- status_t cancelAutoFocus();
-
- /**
- * Take a picture.
- */
- status_t takePicture();
-
- /**
- * Cancel a picture that was started with takePicture. Calling this
- * method when no picture is being taken is a no-op.
- */
- status_t cancelPicture();
-
- /**
- * Set the camera parameters. This returns BAD_VALUE if any parameter is
- * invalid or not supported. */
- status_t setParameters(const CameraParameters ¶ms);
-
- /** Return the camera parameters. */
- CameraParameters getParameters() const;
-
- /**
- * Send command to camera driver.
- */
- status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
-
- /**
- * Release the hardware resources owned by this object. Note that this is
- * *not* done in the destructor.
- */
- void release();
-
- /**
- * Dump state of the camera hardware
- */
- status_t dump(int fd, const Vector<String16>& /*args*/) const;
-
-private:
- sp<hardware::camera::device::V1_0::ICameraDevice> mHidlDevice;
- String8 mName;
-
- static void sNotifyCb(int32_t msg_type, int32_t ext1,
- int32_t ext2, void *user);
-
- static void sDataCb(int32_t msg_type,
- const camera_memory_t *data, unsigned int index,
- camera_frame_metadata_t *metadata,
- void *user);
-
- static void sDataCbTimestamp(nsecs_t timestamp, int32_t msg_type,
- const camera_memory_t *data, unsigned index,
- void *user);
-
- // This is a utility class that combines a MemoryHeapBase and a MemoryBase
- // in one. Since we tend to use them in a one-to-one relationship, this is
- // handy.
- class CameraHeapMemory : public RefBase {
- public:
- CameraHeapMemory(int fd, size_t buf_size, uint_t num_buffers = 1) :
- mBufSize(buf_size),
- mNumBufs(num_buffers)
- {
- mHeap = new MemoryHeapBase(fd, buf_size * num_buffers);
- commonInitialization();
- }
-
- explicit CameraHeapMemory(size_t buf_size, uint_t num_buffers = 1) :
- mBufSize(buf_size),
- mNumBufs(num_buffers)
- {
- mHeap = new MemoryHeapBase(buf_size * num_buffers);
- commonInitialization();
- }
-
- void commonInitialization()
- {
- handle.data = mHeap->base();
- handle.size = mBufSize * mNumBufs;
- handle.handle = this;
-
- mBuffers = new sp<MemoryBase>[mNumBufs];
- for (uint_t i = 0; i < mNumBufs; i++)
- mBuffers[i] = new MemoryBase(mHeap,
- i * mBufSize,
- mBufSize);
-
- handle.release = sPutMemory;
- }
-
- virtual ~CameraHeapMemory()
- {
- delete [] mBuffers;
- }
-
- size_t mBufSize;
- uint_t mNumBufs;
- sp<MemoryHeapBase> mHeap;
- sp<MemoryBase> *mBuffers;
-
- camera_memory_t handle;
- };
-
- static camera_memory_t* sGetMemory(int fd, size_t buf_size, uint_t num_bufs,
- void *user __attribute__((unused)));
-
- static void sPutMemory(camera_memory_t *data);
-
- std::pair<bool, uint64_t> getBufferId(ANativeWindowBuffer* anb);
- void cleanupCirculatingBuffers();
-
- /**
- * Implementation of android::hardware::camera::device::V1_0::ICameraDeviceCallback
- */
- hardware::Return<void> notifyCallback(
- hardware::camera::device::V1_0::NotifyCallbackMsg msgType,
- int32_t ext1, int32_t ext2) override;
- hardware::Return<uint32_t> registerMemory(
- const hardware::hidl_handle& descriptor,
- uint32_t bufferSize, uint32_t bufferCount) override;
- hardware::Return<void> unregisterMemory(uint32_t memId) override;
- hardware::Return<void> dataCallback(
- hardware::camera::device::V1_0::DataCallbackMsg msgType,
- uint32_t data, uint32_t bufferIndex,
- const hardware::camera::device::V1_0::CameraFrameMetadata& metadata) override;
- hardware::Return<void> dataCallbackTimestamp(
- hardware::camera::device::V1_0::DataCallbackMsg msgType,
- uint32_t data, uint32_t bufferIndex, int64_t timestamp) override;
- hardware::Return<void> handleCallbackTimestamp(
- hardware::camera::device::V1_0::DataCallbackMsg msgType,
- const hardware::hidl_handle& frameData, uint32_t data,
- uint32_t bufferIndex, int64_t timestamp) override;
- hardware::Return<void> handleCallbackTimestampBatch(
- hardware::camera::device::V1_0::DataCallbackMsg msgType,
- const hardware::hidl_vec<
- hardware::camera::device::V1_0::HandleTimestampMessage>&) override;
-
- /**
- * Implementation of android::hardware::camera::device::V1_0::ICameraDevicePreviewCallback
- */
- hardware::Return<void> dequeueBuffer(dequeueBuffer_cb _hidl_cb) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- enqueueBuffer(uint64_t bufferId) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- cancelBuffer(uint64_t bufferId) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- setBufferCount(uint32_t count) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- setBuffersGeometry(uint32_t w, uint32_t h,
- hardware::graphics::common::V1_0::PixelFormat format) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- setCrop(int32_t left, int32_t top, int32_t right, int32_t bottom) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- setUsage(hardware::graphics::common::V1_0::BufferUsage usage) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- setSwapInterval(int32_t interval) override;
- hardware::Return<void> getMinUndequeuedBufferCount(
- getMinUndequeuedBufferCount_cb _hidl_cb) override;
- hardware::Return<hardware::camera::common::V1_0::Status>
- setTimestamp(int64_t timestamp) override;
-
- sp<ANativeWindow> mPreviewWindow;
-
- notify_callback mNotifyCb;
- data_callback mDataCb;
- data_callback_timestamp mDataCbTimestamp;
- data_callback_timestamp_batch mDataCbTimestampBatch;
- void *mCbUser;
-
- // Cached values for preview stream parameters
- static const int NOT_SET = -1;
- int mPreviewScalingMode;
- int mPreviewTransform;
- int mPreviewWidth;
- int mPreviewHeight;
- int mPreviewFormat;
- uint64_t mPreviewUsage;
- int mPreviewSwapInterval;
- android_native_rect_t mPreviewCrop;
-
- struct BufferHasher {
- size_t operator()(const buffer_handle_t& buf) const {
- if (buf == nullptr)
- return 0;
-
- size_t result = 1;
- result = 31 * result + buf->numFds;
- result = 31 * result + buf->numInts;
- int length = buf->numFds + buf->numInts;
- for (int i = 0; i < length; i++) {
- result = 31 * result + buf->data[i];
- }
- return result;
- }
- };
-
- struct BufferComparator {
- bool operator()(const buffer_handle_t& buf1, const buffer_handle_t& buf2) const {
- if (buf1->numFds == buf2->numFds && buf1->numInts == buf2->numInts) {
- int length = buf1->numFds + buf1->numInts;
- for (int i = 0; i < length; i++) {
- if (buf1->data[i] != buf2->data[i]) {
- return false;
- }
- }
- return true;
- }
- return false;
- }
- };
-
- std::mutex mBufferIdMapLock; // protecting mBufferIdMap and mNextBufferId
- typedef std::unordered_map<const buffer_handle_t, uint64_t,
- BufferHasher, BufferComparator> BufferIdMap;
- // stream ID -> per stream buffer ID map
- BufferIdMap mBufferIdMap;
- std::unordered_map<uint64_t, ANativeWindowBuffer*> mReversedBufMap;
- uint64_t mNextBufferId = 1;
- static const uint64_t BUFFER_ID_NO_BUFFER = 0;
-
- std::mutex mHidlMemPoolMapLock; // protecting mHidlMemPoolMap
- std::unordered_map<int, camera_memory_t*> mHidlMemPoolMap;
-};
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index d5f136b..d27f11f 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -56,7 +56,7 @@
#include "device3/Camera3Device.h"
#include "device3/Camera3OutputStream.h"
#include "device3/Camera3InputStream.h"
-#include "device3/Camera3DummyStream.h"
+#include "device3/Camera3FakeStream.h"
#include "device3/Camera3SharedOutputStream.h"
#include "CameraService.h"
#include "utils/CameraThreadState.h"
@@ -270,7 +270,7 @@
}
/** Register in-flight map to the status tracker */
- mInFlightStatusId = mStatusTracker->addComponent();
+ mInFlightStatusId = mStatusTracker->addComponent("InflightRequests");
if (mUseHalBufManager) {
res = mRequestBufferSM.initialize(mStatusTracker);
@@ -309,7 +309,7 @@
internalUpdateStatusLocked(STATUS_UNCONFIGURED);
mNextStreamId = 0;
- mDummyStreamId = NO_STREAM;
+ mFakeStreamId = NO_STREAM;
mNeedConfig = true;
mPauseStateNotify = false;
@@ -1768,6 +1768,7 @@
maxExpectedDuration);
status_t res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
if (res != OK) {
+ mStatusTracker->dumpActiveComponents();
SET_ERR_L("Error waiting for HAL to drain: %s (%d)", strerror(-res),
res);
}
@@ -2481,12 +2482,12 @@
}
// Workaround for device HALv3.2 or older spec bug - zero streams requires
- // adding a dummy stream instead.
+ // adding a fake stream instead.
// TODO: Bug: 17321404 for fixing the HAL spec and removing this workaround.
if (mOutputStreams.size() == 0) {
- addDummyStreamLocked();
+ addFakeStreamLocked();
} else {
- tryRemoveDummyStreamLocked();
+ tryRemoveFakeStreamLocked();
}
// Start configuring the streams
@@ -2648,7 +2649,7 @@
mNeedConfig = false;
- internalUpdateStatusLocked((mDummyStreamId == NO_STREAM) ?
+ internalUpdateStatusLocked((mFakeStreamId == NO_STREAM) ?
STATUS_CONFIGURED : STATUS_UNCONFIGURED);
ALOGV("%s: Camera %s: Stream configuration complete", __FUNCTION__, mId.string());
@@ -2662,69 +2663,69 @@
return rc;
}
- if (mDummyStreamId == NO_STREAM) {
+ if (mFakeStreamId == NO_STREAM) {
mRequestBufferSM.onStreamsConfigured();
}
return OK;
}
-status_t Camera3Device::addDummyStreamLocked() {
+status_t Camera3Device::addFakeStreamLocked() {
ATRACE_CALL();
status_t res;
- if (mDummyStreamId != NO_STREAM) {
- // Should never be adding a second dummy stream when one is already
+ if (mFakeStreamId != NO_STREAM) {
+ // Should never be adding a second fake stream when one is already
// active
- SET_ERR_L("%s: Camera %s: A dummy stream already exists!",
+ SET_ERR_L("%s: Camera %s: A fake stream already exists!",
__FUNCTION__, mId.string());
return INVALID_OPERATION;
}
- ALOGV("%s: Camera %s: Adding a dummy stream", __FUNCTION__, mId.string());
+ ALOGV("%s: Camera %s: Adding a fake stream", __FUNCTION__, mId.string());
- sp<Camera3OutputStreamInterface> dummyStream =
- new Camera3DummyStream(mNextStreamId);
+ sp<Camera3OutputStreamInterface> fakeStream =
+ new Camera3FakeStream(mNextStreamId);
- res = mOutputStreams.add(mNextStreamId, dummyStream);
+ res = mOutputStreams.add(mNextStreamId, fakeStream);
if (res < 0) {
- SET_ERR_L("Can't add dummy stream to set: %s (%d)", strerror(-res), res);
+ SET_ERR_L("Can't add fake stream to set: %s (%d)", strerror(-res), res);
return res;
}
- mDummyStreamId = mNextStreamId;
+ mFakeStreamId = mNextStreamId;
mNextStreamId++;
return OK;
}
-status_t Camera3Device::tryRemoveDummyStreamLocked() {
+status_t Camera3Device::tryRemoveFakeStreamLocked() {
ATRACE_CALL();
status_t res;
- if (mDummyStreamId == NO_STREAM) return OK;
+ if (mFakeStreamId == NO_STREAM) return OK;
if (mOutputStreams.size() == 1) return OK;
- ALOGV("%s: Camera %s: Removing the dummy stream", __FUNCTION__, mId.string());
+ ALOGV("%s: Camera %s: Removing the fake stream", __FUNCTION__, mId.string());
- // Ok, have a dummy stream and there's at least one other output stream,
- // so remove the dummy
+ // Ok, have a fake stream and there's at least one other output stream,
+ // so remove the fake
- sp<Camera3StreamInterface> deletedStream = mOutputStreams.get(mDummyStreamId);
+ sp<Camera3StreamInterface> deletedStream = mOutputStreams.get(mFakeStreamId);
if (deletedStream == nullptr) {
- SET_ERR_L("Dummy stream %d does not appear to exist", mDummyStreamId);
+ SET_ERR_L("Fake stream %d does not appear to exist", mFakeStreamId);
return INVALID_OPERATION;
}
- mOutputStreams.remove(mDummyStreamId);
+ mOutputStreams.remove(mFakeStreamId);
// Free up the stream endpoint so that it can be used by some other stream
res = deletedStream->disconnect();
if (res != OK) {
- SET_ERR_L("Can't disconnect deleted dummy stream %d", mDummyStreamId);
+ SET_ERR_L("Can't disconnect deleted fake stream %d", mFakeStreamId);
// fall through since we want to still list the stream as deleted.
}
mDeletedStreams.add(deletedStream);
- mDummyStreamId = NO_STREAM;
+ mFakeStreamId = NO_STREAM;
return res;
}
@@ -2829,7 +2830,7 @@
}
void Camera3Device::checkInflightMapLengthLocked() {
- // Sanity check - if we have too many in-flight frames with long total inflight duration,
+ // Validation check - if we have too many in-flight frames with long total inflight duration,
// something has likely gone wrong. This might still be legit only if application send in
// a long burst of long exposure requests.
if (mExpectedInflightDuration > kMinWarnInflightDuration) {
@@ -3794,7 +3795,7 @@
mSessionParamKeys(sessionParamKeys),
mLatestSessionParams(sessionParamKeys.size()),
mUseHalBufManager(useHalBufManager) {
- mStatusId = statusTracker->addComponent();
+ mStatusId = statusTracker->addComponent("RequestThread");
}
Camera3Device::RequestThread::~RequestThread() {}
@@ -4420,11 +4421,11 @@
std::set<std::string> cameraIdsWithZoom;
/**
* HAL workaround:
- * Insert a dummy trigger ID if a trigger is set but no trigger ID is
+ * Insert a fake trigger ID if a trigger is set but no trigger ID is
*/
- res = addDummyTriggerIds(captureRequest);
+ res = addFakeTriggerIds(captureRequest);
if (res != OK) {
- SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
+ SET_ERR("RequestThread: Unable to insert fake trigger IDs "
"(capture request %d, HAL device: %s (%d)",
halRequest->frame_number, strerror(-res), res);
return INVALID_OPERATION;
@@ -5342,26 +5343,26 @@
return OK;
}
-status_t Camera3Device::RequestThread::addDummyTriggerIds(
+status_t Camera3Device::RequestThread::addFakeTriggerIds(
const sp<CaptureRequest> &request) {
// Trigger ID 0 had special meaning in the HAL2 spec, so avoid it here
- static const int32_t dummyTriggerId = 1;
+ static const int32_t fakeTriggerId = 1;
status_t res;
CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
- // If AF trigger is active, insert a dummy AF trigger ID if none already
+ // If AF trigger is active, insert a fake AF trigger ID if none already
// exists
camera_metadata_entry afTrigger = metadata.find(ANDROID_CONTROL_AF_TRIGGER);
camera_metadata_entry afId = metadata.find(ANDROID_CONTROL_AF_TRIGGER_ID);
if (afTrigger.count > 0 &&
afTrigger.data.u8[0] != ANDROID_CONTROL_AF_TRIGGER_IDLE &&
afId.count == 0) {
- res = metadata.update(ANDROID_CONTROL_AF_TRIGGER_ID, &dummyTriggerId, 1);
+ res = metadata.update(ANDROID_CONTROL_AF_TRIGGER_ID, &fakeTriggerId, 1);
if (res != OK) return res;
}
- // If AE precapture trigger is active, insert a dummy precapture trigger ID
+ // If AE precapture trigger is active, insert a fake precapture trigger ID
// if none already exists
camera_metadata_entry pcTrigger =
metadata.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER);
@@ -5370,7 +5371,7 @@
pcTrigger.data.u8[0] != ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE &&
pcId.count == 0) {
res = metadata.update(ANDROID_CONTROL_AE_PRECAPTURE_ID,
- &dummyTriggerId, 1);
+ &fakeTriggerId, 1);
if (res != OK) return res;
}
@@ -5636,7 +5637,7 @@
std::lock_guard<std::mutex> lock(mLock);
mStatusTracker = statusTracker;
- mRequestBufferStatusId = statusTracker->addComponent();
+ mRequestBufferStatusId = statusTracker->addComponent("BufferRequestSM");
return OK;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index e10da2c..c579071 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -474,7 +474,7 @@
int mNextStreamId;
bool mNeedConfig;
- int mDummyStreamId;
+ int mFakeStreamId;
// Whether to send state updates upstream
// Pause when doing transparent reconfiguration
@@ -681,15 +681,15 @@
void cancelStreamsConfigurationLocked();
/**
- * Add a dummy stream to the current stream set as a workaround for
+ * Add a fake stream to the current stream set as a workaround for
* not allowing 0 streams in the camera HAL spec.
*/
- status_t addDummyStreamLocked();
+ status_t addFakeStreamLocked();
/**
- * Remove a dummy stream if the current config includes real streams.
+ * Remove a fake stream if the current config includes real streams.
*/
- status_t tryRemoveDummyStreamLocked();
+ status_t tryRemoveFakeStreamLocked();
/**
* Set device into an error state due to some fatal failure, and set an
@@ -874,7 +874,7 @@
// HAL workaround: Make sure a trigger ID always exists if
// a trigger does
- status_t addDummyTriggerIds(const sp<CaptureRequest> &request);
+ status_t addFakeTriggerIds(const sp<CaptureRequest> &request);
// Override rotate_and_crop control if needed; returns true if the current value was changed
bool overrideAutoRotateAndCrop(const sp<CaptureRequest> &request);
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
deleted file mode 100644
index b637160..0000000
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright (C) 2014-2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "Camera3-DummyStream"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-#include "Camera3DummyStream.h"
-
-namespace android {
-
-namespace camera3 {
-
-const String8 Camera3DummyStream::DUMMY_ID;
-
-Camera3DummyStream::Camera3DummyStream(int id) :
- Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
- /*maxSize*/0, DUMMY_FORMAT, DUMMY_DATASPACE, DUMMY_ROTATION,
- DUMMY_ID) {
-
-}
-
-Camera3DummyStream::~Camera3DummyStream() {
-
-}
-
-status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *,
- const std::vector<size_t>&) {
- ATRACE_CALL();
- ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", __FUNCTION__, mId);
- return INVALID_OPERATION;
-}
-
-status_t Camera3DummyStream::returnBufferLocked(
- const camera3_stream_buffer &,
- nsecs_t, const std::vector<size_t>&) {
- ATRACE_CALL();
- ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
- return INVALID_OPERATION;
-}
-
-status_t Camera3DummyStream::returnBufferCheckedLocked(
- const camera3_stream_buffer &,
- nsecs_t,
- bool,
- const std::vector<size_t>&,
- /*out*/
- sp<Fence>*) {
- ATRACE_CALL();
- ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
- return INVALID_OPERATION;
-}
-
-void Camera3DummyStream::dump(int fd, const Vector<String16> &args) const {
- (void) args;
- String8 lines;
- lines.appendFormat(" Stream[%d]: Dummy\n", mId);
- write(fd, lines.string(), lines.size());
-
- Camera3IOStreamBase::dump(fd, args);
-}
-
-status_t Camera3DummyStream::setTransform(int) {
- ATRACE_CALL();
- // Do nothing
- return OK;
-}
-
-status_t Camera3DummyStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
- (void) buffer;
- (void) fenceFd;
- // Do nothing
- return OK;
-}
-
-status_t Camera3DummyStream::configureQueueLocked() {
- // Do nothing
- return OK;
-}
-
-status_t Camera3DummyStream::disconnectLocked() {
- mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
- : STATE_CONSTRUCTED;
- return OK;
-}
-
-status_t Camera3DummyStream::getEndpointUsage(uint64_t *usage) const {
- *usage = DUMMY_USAGE;
- return OK;
-}
-
-bool Camera3DummyStream::isVideoStream() const {
- return false;
-}
-
-bool Camera3DummyStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
- return false;
-}
-
-status_t Camera3DummyStream::dropBuffers(bool /*dropping*/) {
- return OK;
-}
-
-const String8& Camera3DummyStream::getPhysicalCameraId() const {
- return DUMMY_ID;
-}
-
-status_t Camera3DummyStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
- ALOGE("%s: Stream %d: Dummy stream doesn't support set consumer surface!",
- __FUNCTION__, mId);
- return INVALID_OPERATION;
-}
-
-status_t Camera3DummyStream::updateStream(const std::vector<sp<Surface>> &/*outputSurfaces*/,
- const std::vector<OutputStreamInfo> &/*outputInfo*/,
- const std::vector<size_t> &/*removedSurfaceIds*/,
- KeyedVector<sp<Surface>, size_t> * /*outputMap*/) {
- ALOGE("%s: this method is not supported!", __FUNCTION__);
- return INVALID_OPERATION;
-}
-
-}; // namespace camera3
-
-}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
new file mode 100644
index 0000000..230512a
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2014-2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Camera3-FakeStream"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include "Camera3FakeStream.h"
+
+namespace android {
+
+namespace camera3 {
+
+const String8 Camera3FakeStream::FAKE_ID;
+
+Camera3FakeStream::Camera3FakeStream(int id) :
+ Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, FAKE_WIDTH, FAKE_HEIGHT,
+ /*maxSize*/0, FAKE_FORMAT, FAKE_DATASPACE, FAKE_ROTATION,
+ FAKE_ID) {
+
+}
+
+Camera3FakeStream::~Camera3FakeStream() {
+
+}
+
+status_t Camera3FakeStream::getBufferLocked(camera3_stream_buffer *,
+ const std::vector<size_t>&) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Fake stream cannot produce buffers!", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3FakeStream::returnBufferLocked(
+ const camera3_stream_buffer &,
+ nsecs_t, const std::vector<size_t>&) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Fake stream cannot return buffers!", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3FakeStream::returnBufferCheckedLocked(
+ const camera3_stream_buffer &,
+ nsecs_t,
+ bool,
+ const std::vector<size_t>&,
+ /*out*/
+ sp<Fence>*) {
+ ATRACE_CALL();
+ ALOGE("%s: Stream %d: Fake stream cannot return buffers!", __FUNCTION__, mId);
+ return INVALID_OPERATION;
+}
+
+void Camera3FakeStream::dump(int fd, const Vector<String16> &args) const {
+ (void) args;
+ String8 lines;
+ lines.appendFormat(" Stream[%d]: Fake\n", mId);
+ write(fd, lines.string(), lines.size());
+
+ Camera3IOStreamBase::dump(fd, args);
+}
+
+status_t Camera3FakeStream::setTransform(int) {
+ ATRACE_CALL();
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3FakeStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
+ (void) buffer;
+ (void) fenceFd;
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3FakeStream::configureQueueLocked() {
+ // Do nothing
+ return OK;
+}
+
+status_t Camera3FakeStream::disconnectLocked() {
+ mState = (mState == STATE_IN_RECONFIG) ? STATE_IN_CONFIG
+ : STATE_CONSTRUCTED;
+ return OK;
+}
+
+status_t Camera3FakeStream::getEndpointUsage(uint64_t *usage) const {
+ *usage = FAKE_USAGE;
+ return OK;
+}
+
+bool Camera3FakeStream::isVideoStream() const {
+ return false;
+}
+
+bool Camera3FakeStream::isConsumerConfigurationDeferred(size_t /*surface_id*/) const {
+ return false;
+}
+
+status_t Camera3FakeStream::dropBuffers(bool /*dropping*/) {
+ return OK;
+}
+
+const String8& Camera3FakeStream::getPhysicalCameraId() const {
+ return FAKE_ID;
+}
+
+status_t Camera3FakeStream::setConsumers(const std::vector<sp<Surface>>& /*consumers*/) {
+ ALOGE("%s: Stream %d: Fake stream doesn't support set consumer surface!",
+ __FUNCTION__, mId);
+ return INVALID_OPERATION;
+}
+
+status_t Camera3FakeStream::updateStream(const std::vector<sp<Surface>> &/*outputSurfaces*/,
+ const std::vector<OutputStreamInfo> &/*outputInfo*/,
+ const std::vector<size_t> &/*removedSurfaceIds*/,
+ KeyedVector<sp<Surface>, size_t> * /*outputMap*/) {
+ ALOGE("%s: this method is not supported!", __FUNCTION__);
+ return INVALID_OPERATION;
+}
+
+}; // namespace camera3
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
similarity index 80%
rename from services/camera/libcameraservice/device3/Camera3DummyStream.h
rename to services/camera/libcameraservice/device3/Camera3FakeStream.h
index 4b67ea5..fbf37e6 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
-#define ANDROID_SERVERS_CAMERA3_DUMMY_STREAM_H
+#ifndef ANDROID_SERVERS_CAMERA3_FAKE_STREAM_H
+#define ANDROID_SERVERS_CAMERA3_FAKE_STREAM_H
#include <utils/RefBase.h>
#include <gui/Surface.h>
@@ -28,23 +28,23 @@
namespace camera3 {
/**
- * A dummy output stream class, to be used as a placeholder when no valid
+ * A fake output stream class, to be used as a placeholder when no valid
* streams are configured by the client.
* This is necessary because camera HAL v3.2 or older disallow configuring
* 0 output streams, while the public camera2 API allows for it.
*/
-class Camera3DummyStream :
+class Camera3FakeStream :
public Camera3IOStreamBase,
public Camera3OutputStreamInterface {
public:
/**
- * Set up a dummy stream; doesn't actually connect to anything, and uses
- * a default dummy format and size.
+ * Set up a fake stream; doesn't actually connect to anything, and uses
+ * a default fake format and size.
*/
- explicit Camera3DummyStream(int id);
+ explicit Camera3FakeStream(int id);
- virtual ~Camera3DummyStream();
+ virtual ~Camera3FakeStream();
/**
* Camera3Stream interface
@@ -115,15 +115,15 @@
private:
- // Default dummy parameters; 320x240 is a required size for all devices,
+ // Default fake parameters; 320x240 is a required size for all devices,
// otherwise act like a SurfaceView would.
- static const int DUMMY_WIDTH = 320;
- static const int DUMMY_HEIGHT = 240;
- static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- static const android_dataspace DUMMY_DATASPACE = HAL_DATASPACE_UNKNOWN;
- static const camera3_stream_rotation_t DUMMY_ROTATION = CAMERA3_STREAM_ROTATION_0;
- static const uint64_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
- static const String8 DUMMY_ID;
+ static const int FAKE_WIDTH = 320;
+ static const int FAKE_HEIGHT = 240;
+ static const int FAKE_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ static const android_dataspace FAKE_DATASPACE = HAL_DATASPACE_UNKNOWN;
+ static const camera3_stream_rotation_t FAKE_ROTATION = CAMERA3_STREAM_ROTATION_0;
+ static const uint64_t FAKE_USAGE = GRALLOC_USAGE_HW_COMPOSER;
+ static const String8 FAKE_ID;
/**
* Internal Camera3Stream interface
@@ -138,7 +138,7 @@
virtual status_t getEndpointUsage(uint64_t *usage) const;
-}; // class Camera3DummyStream
+}; // class Camera3FakeStream
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index cb59a76..ebd33e9 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -27,13 +27,13 @@
namespace camera3 {
-const String8 Camera3InputStream::DUMMY_ID;
+const String8 Camera3InputStream::FAKE_ID;
Camera3InputStream::Camera3InputStream(int id,
uint32_t width, uint32_t height, int format) :
Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height, /*maxSize*/0,
format, HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0,
- DUMMY_ID) {
+ FAKE_ID) {
if (format == HAL_PIXEL_FORMAT_BLOB) {
ALOGE("%s: Bad format, BLOB not supported", __FUNCTION__);
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 97a627a..22697b7 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -53,7 +53,7 @@
sp<IGraphicBufferProducer> mProducer;
Vector<BufferItem> mBuffersInFlight;
- static const String8 DUMMY_ID;
+ static const String8 FAKE_ID;
/**
* Camera3IOStreamBase
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 01ca006..7b812f2 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -114,7 +114,7 @@
mState = STATE_ERROR;
}
- // Sanity check for the consumer usage flag.
+ // Validation check for the consumer usage flag.
if ((consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) == 0 &&
(consumerUsage & GraphicBuffer::USAGE_HW_COMPOSER) == 0) {
ALOGE("%s: Deferred consumer usage flag is illegal %" PRIu64 "!",
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 08cde5d..889ce86 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -416,7 +416,7 @@
ATRACE_ASYNC_END("frame capture", frameNumber);
- // Sanity check - if sensor timestamp matches shutter timestamp in the
+ // Validation check - if sensor timestamp matches shutter timestamp in the
// case of request having callback.
if (request.hasCallback && request.requestStatus == OK &&
sensorTimestamp != shutterTimestamp) {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 20f6168..f208561 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -330,7 +330,8 @@
// Register for idle tracking
sp<StatusTracker> statusTracker = mStatusTracker.promote();
if (statusTracker != 0 && mStatusId == StatusTracker::NO_STATUS_ID) {
- mStatusId = statusTracker->addComponent();
+ std::string name = std::string("Stream ") + std::to_string(mId);
+ mStatusId = statusTracker->addComponent(name.c_str());
}
// Check if the stream configuration is unchanged, and skip reallocation if
diff --git a/services/camera/libcameraservice/device3/DistortionMapper.cpp b/services/camera/libcameraservice/device3/DistortionMapper.cpp
index 8132225..2f388f2 100644
--- a/services/camera/libcameraservice/device3/DistortionMapper.cpp
+++ b/services/camera/libcameraservice/device3/DistortionMapper.cpp
@@ -485,7 +485,7 @@
float det = b * b - 4 * a * c;
if (det < 0) {
- // Sanity check - should not happen if pt is within the quad
+ // Validation check - should not happen if pt is within the quad
ALOGE("Bad determinant! a: %f, b: %f, c: %f, det: %f", a,b,c,det);
return -1;
}
diff --git a/services/camera/libcameraservice/device3/StatusTracker.cpp b/services/camera/libcameraservice/device3/StatusTracker.cpp
index 723b5c2..ea1f2c1 100644
--- a/services/camera/libcameraservice/device3/StatusTracker.cpp
+++ b/services/camera/libcameraservice/device3/StatusTracker.cpp
@@ -40,7 +40,7 @@
StatusTracker::~StatusTracker() {
}
-int StatusTracker::addComponent() {
+int StatusTracker::addComponent(std::string componentName) {
int id;
ssize_t err;
{
@@ -49,8 +49,12 @@
ALOGV("%s: Adding new component %d", __FUNCTION__, id);
err = mStates.add(id, IDLE);
- ALOGE_IF(err < 0, "%s: Can't add new component %d: %s (%zd)",
- __FUNCTION__, id, strerror(-err), err);
+ if (componentName.empty()) {
+ componentName = std::to_string(id);
+ }
+ mComponentNames.add(id, componentName);
+ ALOGE_IF(err < 0, "%s: Can't add new component %d (%s): %s (%zd)",
+ __FUNCTION__, id, componentName.c_str(), strerror(-err), err);
}
if (err >= 0) {
@@ -68,6 +72,7 @@
Mutex::Autolock l(mLock);
ALOGV("%s: Removing component %d", __FUNCTION__, id);
idx = mStates.removeItem(id);
+ mComponentNames.removeItem(id);
}
if (idx >= 0) {
@@ -80,6 +85,20 @@
}
+void StatusTracker::dumpActiveComponents() {
+ Mutex::Autolock l(mLock);
+ if (mDeviceState == IDLE) {
+ ALOGI("%s: all components are IDLE", __FUNCTION__);
+ return;
+ }
+ for (size_t i = 0; i < mStates.size(); i++) {
+ if (mStates.valueAt(i) == ACTIVE) {
+ ALOGI("%s: component %d (%s) is active", __FUNCTION__, mStates.keyAt(i),
+ mComponentNames.valueAt(i).c_str());
+ }
+ }
+}
+
void StatusTracker::markComponentIdle(int id, const sp<Fence>& componentFence) {
markComponent(id, IDLE, componentFence);
}
diff --git a/services/camera/libcameraservice/device3/StatusTracker.h b/services/camera/libcameraservice/device3/StatusTracker.h
index 3a1d85c..3741cce 100644
--- a/services/camera/libcameraservice/device3/StatusTracker.h
+++ b/services/camera/libcameraservice/device3/StatusTracker.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_SERVERS_CAMERA3_STATUSTRACKER_H
#define ANDROID_SERVERS_CAMERA3_STATUSTRACKER_H
+#include <string>
#include <utils/Condition.h>
#include <utils/Errors.h>
#include <utils/List.h>
@@ -54,7 +55,7 @@
// Add a component to track; returns non-negative unique ID for the new
// component on success, negative error code on failure.
// New components start in the idle state.
- int addComponent();
+ int addComponent(std::string componentName);
// Remove existing component from idle tracking. Ignores unknown IDs
void removeComponent(int id);
@@ -68,6 +69,8 @@
// Set the state of a tracked component to be active. Ignores unknown IDs.
void markComponentActive(int id);
+ void dumpActiveComponents();
+
virtual void requestExit();
protected:
@@ -105,6 +108,7 @@
// Current component states
KeyedVector<int, ComponentState> mStates;
+ KeyedVector<int, std::string> mComponentNames;
// Merged fence for all processed state changes
sp<Fence> mIdleFence;
// Current overall device state
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index 888671c..ba68a63 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -14,20 +14,493 @@
* limitations under the License.
*/
#include "SessionConfigurationUtils.h"
-#include "../api2/CameraDeviceClient.h"
+#include "../api2/DepthCompositeStream.h"
+#include "../api2/HeicCompositeStream.h"
+#include "common/CameraDeviceBase.h"
+#include "../CameraService.h"
+#include "device3/Camera3Device.h"
+#include "device3/Camera3OutputStream.h"
+
+// Convenience methods for constructing binder::Status objects for error returns
+
+#define STATUS_ERROR(errorCode, errorString) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
+ __VA_ARGS__))
+
+using android::camera3::OutputStreamInfo;
+using android::camera3::OutputStreamInfo;
+using android::hardware::camera2::ICameraDeviceUser;
namespace android {
+int64_t SessionConfigurationUtils::euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1) {
+ int64_t d0 = x0 - x1;
+ int64_t d1 = y0 - y1;
+ return d0 * d0 + d1 * d1;
+}
+
+bool SessionConfigurationUtils::roundBufferDimensionNearest(int32_t width, int32_t height,
+ int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
+ /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
+
+ camera_metadata_ro_entry streamConfigs =
+ (dataSpace == HAL_DATASPACE_DEPTH) ?
+ info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
+ (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_HEIF)) ?
+ info.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS) :
+ info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+
+ int32_t bestWidth = -1;
+ int32_t bestHeight = -1;
+
+ // Iterate through listed stream configurations and find the one with the smallest euclidean
+ // distance from the given dimensions for the given format.
+ for (size_t i = 0; i < streamConfigs.count; i += 4) {
+ int32_t fmt = streamConfigs.data.i32[i];
+ int32_t w = streamConfigs.data.i32[i + 1];
+ int32_t h = streamConfigs.data.i32[i + 2];
+
+ // Ignore input/output type for now
+ if (fmt == format) {
+ if (w == width && h == height) {
+ bestWidth = width;
+ bestHeight = height;
+ break;
+ } else if (w <= ROUNDING_WIDTH_CAP && (bestWidth == -1 ||
+ SessionConfigurationUtils::euclidDistSquare(w, h, width, height) <
+ SessionConfigurationUtils::euclidDistSquare(bestWidth, bestHeight, width,
+ height))) {
+ bestWidth = w;
+ bestHeight = h;
+ }
+ }
+ }
+
+ if (bestWidth == -1) {
+ // Return false if no configurations for this format were listed
+ return false;
+ }
+
+ // Set the outputs to the closest width/height
+ if (outWidth != NULL) {
+ *outWidth = bestWidth;
+ }
+ if (outHeight != NULL) {
+ *outHeight = bestHeight;
+ }
+
+ // Return true if at least one configuration for this format was listed
+ return true;
+}
+
+bool SessionConfigurationUtils::isPublicFormat(int32_t format)
+{
+ switch(format) {
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_RGBX_8888:
+ case HAL_PIXEL_FORMAT_RGB_888:
+ case HAL_PIXEL_FORMAT_RGB_565:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ case HAL_PIXEL_FORMAT_YV12:
+ case HAL_PIXEL_FORMAT_Y8:
+ case HAL_PIXEL_FORMAT_Y16:
+ case HAL_PIXEL_FORMAT_RAW16:
+ case HAL_PIXEL_FORMAT_RAW10:
+ case HAL_PIXEL_FORMAT_RAW12:
+ case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+ case HAL_PIXEL_FORMAT_BLOB:
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+ case HAL_PIXEL_FORMAT_YCbCr_420_888:
+ case HAL_PIXEL_FORMAT_YCbCr_422_SP:
+ case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+ case HAL_PIXEL_FORMAT_YCbCr_422_I:
+ return true;
+ default:
+ return false;
+ }
+}
+
+binder::Status SessionConfigurationUtils::createSurfaceFromGbp(
+ OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8 &cameraId, const CameraMetadata &physicalCameraMetadata) {
+
+ // bufferProducer must be non-null
+ if (gbp == nullptr) {
+ String8 msg = String8::format("Camera %s: Surface is NULL", cameraId.string());
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ // HACK b/10949105
+ // Query consumer usage bits to set async operation mode for
+ // GLConsumer using controlledByApp parameter.
+ bool useAsync = false;
+ uint64_t consumerUsage = 0;
+ status_t err;
+ if ((err = gbp->getConsumerUsage(&consumerUsage)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface consumer usage: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
+ ALOGW("%s: Camera %s with consumer usage flag: %" PRIu64 ": Forcing asynchronous mode for"
+ "stream", __FUNCTION__, cameraId.string(), consumerUsage);
+ useAsync = true;
+ }
+
+ uint64_t disallowedFlags = GraphicBuffer::USAGE_HW_VIDEO_ENCODER |
+ GRALLOC_USAGE_RENDERSCRIPT;
+ uint64_t allowedFlags = GraphicBuffer::USAGE_SW_READ_MASK |
+ GraphicBuffer::USAGE_HW_TEXTURE |
+ GraphicBuffer::USAGE_HW_COMPOSER;
+ bool flexibleConsumer = (consumerUsage & disallowedFlags) == 0 &&
+ (consumerUsage & allowedFlags) != 0;
+
+ surface = new Surface(gbp, useAsync);
+ ANativeWindow *anw = surface.get();
+
+ int width, height, format;
+ android_dataspace dataSpace;
+ if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface width: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface height: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface format: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+ reinterpret_cast<int*>(&dataSpace))) != OK) {
+ String8 msg = String8::format("Camera %s: Failed to query Surface dataspace: %s (%d)",
+ cameraId.string(), strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ }
+
+ // FIXME: remove this override since the default format should be
+ // IMPLEMENTATION_DEFINED. b/9487482 & b/35317944
+ if ((format >= HAL_PIXEL_FORMAT_RGBA_8888 && format <= HAL_PIXEL_FORMAT_BGRA_8888) &&
+ ((consumerUsage & GRALLOC_USAGE_HW_MASK) &&
+ ((consumerUsage & GRALLOC_USAGE_SW_READ_MASK) == 0))) {
+ ALOGW("%s: Camera %s: Overriding format %#x to IMPLEMENTATION_DEFINED",
+ __FUNCTION__, cameraId.string(), format);
+ format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ }
+ // Round dimensions to the nearest dimensions available for this format
+ if (flexibleConsumer && isPublicFormat(format) &&
+ !SessionConfigurationUtils::roundBufferDimensionNearest(width, height,
+ format, dataSpace, physicalCameraMetadata, /*out*/&width, /*out*/&height)) {
+ String8 msg = String8::format("Camera %s: No supported stream configurations with "
+ "format %#x defined, failed to create output stream",
+ cameraId.string(), format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (!isStreamInfoValid) {
+ streamInfo.width = width;
+ streamInfo.height = height;
+ streamInfo.format = format;
+ streamInfo.dataSpace = dataSpace;
+ streamInfo.consumerUsage = consumerUsage;
+ return binder::Status::ok();
+ }
+ if (width != streamInfo.width) {
+ String8 msg = String8::format("Camera %s:Surface width doesn't match: %d vs %d",
+ cameraId.string(), width, streamInfo.width);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (height != streamInfo.height) {
+ String8 msg = String8::format("Camera %s:Surface height doesn't match: %d vs %d",
+ cameraId.string(), height, streamInfo.height);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (format != streamInfo.format) {
+ String8 msg = String8::format("Camera %s:Surface format doesn't match: %d vs %d",
+ cameraId.string(), format, streamInfo.format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+ if (dataSpace != streamInfo.dataSpace) {
+ String8 msg = String8::format("Camera %s:Surface dataSpace doesn't match: %d vs %d",
+ cameraId.string(), dataSpace, streamInfo.dataSpace);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ //At the native side, there isn't a way to check whether 2 surfaces come from the same
+ //surface class type. Use usage flag to approximate the comparison.
+ if (consumerUsage != streamInfo.consumerUsage) {
+ String8 msg = String8::format(
+ "Camera %s:Surface usage flag doesn't match %" PRIu64 " vs %" PRIu64 "",
+ cameraId.string(), consumerUsage, streamInfo.consumerUsage);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ }
+ return binder::Status::ok();
+}
+
+
+void SessionConfigurationUtils::mapStreamInfo(const OutputStreamInfo &streamInfo,
+ camera3_stream_rotation_t rotation, String8 physicalId,
+ hardware::camera::device::V3_4::Stream *stream /*out*/) {
+ if (stream == nullptr) {
+ return;
+ }
+
+ stream->v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
+ stream->v3_2.width = streamInfo.width;
+ stream->v3_2.height = streamInfo.height;
+ stream->v3_2.format = Camera3Device::mapToPixelFormat(streamInfo.format);
+ auto u = streamInfo.consumerUsage;
+ camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
+ stream->v3_2.usage = Camera3Device::mapToConsumerUsage(u);
+ stream->v3_2.dataSpace = Camera3Device::mapToHidlDataspace(streamInfo.dataSpace);
+ stream->v3_2.rotation = Camera3Device::mapToStreamRotation(rotation);
+ stream->v3_2.id = -1; // Invalid stream id
+ stream->physicalCameraId = std::string(physicalId.string());
+ stream->bufferSize = 0;
+}
+
+binder::Status SessionConfigurationUtils::checkPhysicalCameraId(
+ const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
+ const String8 &logicalCameraId) {
+ if (physicalCameraId.size() == 0) {
+ return binder::Status::ok();
+ }
+ if (std::find(physicalCameraIds.begin(), physicalCameraIds.end(),
+ physicalCameraId.string()) == physicalCameraIds.end()) {
+ String8 msg = String8::format("Camera %s: Camera doesn't support physicalCameraId %s.",
+ logicalCameraId.string(), physicalCameraId.string());
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ return binder::Status::ok();
+}
+
+binder::Status SessionConfigurationUtils::checkSurfaceType(size_t numBufferProducers,
+ bool deferredConsumer, int surfaceType) {
+ if (numBufferProducers > MAX_SURFACES_PER_STREAM) {
+ ALOGE("%s: GraphicBufferProducer count %zu for stream exceeds limit of %d",
+ __FUNCTION__, numBufferProducers, MAX_SURFACES_PER_STREAM);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Surface count is too high");
+ } else if ((numBufferProducers == 0) && (!deferredConsumer)) {
+ ALOGE("%s: Number of consumers cannot be smaller than 1", __FUNCTION__);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "No valid consumers.");
+ }
+
+ bool validSurfaceType = ((surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) ||
+ (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_TEXTURE));
+
+ if (deferredConsumer && !validSurfaceType) {
+ ALOGE("%s: Target surface has invalid surfaceType = %d.", __FUNCTION__, surfaceType);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
+ }
+
+ return binder::Status::ok();
+}
+
+binder::Status SessionConfigurationUtils::checkOperatingMode(int operatingMode,
+ const CameraMetadata &staticInfo, const String8 &cameraId) {
+ if (operatingMode < 0) {
+ String8 msg = String8::format(
+ "Camera %s: Invalid operating mode %d requested", cameraId.string(), operatingMode);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
+ }
+
+ bool isConstrainedHighSpeed = (operatingMode == ICameraDeviceUser::CONSTRAINED_HIGH_SPEED_MODE);
+ if (isConstrainedHighSpeed) {
+ camera_metadata_ro_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+ bool isConstrainedHighSpeedSupported = false;
+ for(size_t i = 0; i < entry.count; ++i) {
+ uint8_t capability = entry.data.u8[i];
+ if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
+ isConstrainedHighSpeedSupported = true;
+ break;
+ }
+ }
+ if (!isConstrainedHighSpeedSupported) {
+ String8 msg = String8::format(
+ "Camera %s: Try to create a constrained high speed configuration on a device"
+ " that doesn't support it.", cameraId.string());
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
+ }
+ }
+
+ return binder::Status::ok();
+}
+
binder::Status
SessionConfigurationUtils::convertToHALStreamCombination(
const SessionConfiguration& sessionConfiguration,
const String8 &logicalCameraId, const CameraMetadata &deviceInfo,
metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration, bool *earlyExit) {
- // TODO: http://b/148329298 Move the other dependencies from
- // CameraDeviceClient into SessionConfigurationUtils.
- return CameraDeviceClient::convertToHALStreamCombination(sessionConfiguration, logicalCameraId,
- deviceInfo, getMetadata, physicalCameraIds, streamConfiguration, earlyExit);
+
+ auto operatingMode = sessionConfiguration.getOperatingMode();
+ binder::Status res = checkOperatingMode(operatingMode, deviceInfo, logicalCameraId);
+ if (!res.isOk()) {
+ return res;
+ }
+
+ if (earlyExit == nullptr) {
+ String8 msg("earlyExit nullptr");
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ *earlyExit = false;
+ auto ret = Camera3Device::mapToStreamConfigurationMode(
+ static_cast<camera3_stream_configuration_mode_t> (operatingMode),
+ /*out*/ &streamConfiguration.operationMode);
+ if (ret != OK) {
+ String8 msg = String8::format(
+ "Camera %s: Failed mapping operating mode %d requested: %s (%d)",
+ logicalCameraId.string(), operatingMode, strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
+ }
+
+ bool isInputValid = (sessionConfiguration.getInputWidth() > 0) &&
+ (sessionConfiguration.getInputHeight() > 0) &&
+ (sessionConfiguration.getInputFormat() > 0);
+ auto outputConfigs = sessionConfiguration.getOutputConfigurations();
+ size_t streamCount = outputConfigs.size();
+ streamCount = isInputValid ? streamCount + 1 : streamCount;
+ streamConfiguration.streams.resize(streamCount);
+ size_t streamIdx = 0;
+ if (isInputValid) {
+ streamConfiguration.streams[streamIdx++] = {{/*streamId*/0,
+ hardware::camera::device::V3_2::StreamType::INPUT,
+ static_cast<uint32_t> (sessionConfiguration.getInputWidth()),
+ static_cast<uint32_t> (sessionConfiguration.getInputHeight()),
+ Camera3Device::mapToPixelFormat(sessionConfiguration.getInputFormat()),
+ /*usage*/ 0, HAL_DATASPACE_UNKNOWN,
+ hardware::camera::device::V3_2::StreamRotation::ROTATION_0},
+ /*physicalId*/ nullptr, /*bufferSize*/0};
+ }
+
+ for (const auto &it : outputConfigs) {
+ const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
+ it.getGraphicBufferProducers();
+ bool deferredConsumer = it.isDeferred();
+ String8 physicalCameraId = String8(it.getPhysicalCameraId());
+ size_t numBufferProducers = bufferProducers.size();
+ bool isStreamInfoValid = false;
+ OutputStreamInfo streamInfo;
+
+ res = checkSurfaceType(numBufferProducers, deferredConsumer, it.getSurfaceType());
+ if (!res.isOk()) {
+ return res;
+ }
+ res = checkPhysicalCameraId(physicalCameraIds, physicalCameraId,
+ logicalCameraId);
+ if (!res.isOk()) {
+ return res;
+ }
+
+ if (deferredConsumer) {
+ streamInfo.width = it.getWidth();
+ streamInfo.height = it.getHeight();
+ streamInfo.format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+ streamInfo.dataSpace = android_dataspace_t::HAL_DATASPACE_UNKNOWN;
+ auto surfaceType = it.getSurfaceType();
+ streamInfo.consumerUsage = GraphicBuffer::USAGE_HW_TEXTURE;
+ if (surfaceType == OutputConfiguration::SURFACE_TYPE_SURFACE_VIEW) {
+ streamInfo.consumerUsage |= GraphicBuffer::USAGE_HW_COMPOSER;
+ }
+ mapStreamInfo(streamInfo, CAMERA3_STREAM_ROTATION_0, physicalCameraId,
+ &streamConfiguration.streams[streamIdx++]);
+ isStreamInfoValid = true;
+
+ if (numBufferProducers == 0) {
+ continue;
+ }
+ }
+
+ for (auto& bufferProducer : bufferProducers) {
+ sp<Surface> surface;
+ const CameraMetadata &physicalDeviceInfo = getMetadata(physicalCameraId);
+ res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer,
+ logicalCameraId,
+ physicalCameraId.size() > 0 ? physicalDeviceInfo : deviceInfo );
+
+ if (!res.isOk())
+ return res;
+
+ if (!isStreamInfoValid) {
+ bool isDepthCompositeStream =
+ camera3::DepthCompositeStream::isDepthCompositeStream(surface);
+ bool isHeicCompositeStream =
+ camera3::HeicCompositeStream::isHeicCompositeStream(surface);
+ if (isDepthCompositeStream || isHeicCompositeStream) {
+ // We need to take into account that composite streams can have
+ // additional internal camera streams.
+ std::vector<OutputStreamInfo> compositeStreams;
+ if (isDepthCompositeStream) {
+ ret = camera3::DepthCompositeStream::getCompositeStreamInfo(streamInfo,
+ deviceInfo, &compositeStreams);
+ } else {
+ ret = camera3::HeicCompositeStream::getCompositeStreamInfo(streamInfo,
+ deviceInfo, &compositeStreams);
+ }
+ if (ret != OK) {
+ String8 msg = String8::format(
+ "Camera %s: Failed adding composite streams: %s (%d)",
+ logicalCameraId.string(), strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ if (compositeStreams.size() == 0) {
+ // No internal streams means composite stream not
+ // supported.
+ *earlyExit = true;
+ return binder::Status::ok();
+ } else if (compositeStreams.size() > 1) {
+ streamCount += compositeStreams.size() - 1;
+ streamConfiguration.streams.resize(streamCount);
+ }
+
+ for (const auto& compositeStream : compositeStreams) {
+ mapStreamInfo(compositeStream,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
+ } else {
+ mapStreamInfo(streamInfo,
+ static_cast<camera3_stream_rotation_t> (it.getRotation()),
+ physicalCameraId, &streamConfiguration.streams[streamIdx++]);
+ }
+ isStreamInfoValid = true;
+ }
+ }
+ }
+ return binder::Status::ok();
+
}
}// namespace android
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index cfb9f17..6ce2cd7 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -23,6 +23,9 @@
#include <camera/camera2/SubmitInfo.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
+#include <hardware/camera3.h>
+#include <device3/Camera3StreamInterface.h>
+
#include <stdint.h>
namespace android {
@@ -31,6 +34,41 @@
class SessionConfigurationUtils {
public:
+
+ static int64_t euclidDistSquare(int32_t x0, int32_t y0, int32_t x1, int32_t y1);
+
+ // Find the closest dimensions for a given format in available stream configurations with
+ // a width <= ROUNDING_WIDTH_CAP
+ static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
+ android_dataspace dataSpace, const CameraMetadata& info,
+ /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
+
+ //check if format is not custom format
+ static bool isPublicFormat(int32_t format);
+
+ // Create a Surface from an IGraphicBufferProducer. Returns error if
+ // IGraphicBufferProducer's property doesn't match with streamInfo
+ static binder::Status createSurfaceFromGbp(
+ camera3::OutputStreamInfo& streamInfo, bool isStreamInfoValid,
+ sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp,
+ const String8 &cameraId, const CameraMetadata &physicalCameraMetadata);
+
+ static void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
+ camera3_stream_rotation_t rotation, String8 physicalId,
+ hardware::camera::device::V3_4::Stream *stream /*out*/);
+
+ // Check that the physicalCameraId passed in is supported by the camera
+ // device.
+ static binder::Status checkPhysicalCameraId(
+ const std::vector<std::string> &physicalCameraIds, const String8 &physicalCameraId,
+ const String8 &logicalCameraId);
+
+ static binder::Status checkSurfaceType(size_t numBufferProducers,
+ bool deferredConsumer, int surfaceType);
+
+ static binder::Status checkOperatingMode(int operatingMode,
+ const CameraMetadata &staticInfo, const String8 &cameraId);
+
// utility function to convert AIDL SessionConfiguration to HIDL
// streamConfiguration. Also checks for validity of SessionConfiguration and
// returns a non-ok binder::Status if the passed in session configuration
@@ -41,6 +79,10 @@
metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
hardware::camera::device::V3_4::StreamConfiguration &streamConfiguration,
bool *earlyExit);
+
+ static const int32_t MAX_SURFACES_PER_STREAM = 4;
+
+ static const int32_t ROUNDING_WIDTH_CAP = 1920;
};
} // android
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index 05bbbc7..dc0773b 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -19,8 +19,6 @@
"libmedia_headers",
],
- init_rc: ["mediaswcodec.rc"],
-
cflags: [
"-Werror",
"-Wall",
diff --git a/services/mediacodec/mediaswcodec.rc b/services/mediacodec/mediaswcodec.rc
deleted file mode 100644
index 3549666..0000000
--- a/services/mediacodec/mediaswcodec.rc
+++ /dev/null
@@ -1,7 +0,0 @@
-service media.swcodec /system/bin/mediaswcodec
- class main
- user mediacodec
- group camera drmrpc mediadrm
- updatable
- ioprio rt 4
- writepid /dev/cpuset/foreground/tasks
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index aa44447..5d044bb 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -76,6 +76,7 @@
{"AUDIO_DEVICE_IN_ECHO_REFERENCE", 1LL << 27},
{"AUDIO_DEVICE_IN_DEFAULT", 1LL << 28},
// R values above.
+ {"AUDIO_DEVICE_IN_BLE_HEADSET", 1LL << 29},
};
return map;
}
@@ -121,6 +122,8 @@
{"AUDIO_DEVICE_OUT_ECHO_CANCELLER", 1LL << 29},
{"AUDIO_DEVICE_OUT_DEFAULT", 1LL << 30},
// R values above.
+ {"AUDIO_DEVICE_OUT_BLE_HEADSET", 1LL << 31},
+ {"AUDIO_DEVICE_OUT_BLE_SPEAKER", 1LL << 32},
};
return map;
}
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index 48e766e..bf6e428 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -468,6 +468,7 @@
"codec",
"extractor",
"mediadrm",
+ "mediaparser",
"nuplayer",
}) {
if (key == allowedKey) {
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index 0d53c5e..cdf5a4e 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -1,8 +1,53 @@
+filegroup {
+ name: "resourcemanager_aidl",
+ srcs: [
+ "aidl/android/media/IResourceManagerClient.aidl",
+ "aidl/android/media/IResourceManagerService.aidl",
+ "aidl/android/media/MediaResourceType.aidl",
+ "aidl/android/media/MediaResourceSubType.aidl",
+ "aidl/android/media/MediaResourceParcel.aidl",
+ "aidl/android/media/MediaResourcePolicyParcel.aidl",
+ ],
+ path: "aidl",
+}
+
+filegroup {
+ name: "resourceobserver_aidl",
+ srcs: [
+ "aidl/android/media/IResourceObserver.aidl",
+ "aidl/android/media/IResourceObserverService.aidl",
+ "aidl/android/media/MediaObservableEvent.aidl",
+ "aidl/android/media/MediaObservableFilter.aidl",
+ "aidl/android/media/MediaObservableType.aidl",
+ "aidl/android/media/MediaObservableParcel.aidl",
+ ],
+ path: "aidl",
+}
+
+aidl_interface {
+ name: "resourcemanager_aidl_interface",
+ unstable: true,
+ local_include_dir: "aidl",
+ srcs: [
+ ":resourcemanager_aidl",
+ ],
+}
+
+aidl_interface {
+ name: "resourceobserver_aidl_interface",
+ unstable: true,
+ local_include_dir: "aidl",
+ srcs: [
+ ":resourceobserver_aidl",
+ ],
+}
+
cc_library {
name: "libresourcemanagerservice",
srcs: [
"ResourceManagerService.cpp",
+ "ResourceObserverService.cpp",
"ServiceLog.cpp",
],
@@ -15,6 +60,10 @@
"liblog",
],
+ static_libs: [
+ "resourceobserver_aidl_interface-ndk_platform",
+ ],
+
include_dirs: ["frameworks/av/include"],
cflags: [
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index db06a36..7bb606e 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -36,18 +36,54 @@
#include <unistd.h>
#include "ResourceManagerService.h"
+#include "ResourceObserverService.h"
#include "ServiceLog.h"
namespace android {
+//static
+std::mutex ResourceManagerService::sCookieLock;
+//static
+uintptr_t ResourceManagerService::sCookieCounter = 0;
+//static
+std::map<uintptr_t, sp<DeathNotifier> > ResourceManagerService::sCookieToDeathNotifierMap;
+
+class DeathNotifier : public RefBase {
+public:
+ DeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
+ int pid, int64_t clientId);
+
+ virtual ~DeathNotifier() {}
+
+ // Implement death recipient
+ static void BinderDiedCallback(void* cookie);
+ virtual void binderDied();
+
+protected:
+ std::weak_ptr<ResourceManagerService> mService;
+ int mPid;
+ int64_t mClientId;
+};
+
DeathNotifier::DeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
int pid, int64_t clientId)
: mService(service), mPid(pid), mClientId(clientId) {}
//static
void DeathNotifier::BinderDiedCallback(void* cookie) {
- auto thiz = static_cast<DeathNotifier*>(cookie);
- thiz->binderDied();
+ sp<DeathNotifier> notifier;
+ {
+ std::scoped_lock lock{ResourceManagerService::sCookieLock};
+ auto it = ResourceManagerService::sCookieToDeathNotifierMap.find(
+ reinterpret_cast<uintptr_t>(cookie));
+ if (it == ResourceManagerService::sCookieToDeathNotifierMap.end()) {
+ return;
+ }
+ notifier = it->second;
+ }
+ if (notifier.get() != nullptr) {
+ notifier->binderDied();
+ }
}
void DeathNotifier::binderDied() {
@@ -61,7 +97,27 @@
service->overridePid(mPid, -1);
// thiz is freed in the call below, so it must be last call referring thiz
service->removeResource(mPid, mClientId, false);
+}
+class OverrideProcessInfoDeathNotifier : public DeathNotifier {
+public:
+ OverrideProcessInfoDeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
+ int pid) : DeathNotifier(service, pid, 0) {}
+
+ virtual ~OverrideProcessInfoDeathNotifier() {}
+
+ virtual void binderDied();
+};
+
+void OverrideProcessInfoDeathNotifier::binderDied() {
+ // Don't check for pid validity since we know it's already dead.
+ std::shared_ptr<ResourceManagerService> service = mService.lock();
+ if (service == nullptr) {
+ ALOGW("ResourceManagerService is dead as well.");
+ return;
+ }
+
+ service->removeProcessInfoOverride(mPid);
}
template <typename T>
@@ -116,6 +172,7 @@
info.uid = uid;
info.clientId = clientId;
info.client = client;
+ info.cookie = 0;
info.pendingRemoval = false;
index = infos.add(clientId, info);
@@ -267,6 +324,13 @@
if (status != STATUS_OK) {
return;
}
+
+ std::shared_ptr<ResourceObserverService> observerService =
+ ResourceObserverService::instantiate();
+
+ if (observerService != nullptr) {
+ service->setObserverService(observerService);
+ }
// TODO: mediaserver main() is already starting the thread pool,
// move this to mediaserver main() when other services in mediaserver
// are converted to ndk-platform aidl.
@@ -275,6 +339,11 @@
ResourceManagerService::~ResourceManagerService() {}
+void ResourceManagerService::setObserverService(
+ const std::shared_ptr<ResourceObserverService>& observerService) {
+ mObserverService = observerService;
+}
+
Status ResourceManagerService::config(const std::vector<MediaResourcePolicyParcel>& policies) {
String8 log = String8::format("config(%s)", getString(policies).string());
mServiceLog->add(log);
@@ -358,6 +427,7 @@
}
ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
ResourceInfo& info = getResourceInfoForEdit(uid, clientId, client, infos);
+ ResourceList resourceAdded;
for (size_t i = 0; i < resources.size(); ++i) {
const auto &res = resources[i];
@@ -379,11 +449,20 @@
} else {
mergeResources(info.resources[resType], res);
}
+ // Add it to the list of added resources for observers.
+ auto it = resourceAdded.find(resType);
+ if (it == resourceAdded.end()) {
+ resourceAdded[resType] = res;
+ } else {
+ mergeResources(it->second, res);
+ }
}
- if (info.deathNotifier == nullptr && client != nullptr) {
- info.deathNotifier = new DeathNotifier(ref<ResourceManagerService>(), pid, clientId);
- AIBinder_linkToDeath(client->asBinder().get(),
- mDeathRecipient.get(), info.deathNotifier.get());
+ if (info.cookie == 0 && client != nullptr) {
+ info.cookie = addCookieAndLink_l(client->asBinder(),
+ new DeathNotifier(ref<ResourceManagerService>(), pid, clientId));
+ }
+ if (mObserverService != nullptr && !resourceAdded.empty()) {
+ mObserverService->onResourceAdded(uid, pid, resourceAdded);
}
notifyResourceGranted(pid, resources);
return Status::ok();
@@ -415,7 +494,7 @@
}
ResourceInfo &info = infos.editValueAt(index);
-
+ ResourceList resourceRemoved;
for (size_t i = 0; i < resources.size(); ++i) {
const auto &res = resources[i];
const auto resType = std::tuple(res.type, res.subType, res.id);
@@ -427,14 +506,27 @@
// ignore if we don't have it
if (info.resources.find(resType) != info.resources.end()) {
MediaResourceParcel &resource = info.resources[resType];
+ MediaResourceParcel actualRemoved = res;
if (resource.value > res.value) {
resource.value -= res.value;
} else {
onLastRemoved(res, info);
info.resources.erase(resType);
+ actualRemoved.value = resource.value;
+ }
+
+ // Add it to the list of removed resources for observers.
+ auto it = resourceRemoved.find(resType);
+ if (it == resourceRemoved.end()) {
+ resourceRemoved[resType] = actualRemoved;
+ } else {
+ mergeResources(it->second, actualRemoved);
}
}
}
+ if (mObserverService != nullptr && !resourceRemoved.empty()) {
+ mObserverService->onResourceRemoved(info.uid, pid, resourceRemoved);
+ }
return Status::ok();
}
@@ -472,8 +564,11 @@
onLastRemoved(it->second, info);
}
- AIBinder_unlinkToDeath(info.client->asBinder().get(),
- mDeathRecipient.get(), info.deathNotifier.get());
+ removeCookieAndUnlink_l(info.client->asBinder(), info.cookie);
+
+ if (mObserverService != nullptr && !info.resources.empty()) {
+ mObserverService->onResourceRemoved(info.uid, pid, info.resources);
+ }
infos.removeItemsAt(index);
return Status::ok();
@@ -656,6 +751,83 @@
return Status::ok();
}
+Status ResourceManagerService::overrideProcessInfo(
+ const std::shared_ptr<IResourceManagerClient>& client,
+ int pid,
+ int procState,
+ int oomScore) {
+ String8 log = String8::format("overrideProcessInfo(pid %d, procState %d, oomScore %d)",
+ pid, procState, oomScore);
+ mServiceLog->add(log);
+
+ // Only allow the override if the caller already can access process state and oom scores.
+ int callingPid = AIBinder_getCallingPid();
+ if (callingPid != getpid() && (callingPid != pid || !checkCallingPermission(String16(
+ "android.permission.GET_PROCESS_STATE_AND_OOM_SCORE")))) {
+ ALOGE("Permission Denial: overrideProcessInfo method from pid=%d", callingPid);
+ return Status::fromServiceSpecificError(PERMISSION_DENIED);
+ }
+
+ if (client == nullptr) {
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ Mutex::Autolock lock(mLock);
+ removeProcessInfoOverride_l(pid);
+
+ if (!mProcessInfo->overrideProcessInfo(pid, procState, oomScore)) {
+ // Override value is rejected by ProcessInfo.
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ uintptr_t cookie = addCookieAndLink_l(client->asBinder(),
+ new OverrideProcessInfoDeathNotifier(ref<ResourceManagerService>(), pid));
+
+ mProcessInfoOverrideMap.emplace(pid, ProcessInfoOverride{cookie, client});
+
+ return Status::ok();
+}
+
+uintptr_t ResourceManagerService::addCookieAndLink_l(
+ ::ndk::SpAIBinder binder, const sp<DeathNotifier>& notifier) {
+ std::scoped_lock lock{sCookieLock};
+
+ uintptr_t cookie;
+ // Need to skip cookie 0 (if it wraps around). ResourceInfo has cookie initialized to 0
+ // indicating the death notifier is not created yet.
+ while ((cookie = ++sCookieCounter) == 0);
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(), (void*)cookie);
+ sCookieToDeathNotifierMap.emplace(cookie, notifier);
+
+ return cookie;
+}
+
+void ResourceManagerService::removeCookieAndUnlink_l(
+ ::ndk::SpAIBinder binder, uintptr_t cookie) {
+ std::scoped_lock lock{sCookieLock};
+ AIBinder_unlinkToDeath(binder.get(), mDeathRecipient.get(), (void*)cookie);
+ sCookieToDeathNotifierMap.erase(cookie);
+}
+
+void ResourceManagerService::removeProcessInfoOverride(int pid) {
+ Mutex::Autolock lock(mLock);
+
+ removeProcessInfoOverride_l(pid);
+}
+
+void ResourceManagerService::removeProcessInfoOverride_l(int pid) {
+ auto it = mProcessInfoOverrideMap.find(pid);
+ if (it == mProcessInfoOverrideMap.end()) {
+ return;
+ }
+
+ mProcessInfo->removeProcessInfoOverride(pid);
+
+ removeCookieAndUnlink_l(it->second.client->asBinder(), it->second.cookie);
+
+ mProcessInfoOverrideMap.erase(pid);
+}
+
Status ResourceManagerService::markClientForPendingRemoval(int32_t pid, int64_t clientId) {
String8 log = String8::format(
"markClientForPendingRemoval(pid %d, clientId %lld)",
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 7f18ed3..9c2636e 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -19,6 +19,7 @@
#define ANDROID_MEDIA_RESOURCEMANAGERSERVICE_H
#include <map>
+#include <mutex>
#include <aidl/android/media/BnResourceManagerService.h>
#include <arpa/inet.h>
@@ -33,6 +34,7 @@
class DeathNotifier;
class ResourceManagerService;
+class ResourceObserverService;
class ServiceLog;
struct ProcessInfoInterface;
@@ -50,7 +52,7 @@
int64_t clientId;
uid_t uid;
std::shared_ptr<IResourceManagerClient> client;
- sp<DeathNotifier> deathNotifier;
+ uintptr_t cookie{0};
ResourceList resources;
bool pendingRemoval{false};
};
@@ -59,22 +61,6 @@
typedef KeyedVector<int64_t, ResourceInfo> ResourceInfos;
typedef KeyedVector<int, ResourceInfos> PidResourceInfosMap;
-class DeathNotifier : public RefBase {
-public:
- DeathNotifier(const std::shared_ptr<ResourceManagerService> &service,
- int pid, int64_t clientId);
-
- ~DeathNotifier() {}
-
- // Implement death recipient
- static void BinderDiedCallback(void* cookie);
- void binderDied();
-
-private:
- std::weak_ptr<ResourceManagerService> mService;
- int mPid;
- int64_t mClientId;
-};
class ResourceManagerService : public BnResourceManagerService {
public:
struct SystemCallbackInterface : public RefBase {
@@ -95,6 +81,8 @@
const sp<ProcessInfoInterface> &processInfo,
const sp<SystemCallbackInterface> &systemResource);
virtual ~ResourceManagerService();
+ void setObserverService(
+ const std::shared_ptr<ResourceObserverService>& observerService);
// IResourceManagerService interface
Status config(const std::vector<MediaResourcePolicyParcel>& policies) override;
@@ -125,6 +113,12 @@
int originalPid,
int newPid) override;
+ Status overrideProcessInfo(
+ const std::shared_ptr<IResourceManagerClient>& client,
+ int pid,
+ int procState,
+ int oomScore) override;
+
Status markClientForPendingRemoval(int32_t pid, int64_t clientId) override;
Status reclaimResourcesFromClientsPendingRemoval(int32_t pid) override;
@@ -133,6 +127,8 @@
private:
friend class ResourceManagerServiceTest;
+ friend class DeathNotifier;
+ friend class OverrideProcessInfoDeathNotifier;
// Reclaims resources from |clients|. Returns true if reclaim succeeded
// for all clients.
@@ -177,6 +173,12 @@
// Get priority from process's pid
bool getPriority_l(int pid, int* priority);
+ void removeProcessInfoOverride(int pid);
+
+ void removeProcessInfoOverride_l(int pid);
+ uintptr_t addCookieAndLink_l(::ndk::SpAIBinder binder, const sp<DeathNotifier>& notifier);
+ void removeCookieAndUnlink_l(::ndk::SpAIBinder binder, uintptr_t cookie);
+
mutable Mutex mLock;
sp<ProcessInfoInterface> mProcessInfo;
sp<SystemCallbackInterface> mSystemCB;
@@ -186,7 +188,17 @@
bool mSupportsSecureWithNonSecureCodec;
int32_t mCpuBoostCount;
::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+ struct ProcessInfoOverride {
+ uintptr_t cookie;
+ std::shared_ptr<IResourceManagerClient> client;
+ };
std::map<int, int> mOverridePidMap;
+ std::map<pid_t, ProcessInfoOverride> mProcessInfoOverrideMap;
+ static std::mutex sCookieLock;
+ static uintptr_t sCookieCounter GUARDED_BY(sCookieLock);
+ static std::map<uintptr_t, sp<DeathNotifier> > sCookieToDeathNotifierMap
+ GUARDED_BY(sCookieLock);
+ std::shared_ptr<ResourceObserverService> mObserverService;
};
// ----------------------------------------------------------------------------
diff --git a/services/mediaresourcemanager/ResourceObserverService.cpp b/services/mediaresourcemanager/ResourceObserverService.cpp
new file mode 100644
index 0000000..44fe72d
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceObserverService.cpp
@@ -0,0 +1,328 @@
+/**
+ *
+ * Copyright 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceObserverService"
+#include <utils/Log.h>
+
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <binder/IServiceManager.h>
+#include <utils/String16.h>
+#include <aidl/android/media/MediaResourceParcel.h>
+
+#include "ResourceObserverService.h"
+
+namespace aidl {
+namespace android {
+namespace media {
+bool operator<(const MediaObservableFilter& lhs, const MediaObservableFilter &rhs) {
+ return lhs.type < rhs.type || (lhs.type == rhs.type && lhs.eventFilter < rhs.eventFilter);
+}
+}}} // namespace ::aidl::android::media
+
+namespace android {
+
+using ::aidl::android::media::MediaResourceParcel;
+using ::aidl::android::media::MediaObservableEvent;
+
+// MediaObservableEvent will be used as uint64_t flags.
+static_assert(sizeof(MediaObservableEvent) == sizeof(uint64_t));
+
+static std::vector<MediaObservableEvent> sEvents = {
+ MediaObservableEvent::kBusy,
+ MediaObservableEvent::kIdle,
+};
+
+static MediaObservableType getObservableType(const MediaResourceParcel& res) {
+ if (res.subType == MediaResourceSubType::kVideoCodec) {
+ if (res.type == MediaResourceType::kNonSecureCodec) {
+ return MediaObservableType::kVideoNonSecureCodec;
+ }
+ if (res.type == MediaResourceType::kSecureCodec) {
+ return MediaObservableType::kVideoSecureCodec;
+ }
+ }
+ return MediaObservableType::kInvalid;
+}
+
+//static
+std::mutex ResourceObserverService::sDeathRecipientLock;
+//static
+std::map<uintptr_t, std::shared_ptr<ResourceObserverService::DeathRecipient> >
+ResourceObserverService::sDeathRecipientMap;
+
+struct ResourceObserverService::DeathRecipient {
+ DeathRecipient(ResourceObserverService* _service,
+ const std::shared_ptr<IResourceObserver>& _observer)
+ : service(_service), observer(_observer) {}
+ ~DeathRecipient() {}
+
+ void binderDied() {
+ if (service != nullptr) {
+ service->unregisterObserver(observer);
+ }
+ }
+
+ ResourceObserverService* service;
+ std::shared_ptr<IResourceObserver> observer;
+};
+
+// static
+void ResourceObserverService::BinderDiedCallback(void* cookie) {
+ uintptr_t id = reinterpret_cast<uintptr_t>(cookie);
+
+ ALOGW("Observer %lld is dead", (long long)id);
+
+ std::shared_ptr<DeathRecipient> recipient;
+
+ {
+ std::scoped_lock lock{sDeathRecipientLock};
+
+ auto it = sDeathRecipientMap.find(id);
+ if (it != sDeathRecipientMap.end()) {
+ recipient = it->second;
+ }
+ }
+
+ if (recipient != nullptr) {
+ recipient->binderDied();
+ }
+}
+
+//static
+std::shared_ptr<ResourceObserverService> ResourceObserverService::instantiate() {
+ std::shared_ptr<ResourceObserverService> observerService =
+ ::ndk::SharedRefBase::make<ResourceObserverService>();
+ binder_status_t status = AServiceManager_addService(observerService->asBinder().get(),
+ ResourceObserverService::getServiceName());
+ if (status != STATUS_OK) {
+ return nullptr;
+ }
+ return observerService;
+}
+
+ResourceObserverService::ResourceObserverService()
+ : mDeathRecipient(AIBinder_DeathRecipient_new(BinderDiedCallback)) {}
+
+binder_status_t ResourceObserverService::dump(
+ int fd, const char** /*args*/, uint32_t /*numArgs*/) {
+ String8 result;
+
+ if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+ result.format("Permission Denial: "
+ "can't dump ResourceManagerService from pid=%d, uid=%d\n",
+ AIBinder_getCallingPid(),
+ AIBinder_getCallingUid());
+ write(fd, result.string(), result.size());
+ return PERMISSION_DENIED;
+ }
+
+ result.appendFormat("ResourceObserverService: %p\n", this);
+ result.appendFormat(" Registered Observers: %zu\n", mObserverInfoMap.size());
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ for (auto &observer : mObserverInfoMap) {
+ result.appendFormat(" Observer %p:\n", observer.second.binder.get());
+ for (auto &observable : observer.second.filters) {
+ String8 enabledEventsStr;
+ for (auto &event : sEvents) {
+ if (((uint64_t)observable.eventFilter & (uint64_t)event) != 0) {
+ if (!enabledEventsStr.isEmpty()) {
+ enabledEventsStr.append("|");
+ }
+ enabledEventsStr.append(toString(event).c_str());
+ }
+ }
+ result.appendFormat(" %s: %s\n",
+ toString(observable.type).c_str(), enabledEventsStr.c_str());
+ }
+ }
+ }
+
+ write(fd, result.string(), result.size());
+ return OK;
+}
+
+Status ResourceObserverService::registerObserver(
+ const std::shared_ptr<IResourceObserver>& in_observer,
+ const std::vector<MediaObservableFilter>& in_filters) {
+ if ((getpid() != AIBinder_getCallingPid()) &&
+ checkCallingPermission(
+ String16("android.permission.REGISTER_MEDIA_RESOURCE_OBSERVER")) == false) {
+ ALOGE("Permission Denial: "
+ "can't registerObserver from pid=%d, uid=%d\n",
+ AIBinder_getCallingPid(),
+ AIBinder_getCallingUid());
+ return Status::fromServiceSpecificError(PERMISSION_DENIED);
+ }
+
+ ::ndk::SpAIBinder binder = in_observer->asBinder();
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ if (mObserverInfoMap.find((uintptr_t)binder.get()) != mObserverInfoMap.end()) {
+ return Status::fromServiceSpecificError(ALREADY_EXISTS);
+ }
+
+ if (in_filters.empty()) {
+ return Status::fromServiceSpecificError(BAD_VALUE);
+ }
+
+ // Add observer info.
+ mObserverInfoMap.emplace((uintptr_t)binder.get(),
+ ObserverInfo{binder, in_observer, in_filters});
+
+ // Add observer to observable->subscribers map.
+ for (auto &filter : in_filters) {
+ for (auto &event : sEvents) {
+ if (!((uint64_t)filter.eventFilter & (uint64_t)event)) {
+ continue;
+ }
+ MediaObservableFilter key{filter.type, event};
+ mObservableToSubscribersMap[key].emplace((uintptr_t)binder.get(), in_observer);
+ }
+ }
+ }
+
+ // Add death binder and link.
+ uintptr_t cookie = (uintptr_t)binder.get();
+ {
+ std::scoped_lock lock{sDeathRecipientLock};
+ sDeathRecipientMap.emplace(
+ cookie, std::make_shared<DeathRecipient>(this, in_observer));
+ }
+
+ AIBinder_linkToDeath(binder.get(), mDeathRecipient.get(),
+ reinterpret_cast<void*>(cookie));
+
+ return Status::ok();
+}
+
+Status ResourceObserverService::unregisterObserver(
+ const std::shared_ptr<IResourceObserver>& in_observer) {
+ if ((getpid() != AIBinder_getCallingPid()) &&
+ checkCallingPermission(
+ String16("android.permission.REGISTER_MEDIA_RESOURCE_OBSERVER")) == false) {
+ ALOGE("Permission Denial: "
+ "can't unregisterObserver from pid=%d, uid=%d\n",
+ AIBinder_getCallingPid(),
+ AIBinder_getCallingUid());
+ return Status::fromServiceSpecificError(PERMISSION_DENIED);
+ }
+
+ ::ndk::SpAIBinder binder = in_observer->asBinder();
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ auto it = mObserverInfoMap.find((uintptr_t)binder.get());
+ if (it == mObserverInfoMap.end()) {
+ return Status::fromServiceSpecificError(NAME_NOT_FOUND);
+ }
+
+ // Remove observer from observable->subscribers map.
+ for (auto &filter : it->second.filters) {
+ for (auto &event : sEvents) {
+ if (!((uint64_t)filter.eventFilter & (uint64_t)event)) {
+ continue;
+ }
+ MediaObservableFilter key{filter.type, event};
+ mObservableToSubscribersMap[key].erase((uintptr_t)binder.get());
+
+ // Remove the entry if there are no more subscribers.
+ if (mObservableToSubscribersMap[key].empty()) {
+ mObservableToSubscribersMap.erase(key);
+ }
+ }
+ }
+
+ // Remove observer info.
+ mObserverInfoMap.erase(it);
+ }
+
+ // Unlink and remove death binder.
+ uintptr_t cookie = (uintptr_t)binder.get();
+ AIBinder_unlinkToDeath(binder.get(), mDeathRecipient.get(),
+ reinterpret_cast<void*>(cookie));
+
+ {
+ std::scoped_lock lock{sDeathRecipientLock};
+ sDeathRecipientMap.erase(cookie);
+ }
+
+ return Status::ok();
+}
+
+void ResourceObserverService::notifyObservers(
+ MediaObservableEvent event, int uid, int pid, const ResourceList &resources) {
+ struct CalleeInfo {
+ std::shared_ptr<IResourceObserver> observer;
+ std::vector<MediaObservableParcel> monitors;
+ };
+ // Build a consolidated list of observers to call with their respective observables.
+ std::map<uintptr_t, CalleeInfo> calleeList;
+
+ {
+ std::scoped_lock lock{mObserverLock};
+
+ for (auto &res : resources) {
+ // Skip if this resource doesn't map to any observable type.
+ MediaObservableType observableType = getObservableType(res.second);
+ if (observableType == MediaObservableType::kInvalid) {
+ continue;
+ }
+ MediaObservableFilter key{observableType, event};
+ // Skip if no one subscribed to this observable.
+ auto observableIt = mObservableToSubscribersMap.find(key);
+ if (observableIt == mObservableToSubscribersMap.end()) {
+ continue;
+ }
+ // Loop through all subscribers.
+ for (auto &subscriber : observableIt->second) {
+ auto calleeIt = calleeList.find(subscriber.first);
+ if (calleeIt == calleeList.end()) {
+ calleeList.emplace(subscriber.first, CalleeInfo{
+ subscriber.second, {{observableType, res.second.value}}});
+ } else {
+ calleeIt->second.monitors.push_back({observableType, res.second.value});
+ }
+ }
+ }
+ }
+
+ // Finally call the observers about the status change.
+ for (auto &calleeInfo : calleeList) {
+ calleeInfo.second.observer->onStatusChanged(
+ event, uid, pid, calleeInfo.second.monitors);
+ }
+}
+
+void ResourceObserverService::onResourceAdded(
+ int uid, int pid, const ResourceList &resources) {
+ notifyObservers(MediaObservableEvent::kBusy, uid, pid, resources);
+}
+
+void ResourceObserverService::onResourceRemoved(
+ int uid, int pid, const ResourceList &resources) {
+ notifyObservers(MediaObservableEvent::kIdle, uid, pid, resources);
+}
+
+} // namespace android
diff --git a/services/mediaresourcemanager/ResourceObserverService.h b/services/mediaresourcemanager/ResourceObserverService.h
new file mode 100644
index 0000000..46bc5fb
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceObserverService.h
@@ -0,0 +1,95 @@
+/**
+ *
+ * Copyright 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_RESOURCE_OBSERVER_SERVICE_H
+#define ANDROID_MEDIA_RESOURCE_OBSERVER_SERVICE_H
+
+#include <map>
+
+#include <aidl/android/media/BnResourceObserverService.h>
+#include "ResourceManagerService.h"
+
+namespace android {
+
+using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::BnResourceObserverService;
+using ::aidl::android::media::IResourceObserver;
+using ::aidl::android::media::MediaObservableFilter;
+using ::aidl::android::media::MediaObservableParcel;
+using ::aidl::android::media::MediaObservableType;
+using ::aidl::android::media::MediaObservableEvent;
+
+class ResourceObserverService : public BnResourceObserverService {
+public:
+
+ static char const *getServiceName() { return "media.resource_observer"; }
+ static std::shared_ptr<ResourceObserverService> instantiate();
+
+ virtual inline binder_status_t dump(
+ int /*fd*/, const char** /*args*/, uint32_t /*numArgs*/);
+
+ ResourceObserverService();
+ virtual ~ResourceObserverService() {}
+
+ // IResourceObserverService interface
+ Status registerObserver(const std::shared_ptr<IResourceObserver>& in_observer,
+ const std::vector<MediaObservableFilter>& in_filters) override;
+
+ Status unregisterObserver(const std::shared_ptr<IResourceObserver>& in_observer) override;
+ // ~IResourceObserverService interface
+
+ // Called by ResourceManagerService when resources are added.
+ void onResourceAdded(int uid, int pid, const ResourceList &resources);
+
+ // Called by ResourceManagerService when resources are removed.
+ void onResourceRemoved(int uid, int pid, const ResourceList &resources);
+
+private:
+ struct ObserverInfo {
+ ::ndk::SpAIBinder binder;
+ std::shared_ptr<IResourceObserver> observer;
+ std::vector<MediaObservableFilter> filters;
+ };
+ struct DeathRecipient;
+
+ // Below maps are all keyed on the observer's binder ptr value.
+ using ObserverInfoMap = std::map<uintptr_t, ObserverInfo>;
+ using SubscriberMap = std::map<uintptr_t, std::shared_ptr<IResourceObserver>>;
+
+ std::mutex mObserverLock;
+ // Binder->ObserverInfo
+ ObserverInfoMap mObserverInfoMap GUARDED_BY(mObserverLock);
+ // Observable(<type,event>)->Subscribers
+ std::map<MediaObservableFilter, SubscriberMap> mObservableToSubscribersMap
+ GUARDED_BY(mObserverLock);
+
+ ::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
+
+ // Binder death handling.
+ static std::mutex sDeathRecipientLock;
+ static std::map<uintptr_t, std::shared_ptr<DeathRecipient>> sDeathRecipientMap
+ GUARDED_BY(sDeathRecipientLock);
+ static void BinderDiedCallback(void* cookie);
+
+ void notifyObservers(MediaObservableEvent event,
+ int uid, int pid, const ResourceList &resources);
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_MEDIA_RESOURCE_OBSERVER_SERVICE_H
diff --git a/services/mediaresourcemanager/TEST_MAPPING b/services/mediaresourcemanager/TEST_MAPPING
index 418b159..52ad441 100644
--- a/services/mediaresourcemanager/TEST_MAPPING
+++ b/services/mediaresourcemanager/TEST_MAPPING
@@ -5,6 +5,9 @@
},
{
"name": "ServiceLog_test"
+ },
+ {
+ "name": "ResourceObserverService_test"
}
]
}
diff --git a/media/libmedia/aidl/android/media/IResourceManagerClient.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceManagerClient.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/IResourceManagerClient.aidl
rename to services/mediaresourcemanager/aidl/android/media/IResourceManagerClient.aidl
diff --git a/media/libmedia/aidl/android/media/IResourceManagerService.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
similarity index 79%
rename from media/libmedia/aidl/android/media/IResourceManagerService.aidl
rename to services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
index 621bd84..7a0a50f 100644
--- a/media/libmedia/aidl/android/media/IResourceManagerService.aidl
+++ b/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
@@ -96,6 +96,28 @@
void overridePid(int originalPid, int newPid);
/**
+ * Override the process state and OOM score of the calling process with the
+ * specified values. This is used by native service processes to specify
+ * these values for ResourceManagerService to use. ResourceManagerService usually
+ * gets these values from ActivityManagerService, however, ActivityManagerService
+ * doesn't track native service processes.
+ *
+ * @param client a token for the ResourceManagerService to link to the caller and
+ * receive notification if it goes away. This is needed for clearing
+ * the overrides.
+ * @param pid pid of the calling process.
+ * @param procState the process state value that ResourceManagerService should
+ * use for this pid.
+ * @param oomScore the oom score value that ResourceManagerService should
+ * use for this pid.
+ */
+ void overrideProcessInfo(
+ IResourceManagerClient client,
+ int pid,
+ int procState,
+ int oomScore);
+
+ /**
* Mark a client for pending removal
*
* @param pid pid from which the client's resources will be removed.
diff --git a/services/mediaresourcemanager/aidl/android/media/IResourceObserver.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceObserver.aidl
new file mode 100644
index 0000000..462009a
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/IResourceObserver.aidl
@@ -0,0 +1,39 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaObservableEvent;
+import android.media.MediaObservableParcel;
+
+/**
+ * IResourceObserver interface for receiving observable resource updates
+ * from IResourceObserverService.
+ *
+ * {@hide}
+ */
+interface IResourceObserver {
+ /**
+ * Called when the status of an observed resource has changed for a client.
+ *
+ * @param event the status change that happened to the resource.
+ * @param uid uid to which the resource is associated.
+ * @param pid pid to which the resource is associated.
+ * @param observables the resources whose status has changed.
+ */
+ oneway void onStatusChanged(MediaObservableEvent event,
+ int uid, int pid, in MediaObservableParcel[] observables);
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/IResourceObserverService.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceObserverService.aidl
new file mode 100644
index 0000000..08f4ca0
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/IResourceObserverService.aidl
@@ -0,0 +1,49 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.IResourceObserver;
+import android.media.MediaObservableFilter;
+
+/**
+ * IResourceObserverService interface for registering an IResourceObserver
+ * callback to receive status updates about observable media resources.
+ *
+ * {@hide}
+ */
+interface IResourceObserverService {
+
+ /**
+ * Register an observer on the IResourceObserverService to receive
+ * status updates for observable resources.
+ *
+ * @param observer the observer to register.
+ * @param filters an array of filters for resources and events to receive
+ * updates for.
+ */
+ void registerObserver(
+ IResourceObserver observer,
+ in MediaObservableFilter[] filters);
+
+ /**
+ * Unregister an observer from the IResourceObserverService.
+ * The observer will stop receiving the status updates.
+ *
+ * @param observer the observer to unregister.
+ */
+ void unregisterObserver(IResourceObserver observer);
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableEvent.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableEvent.aidl
new file mode 100644
index 0000000..56ab24d
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableEvent.aidl
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Enums for media observable events.
+ *
+ * These values are used as bitmasks to indicate the events that the
+ * observer is interested in, in the MediaObservableFilter objects passed to
+ * IResourceObserverService::registerObserver().
+ *
+ * {@hide}
+ */
+@Backing(type="long")
+enum MediaObservableEvent {
+ /**
+ * A media resource is granted to a client and becomes busy.
+ */
+ kBusy = 1,
+
+ /**
+ * A media resource is released by a client and becomes idle.
+ */
+ kIdle = 2,
+
+ /**
+ * A bitmask that covers all observable events defined.
+ */
+ kAll = ~0,
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableFilter.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableFilter.aidl
new file mode 100644
index 0000000..38f7e39
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableFilter.aidl
@@ -0,0 +1,43 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaObservableType;
+import android.media.MediaObservableEvent;
+
+/**
+ * Description of an observable resource and its associated events that the
+ * observer is interested in.
+ *
+ * {@hide}
+ */
+parcelable MediaObservableFilter {
+ /**
+ * Type of the observable media resource.
+ */
+ MediaObservableType type;
+
+ /**
+ * Events that the observer is interested in.
+ *
+ * This field is a bitwise-OR of the events in MediaObservableEvent. If a
+ * particular event's bit is set, it means that updates should be sent for
+ * that event. For example, if the observer is only interested in receiving
+ * updates when a resource becomes available, it should only set 'kIdle'.
+ */
+ MediaObservableEvent eventFilter;
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableParcel.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableParcel.aidl
new file mode 100644
index 0000000..c4233e1
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableParcel.aidl
@@ -0,0 +1,37 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaObservableType;
+
+/**
+ * Description of an observable resource whose status has changed.
+ *
+ * {@hide}
+ */
+parcelable MediaObservableParcel {
+ /**
+ * Type of the observable media resource.
+ */
+ MediaObservableType type;// = MediaObservableType::kInvalid;
+
+ /**
+ * Number of units of the observable resource (number of codecs, bytes of
+ * graphic memory, etc.).
+ */
+ long value = 0;
+}
diff --git a/services/mediaresourcemanager/aidl/android/media/MediaObservableType.aidl b/services/mediaresourcemanager/aidl/android/media/MediaObservableType.aidl
new file mode 100644
index 0000000..ed202da
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/MediaObservableType.aidl
@@ -0,0 +1,35 @@
+/**
+ * Copyright (c) 2020, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Type enums of observable media resources.
+ *
+ * {@hide}
+ */
+@Backing(type="int")
+enum MediaObservableType {
+ kInvalid = 0,
+
+ //kVideoStart = 1000,
+ kVideoSecureCodec = 1000,
+ kVideoNonSecureCodec = 1001,
+
+ //kAudioStart = 2000,
+
+ //kGraphicMemory = 3000,
+}
diff --git a/media/libmedia/aidl/android/media/MediaResourceParcel.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourceParcel.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourceParcel.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourceParcel.aidl
diff --git a/media/libmedia/aidl/android/media/MediaResourcePolicyParcel.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourcePolicyParcel.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourcePolicyParcel.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourcePolicyParcel.aidl
diff --git a/media/libmedia/aidl/android/media/MediaResourceSubType.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourceSubType.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourceSubType.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourceSubType.aidl
diff --git a/media/libmedia/aidl/android/media/MediaResourceType.aidl b/services/mediaresourcemanager/aidl/android/media/MediaResourceType.aidl
similarity index 100%
rename from media/libmedia/aidl/android/media/MediaResourceType.aidl
rename to services/mediaresourcemanager/aidl/android/media/MediaResourceType.aidl
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
index 6b2ef69..308ee91 100644
--- a/services/mediaresourcemanager/test/Android.bp
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -40,3 +40,28 @@
"-Wall",
],
}
+
+cc_test {
+ name: "ResourceObserverService_test",
+ srcs: ["ResourceObserverService_test.cpp"],
+ test_suites: ["device-tests"],
+ static_libs: [
+ "libresourcemanagerservice",
+ "resourceobserver_aidl_interface-ndk_platform",
+ ],
+ shared_libs: [
+ "libbinder",
+ "libbinder_ndk",
+ "liblog",
+ "libmedia",
+ "libutils",
+ ],
+ include_dirs: [
+ "frameworks/av/include",
+ "frameworks/av/services/mediaresourcemanager",
+ ],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
diff --git a/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
new file mode 100644
index 0000000..4cf5f0a
--- /dev/null
+++ b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "ResourceManagerService.h"
+#include <aidl/android/media/BnResourceManagerClient.h>
+#include <media/MediaResource.h>
+#include <media/MediaResourcePolicy.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ProcessInfoInterface.h>
+
+namespace aidl {
+namespace android {
+namespace media {
+bool operator== (const MediaResourceParcel& lhs, const MediaResourceParcel& rhs) {
+ return lhs.type == rhs.type && lhs.subType == rhs.subType &&
+ lhs.id == rhs.id && lhs.value == rhs.value;
+}
+}}}
+
+namespace android {
+
+using Status = ::ndk::ScopedAStatus;
+using ::aidl::android::media::BnResourceManagerClient;
+using ::aidl::android::media::IResourceManagerService;
+using ::aidl::android::media::IResourceManagerClient;
+using ::aidl::android::media::MediaResourceParcel;
+
+static int64_t getId(const std::shared_ptr<IResourceManagerClient>& client) {
+ return (int64_t) client.get();
+}
+
+struct TestProcessInfo : public ProcessInfoInterface {
+ TestProcessInfo() {}
+ virtual ~TestProcessInfo() {}
+
+ virtual bool getPriority(int pid, int *priority) {
+ // For testing, use pid as priority.
+ // Lower the value higher the priority.
+ *priority = pid;
+ return true;
+ }
+
+ virtual bool isValidPid(int /* pid */) {
+ return true;
+ }
+
+ virtual bool overrideProcessInfo(
+ int /* pid */, int /* procState */, int /* oomScore */) {
+ return true;
+ }
+
+ virtual void removeProcessInfoOverride(int /* pid */) {
+ }
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
+};
+
+struct TestSystemCallback :
+ public ResourceManagerService::SystemCallbackInterface {
+ TestSystemCallback() :
+ mLastEvent({EventType::INVALID, 0}), mEventCount(0) {}
+
+ enum EventType {
+ INVALID = -1,
+ VIDEO_ON = 0,
+ VIDEO_OFF = 1,
+ VIDEO_RESET = 2,
+ CPUSET_ENABLE = 3,
+ CPUSET_DISABLE = 4,
+ };
+
+ struct EventEntry {
+ EventType type;
+ int arg;
+ };
+
+ virtual void noteStartVideo(int uid) override {
+ mLastEvent = {EventType::VIDEO_ON, uid};
+ mEventCount++;
+ }
+
+ virtual void noteStopVideo(int uid) override {
+ mLastEvent = {EventType::VIDEO_OFF, uid};
+ mEventCount++;
+ }
+
+ virtual void noteResetVideo() override {
+ mLastEvent = {EventType::VIDEO_RESET, 0};
+ mEventCount++;
+ }
+
+ virtual bool requestCpusetBoost(bool enable) override {
+ mLastEvent = {enable ? EventType::CPUSET_ENABLE : EventType::CPUSET_DISABLE, 0};
+ mEventCount++;
+ return true;
+ }
+
+ size_t eventCount() { return mEventCount; }
+ EventType lastEventType() { return mLastEvent.type; }
+ EventEntry lastEvent() { return mLastEvent; }
+
+protected:
+ virtual ~TestSystemCallback() {}
+
+private:
+ EventEntry mLastEvent;
+ size_t mEventCount;
+
+ DISALLOW_EVIL_CONSTRUCTORS(TestSystemCallback);
+};
+
+
+struct TestClient : public BnResourceManagerClient {
+ TestClient(int pid, const std::shared_ptr<ResourceManagerService> &service)
+ : mReclaimed(false), mPid(pid), mService(service) {}
+
+ Status reclaimResource(bool* _aidl_return) override {
+ mService->removeClient(mPid, getId(ref<TestClient>()));
+ mReclaimed = true;
+ *_aidl_return = true;
+ return Status::ok();
+ }
+
+ Status getName(::std::string* _aidl_return) override {
+ *_aidl_return = "test_client";
+ return Status::ok();
+ }
+
+ bool reclaimed() const {
+ return mReclaimed;
+ }
+
+ void reset() {
+ mReclaimed = false;
+ }
+
+ virtual ~TestClient() {}
+
+private:
+ bool mReclaimed;
+ int mPid;
+ std::shared_ptr<ResourceManagerService> mService;
+ DISALLOW_EVIL_CONSTRUCTORS(TestClient);
+};
+
+static const int kTestPid1 = 30;
+static const int kTestUid1 = 1010;
+
+static const int kTestPid2 = 20;
+static const int kTestUid2 = 1011;
+
+static const int kLowPriorityPid = 40;
+static const int kMidPriorityPid = 25;
+static const int kHighPriorityPid = 10;
+
+using EventType = TestSystemCallback::EventType;
+using EventEntry = TestSystemCallback::EventEntry;
+bool operator== (const EventEntry& lhs, const EventEntry& rhs) {
+ return lhs.type == rhs.type && lhs.arg == rhs.arg;
+}
+
+#define CHECK_STATUS_TRUE(condition) \
+ EXPECT_TRUE((condition).isOk() && (result))
+
+#define CHECK_STATUS_FALSE(condition) \
+ EXPECT_TRUE((condition).isOk() && !(result))
+
+class ResourceManagerServiceTestBase : public ::testing::Test {
+public:
+ ResourceManagerServiceTestBase()
+ : mSystemCB(new TestSystemCallback()),
+ mService(::ndk::SharedRefBase::make<ResourceManagerService>(
+ new TestProcessInfo, mSystemCB)),
+ mTestClient1(::ndk::SharedRefBase::make<TestClient>(kTestPid1, mService)),
+ mTestClient2(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)),
+ mTestClient3(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)) {
+ }
+
+ sp<TestSystemCallback> mSystemCB;
+ std::shared_ptr<ResourceManagerService> mService;
+ std::shared_ptr<IResourceManagerClient> mTestClient1;
+ std::shared_ptr<IResourceManagerClient> mTestClient2;
+ std::shared_ptr<IResourceManagerClient> mTestClient3;
+
+protected:
+ static bool isEqualResources(const std::vector<MediaResourceParcel> &resources1,
+ const ResourceList &resources2) {
+ // convert resource1 to ResourceList
+ ResourceList r1;
+ for (size_t i = 0; i < resources1.size(); ++i) {
+ const auto &res = resources1[i];
+ const auto resType = std::tuple(res.type, res.subType, res.id);
+ r1[resType] = res;
+ }
+ return r1 == resources2;
+ }
+
+ static void expectEqResourceInfo(const ResourceInfo &info,
+ int uid,
+ std::shared_ptr<IResourceManagerClient> client,
+ const std::vector<MediaResourceParcel> &resources) {
+ EXPECT_EQ(uid, info.uid);
+ EXPECT_EQ(client, info.client);
+ EXPECT_TRUE(isEqualResources(resources, info.resources));
+ }
+};
+
+} // namespace android
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index a6ecc09..a029d45 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -16,197 +16,17 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ResourceManagerService_test"
+
#include <utils/Log.h>
-#include <gtest/gtest.h>
-
+#include "ResourceManagerServiceTestUtils.h"
#include "ResourceManagerService.h"
-#include <aidl/android/media/BnResourceManagerClient.h>
-#include <media/MediaResource.h>
-#include <media/MediaResourcePolicy.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/ProcessInfoInterface.h>
-
-namespace aidl {
-namespace android {
-namespace media {
-bool operator== (const MediaResourceParcel& lhs, const MediaResourceParcel& rhs) {
- return lhs.type == rhs.type && lhs.subType == rhs.subType &&
- lhs.id == rhs.id && lhs.value == rhs.value;
-}}}}
namespace android {
-using Status = ::ndk::ScopedAStatus;
-using ::aidl::android::media::BnResourceManagerClient;
-using ::aidl::android::media::IResourceManagerService;
-using ::aidl::android::media::IResourceManagerClient;
-
-static int64_t getId(const std::shared_ptr<IResourceManagerClient>& client) {
- return (int64_t) client.get();
-}
-
-struct TestProcessInfo : public ProcessInfoInterface {
- TestProcessInfo() {}
- virtual ~TestProcessInfo() {}
-
- virtual bool getPriority(int pid, int *priority) {
- // For testing, use pid as priority.
- // Lower the value higher the priority.
- *priority = pid;
- return true;
- }
-
- virtual bool isValidPid(int /* pid */) {
- return true;
- }
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
-};
-
-struct TestSystemCallback :
- public ResourceManagerService::SystemCallbackInterface {
- TestSystemCallback() :
- mLastEvent({EventType::INVALID, 0}), mEventCount(0) {}
-
- enum EventType {
- INVALID = -1,
- VIDEO_ON = 0,
- VIDEO_OFF = 1,
- VIDEO_RESET = 2,
- CPUSET_ENABLE = 3,
- CPUSET_DISABLE = 4,
- };
-
- struct EventEntry {
- EventType type;
- int arg;
- };
-
- virtual void noteStartVideo(int uid) override {
- mLastEvent = {EventType::VIDEO_ON, uid};
- mEventCount++;
- }
-
- virtual void noteStopVideo(int uid) override {
- mLastEvent = {EventType::VIDEO_OFF, uid};
- mEventCount++;
- }
-
- virtual void noteResetVideo() override {
- mLastEvent = {EventType::VIDEO_RESET, 0};
- mEventCount++;
- }
-
- virtual bool requestCpusetBoost(bool enable) override {
- mLastEvent = {enable ? EventType::CPUSET_ENABLE : EventType::CPUSET_DISABLE, 0};
- mEventCount++;
- return true;
- }
-
- size_t eventCount() { return mEventCount; }
- EventType lastEventType() { return mLastEvent.type; }
- EventEntry lastEvent() { return mLastEvent; }
-
-protected:
- virtual ~TestSystemCallback() {}
-
-private:
- EventEntry mLastEvent;
- size_t mEventCount;
-
- DISALLOW_EVIL_CONSTRUCTORS(TestSystemCallback);
-};
-
-
-struct TestClient : public BnResourceManagerClient {
- TestClient(int pid, const std::shared_ptr<ResourceManagerService> &service)
- : mReclaimed(false), mPid(pid), mService(service) {}
-
- Status reclaimResource(bool* _aidl_return) override {
- mService->removeClient(mPid, getId(ref<TestClient>()));
- mReclaimed = true;
- *_aidl_return = true;
- return Status::ok();
- }
-
- Status getName(::std::string* _aidl_return) override {
- *_aidl_return = "test_client";
- return Status::ok();
- }
-
- bool reclaimed() const {
- return mReclaimed;
- }
-
- void reset() {
- mReclaimed = false;
- }
-
- virtual ~TestClient() {}
-
-private:
- bool mReclaimed;
- int mPid;
- std::shared_ptr<ResourceManagerService> mService;
- DISALLOW_EVIL_CONSTRUCTORS(TestClient);
-};
-
-static const int kTestPid1 = 30;
-static const int kTestUid1 = 1010;
-
-static const int kTestPid2 = 20;
-static const int kTestUid2 = 1011;
-
-static const int kLowPriorityPid = 40;
-static const int kMidPriorityPid = 25;
-static const int kHighPriorityPid = 10;
-
-using EventType = TestSystemCallback::EventType;
-using EventEntry = TestSystemCallback::EventEntry;
-bool operator== (const EventEntry& lhs, const EventEntry& rhs) {
- return lhs.type == rhs.type && lhs.arg == rhs.arg;
-}
-
-#define CHECK_STATUS_TRUE(condition) \
- EXPECT_TRUE((condition).isOk() && (result))
-
-#define CHECK_STATUS_FALSE(condition) \
- EXPECT_TRUE((condition).isOk() && !(result))
-
-class ResourceManagerServiceTest : public ::testing::Test {
+class ResourceManagerServiceTest : public ResourceManagerServiceTestBase {
public:
- ResourceManagerServiceTest()
- : mSystemCB(new TestSystemCallback()),
- mService(::ndk::SharedRefBase::make<ResourceManagerService>(
- new TestProcessInfo, mSystemCB)),
- mTestClient1(::ndk::SharedRefBase::make<TestClient>(kTestPid1, mService)),
- mTestClient2(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)),
- mTestClient3(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)) {
- }
-
-protected:
- static bool isEqualResources(const std::vector<MediaResourceParcel> &resources1,
- const ResourceList &resources2) {
- // convert resource1 to ResourceList
- ResourceList r1;
- for (size_t i = 0; i < resources1.size(); ++i) {
- const auto &res = resources1[i];
- const auto resType = std::tuple(res.type, res.subType, res.id);
- r1[resType] = res;
- }
- return r1 == resources2;
- }
-
- static void expectEqResourceInfo(const ResourceInfo &info,
- int uid,
- std::shared_ptr<IResourceManagerClient> client,
- const std::vector<MediaResourceParcel> &resources) {
- EXPECT_EQ(uid, info.uid);
- EXPECT_EQ(client, info.client);
- EXPECT_TRUE(isEqualResources(resources, info.resources));
- }
+ ResourceManagerServiceTest() : ResourceManagerServiceTestBase() {}
void verifyClients(bool c1, bool c2, bool c3) {
TestClient *client1 = static_cast<TestClient*>(mTestClient1.get());
@@ -905,12 +725,6 @@
EXPECT_EQ(4u, mSystemCB->eventCount());
EXPECT_EQ(EventType::CPUSET_DISABLE, mSystemCB->lastEventType());
}
-
- sp<TestSystemCallback> mSystemCB;
- std::shared_ptr<ResourceManagerService> mService;
- std::shared_ptr<IResourceManagerClient> mTestClient1;
- std::shared_ptr<IResourceManagerClient> mTestClient2;
- std::shared_ptr<IResourceManagerClient> mTestClient3;
};
TEST_F(ResourceManagerServiceTest, config) {
diff --git a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
new file mode 100644
index 0000000..4c26246
--- /dev/null
+++ b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceObserverService_test"
+
+#include <iostream>
+#include <list>
+
+#include <aidl/android/media/BnResourceObserver.h>
+#include <utils/Log.h>
+#include "ResourceObserverService.h"
+#include "ResourceManagerServiceTestUtils.h"
+
+namespace aidl {
+namespace android {
+namespace media {
+bool operator==(const MediaObservableParcel& lhs, const MediaObservableParcel& rhs) {
+ return lhs.type == rhs.type && lhs.value == rhs.value;
+}
+}}} // namespace ::aidl::android::media
+
+namespace android {
+
+using ::aidl::android::media::BnResourceObserver;
+using ::aidl::android::media::MediaObservableParcel;
+using ::aidl::android::media::MediaObservableType;
+
+#define BUSY ::aidl::android::media::MediaObservableEvent::kBusy
+#define IDLE ::aidl::android::media::MediaObservableEvent::kIdle
+#define ALL ::aidl::android::media::MediaObservableEvent::kAll
+
+struct EventTracker {
+ struct Event {
+ enum { NoEvent, Busy, Idle } type = NoEvent;
+ int uid = 0;
+ int pid = 0;
+ std::vector<MediaObservableParcel> observables;
+ };
+
+ static const Event NoEvent;
+
+ static std::string toString(const MediaObservableParcel& observable) {
+ return "{" + ::aidl::android::media::toString(observable.type)
+ + ", " + std::to_string(observable.value) + "}";
+ }
+ static std::string toString(const Event& event) {
+ std::string eventStr;
+ switch (event.type) {
+ case Event::Busy:
+ eventStr = "Busy";
+ break;
+ case Event::Idle:
+ eventStr = "Idle";
+ break;
+ default:
+ return "NoEvent";
+ }
+ std::string observableStr;
+ for (auto &observable : event.observables) {
+ if (!observableStr.empty()) {
+ observableStr += ", ";
+ }
+ observableStr += toString(observable);
+ }
+ return "{" + eventStr + ", " + std::to_string(event.uid) + ", "
+ + std::to_string(event.pid) + ", {" + observableStr + "}}";
+ }
+
+ static Event Busy(int uid, int pid, const std::vector<MediaObservableParcel>& observables) {
+ return { Event::Busy, uid, pid, observables };
+ }
+ static Event Idle(int uid, int pid, const std::vector<MediaObservableParcel>& observables) {
+ return { Event::Idle, uid, pid, observables };
+ }
+
+ // Pop 1 event from front, wait for up to timeoutUs if empty.
+ const Event& pop(int64_t timeoutUs = 0) {
+ std::unique_lock lock(mLock);
+
+ if (mEventQueue.empty() && timeoutUs > 0) {
+ mCondition.wait_for(lock, std::chrono::microseconds(timeoutUs));
+ }
+
+ if (mEventQueue.empty()) {
+ mPoppedEvent = NoEvent;
+ } else {
+ mPoppedEvent = *mEventQueue.begin();
+ mEventQueue.pop_front();
+ }
+
+ return mPoppedEvent;
+ }
+
+ // Push 1 event to back.
+ void append(const Event& event) {
+ ALOGD("%s", toString(event).c_str());
+
+ std::unique_lock lock(mLock);
+
+ mEventQueue.push_back(event);
+ mCondition.notify_one();
+ }
+
+private:
+ std::mutex mLock;
+ std::condition_variable mCondition;
+ Event mPoppedEvent;
+ std::list<Event> mEventQueue;
+};
+
+const EventTracker::Event EventTracker::NoEvent;
+
+// Operators for GTest macros.
+bool operator==(const EventTracker::Event& lhs, const EventTracker::Event& rhs) {
+ return lhs.type == rhs.type && lhs.uid == rhs.uid && lhs.pid == rhs.pid &&
+ lhs.observables == rhs.observables;
+}
+
+std::ostream& operator<<(std::ostream& str, const EventTracker::Event& v) {
+ str << EventTracker::toString(v);
+ return str;
+}
+
+struct TestObserver : public BnResourceObserver, public EventTracker {
+ TestObserver(const char *name) : mName(name) {}
+ ~TestObserver() = default;
+ Status onStatusChanged(MediaObservableEvent event, int32_t uid, int32_t pid,
+ const std::vector<MediaObservableParcel>& observables) override {
+ ALOGD("%s: %s", mName.c_str(), __FUNCTION__);
+ if (event == MediaObservableEvent::kBusy) {
+ append(Busy(uid, pid, observables));
+ } else {
+ append(Idle(uid, pid, observables));
+ }
+
+ return Status::ok();
+ }
+ std::string mName;
+};
+
+class ResourceObserverServiceTest : public ResourceManagerServiceTestBase {
+public:
+ ResourceObserverServiceTest() : ResourceManagerServiceTestBase(),
+ mObserverService(::ndk::SharedRefBase::make<ResourceObserverService>()),
+ mTestObserver1(::ndk::SharedRefBase::make<TestObserver>("observer1")),
+ mTestObserver2(::ndk::SharedRefBase::make<TestObserver>("observer2")),
+ mTestObserver3(::ndk::SharedRefBase::make<TestObserver>("observer3")) {
+ mService->setObserverService(mObserverService);
+ }
+
+ void registerObservers(MediaObservableEvent filter = ALL) {
+ std::vector<MediaObservableFilter> filters1, filters2, filters3;
+ filters1 = {{MediaObservableType::kVideoSecureCodec, filter}};
+ filters2 = {{MediaObservableType::kVideoNonSecureCodec, filter}};
+ filters3 = {{MediaObservableType::kVideoSecureCodec, filter},
+ {MediaObservableType::kVideoNonSecureCodec, filter}};
+
+ // mTestObserver1 monitors secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+
+ // mTestObserver2 monitors non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver2, filters2).isOk());
+
+ // mTestObserver3 monitors both secure & non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver3, filters3).isOk());
+ }
+
+protected:
+ std::shared_ptr<ResourceObserverService> mObserverService;
+ std::shared_ptr<TestObserver> mTestObserver1;
+ std::shared_ptr<TestObserver> mTestObserver2;
+ std::shared_ptr<TestObserver> mTestObserver3;
+};
+
+TEST_F(ResourceObserverServiceTest, testRegisterObserver) {
+ std::vector<MediaObservableFilter> filters1;
+ Status status;
+
+ // Register with empty observables should fail.
+ status = mObserverService->registerObserver(mTestObserver1, filters1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), BAD_VALUE);
+
+ // mTestObserver1 monitors secure video codecs.
+ filters1 = {{MediaObservableType::kVideoSecureCodec, ALL}};
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+
+ // Register duplicates should fail.
+ status = mObserverService->registerObserver(mTestObserver1, filters1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), ALREADY_EXISTS);
+}
+
+TEST_F(ResourceObserverServiceTest, testUnregisterObserver) {
+ std::vector<MediaObservableFilter> filters1;
+ Status status;
+
+ // Unregister without registering first should fail.
+ status = mObserverService->unregisterObserver(mTestObserver1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), NAME_NOT_FOUND);
+
+ // mTestObserver1 monitors secure video codecs.
+ filters1 = {{MediaObservableType::kVideoSecureCodec, ALL}};
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+ EXPECT_TRUE(mObserverService->unregisterObserver(mTestObserver1).isOk());
+
+ // Unregister again should fail.
+ status = mObserverService->unregisterObserver(mTestObserver1);
+ EXPECT_FALSE(status.isOk());
+ EXPECT_EQ(status.getServiceSpecificError(), NAME_NOT_FOUND);
+}
+
+TEST_F(ResourceObserverServiceTest, testAddResourceBasic) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+ // Add secure video codec.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+
+ // Add non-secure video codec.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+
+ // Add secure & non-secure video codecs.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+
+ // Add additional audio codecs, should be ignored.
+ resources.push_back(MediaResource::CodecResource(1 /*secure*/, 0 /*video*/));
+ resources.push_back(MediaResource::CodecResource(0 /*secure*/, 0 /*video*/));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables3));
+}
+
+TEST_F(ResourceObserverServiceTest, testAddResourceMultiple) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+
+ // Add multiple secure & non-secure video codecs.
+ // Multiple entries of the same type should be merged, count should be propagated correctly.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/, 3 /*count*/)};
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 2}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 3}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 2},
+ {MediaObservableType::kVideoNonSecureCodec, 3}};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+}
+
+TEST_F(ResourceObserverServiceTest, testRemoveResourceBasic) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+ // Add secure video codec to client1.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
+ // Remove secure video codec. observer 1&3 should receive updates.
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid1, kTestPid1, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid1, kTestPid1, observables1));
+ // Remove secure video codec again, should have no event.
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+ // Remove client1, should have no event.
+ mService->removeClient(kTestPid1, getId(mTestClient1));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+
+ // Add non-secure video codec to client2.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ // Remove client2, observer 2&3 should receive updates.
+ mService->removeClient(kTestPid2, getId(mTestClient2));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ // Remove non-secure codec after client2 removed, should have no event.
+ mService->removeResource(kTestPid2, getId(mTestClient2), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+ // Remove client2 again, should have no event.
+ mService->removeClient(kTestPid2, getId(mTestClient2));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+
+ // Add secure & non-secure video codecs, plus audio codecs (that's ignored).
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 0 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+ // Remove one audio codec, should have no event.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 0 /*video*/)};
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
+ // Remove the other audio codec and the secure video codec, only secure video codec
+ // removal should be reported.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/)};
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+ // Remove client3 entirely. Non-secure video codec removal should be reported.
+ mService->removeClient(kTestPid2, getId(mTestClient3));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+}
+
+TEST_F(ResourceObserverServiceTest, testRemoveResourceMultiple) {
+ registerObservers();
+
+ std::vector<MediaObservableParcel> observables1, observables2, observables3;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+
+ // Add multiple secure & non-secure video codecs, plus audio codecs (that's ignored).
+ // (ResourceManager will merge these internally.)
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/, 4 /*count*/),
+ MediaResource::CodecResource(1 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 0 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 4}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 4}};
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
+ // Remove one audio codec, 2 secure video codecs and 2 non-secure video codecs.
+ // 1 secure video codec removal and 2 non-secure video codec removals should be reported.
+ resources = {MediaResource::CodecResource(0 /*secure*/, 0 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/, 2 /*count*/)};
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 2}};
+ observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
+ {MediaObservableType::kVideoNonSecureCodec, 2}};
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables3));
+ // Remove client3 entirely. 2 non-secure video codecs removal should be reported.
+ mService->removeClient(kTestPid2, getId(mTestClient3));
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+}
+
+TEST_F(ResourceObserverServiceTest, testEventFilters) {
+ // Register observers with different event filters.
+ std::vector<MediaObservableFilter> filters1, filters2, filters3;
+ filters1 = {{MediaObservableType::kVideoSecureCodec, BUSY}};
+ filters2 = {{MediaObservableType::kVideoNonSecureCodec, IDLE}};
+ filters3 = {{MediaObservableType::kVideoSecureCodec, IDLE},
+ {MediaObservableType::kVideoNonSecureCodec, BUSY}};
+
+ // mTestObserver1 monitors secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver1, filters1).isOk());
+
+ // mTestObserver2 monitors non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver2, filters2).isOk());
+
+ // mTestObserver3 monitors both secure & non-secure video codecs.
+ EXPECT_TRUE(mObserverService->registerObserver(mTestObserver3, filters3).isOk());
+
+ std::vector<MediaObservableParcel> observables1, observables2;
+ observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
+ observables2 = {{MediaObservableType::kVideoNonSecureCodec, 1}};
+
+ std::vector<MediaResourceParcel> resources;
+
+ // Add secure & non-secure video codecs.
+ resources = {MediaResource::CodecResource(1 /*secure*/, 1 /*video*/),
+ MediaResource::CodecResource(0 /*secure*/, 1 /*video*/)};
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
+
+ // Remove secure & non-secure video codecs.
+ mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
+ EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
+ EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
+}
+
+} // namespace android
diff --git a/services/mediaresourcemanager/test/build_and_run_all_unit_tests.sh b/services/mediaresourcemanager/test/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..1c4ae98
--- /dev/null
+++ b/services/mediaresourcemanager/test/build_and_run_all_unit_tests.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+# Run tests in this directory.
+#
+
+if [ "$SYNC_FINISHED" != true ]; then
+ if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+ fi
+
+ # ensure we have mm
+ . $ANDROID_BUILD_TOP/build/envsetup.sh
+
+ mm
+
+ echo "waiting for device"
+
+ adb root && adb wait-for-device remount && adb sync
+fi
+
+echo "========================================"
+
+echo "testing ResourceManagerService"
+#adb shell /data/nativetest64/ResourceManagerService_test/ResourceManagerService_test
+adb shell /data/nativetest/ResourceManagerService_test/ResourceManagerService_test
+
+echo "testing ServiceLog"
+#adb shell /data/nativetest64/ServiceLog_test/ServiceLog_test
+adb shell /data/nativetest/ServiceLog_test/ServiceLog_test
+
+echo "testing ResourceObserverService"
+#adb shell /data/nativetest64/ResourceObserverService_test/ResourceObserverService_test
+adb shell /data/nativetest/ResourceObserverService_test/ResourceObserverService_test
diff --git a/services/mediatranscoding/Android.bp b/services/mediatranscoding/Android.bp
index 8cf2d62..2dbcf5a 100644
--- a/services/mediatranscoding/Android.bp
+++ b/services/mediatranscoding/Android.bp
@@ -2,16 +2,25 @@
cc_library_shared {
name: "libmediatranscodingservice",
- srcs: ["MediaTranscodingService.cpp"],
+ srcs: [
+ "MediaTranscodingService.cpp",
+ "SimulatedTranscoder.cpp",
+ ],
shared_libs: [
"libbase",
+ "libbinder",
"libbinder_ndk",
+ "libcutils",
"liblog",
"libmediatranscoding",
"libutils",
],
+ export_shared_lib_headers: [
+ "libmediatranscoding",
+ ],
+
static_libs: [
"mediatranscoding_aidl_interface-ndk_platform",
],
diff --git a/services/mediatranscoding/MediaTranscodingService.cpp b/services/mediatranscoding/MediaTranscodingService.cpp
index 82d4161..56f327e 100644
--- a/services/mediatranscoding/MediaTranscodingService.cpp
+++ b/services/mediatranscoding/MediaTranscodingService.cpp
@@ -16,13 +16,22 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaTranscodingService"
-#include <MediaTranscodingService.h>
+#include "MediaTranscodingService.h"
+
#include <android/binder_manager.h>
#include <android/binder_process.h>
-#include <private/android_filesystem_config.h>
+#include <binder/IServiceManager.h>
+#include <cutils/properties.h>
+#include <media/TranscoderWrapper.h>
+#include <media/TranscodingClientManager.h>
+#include <media/TranscodingResourcePolicy.h>
+#include <media/TranscodingSessionController.h>
+#include <media/TranscodingUidPolicy.h>
#include <utils/Log.h>
#include <utils/Vector.h>
+#include "SimulatedTranscoder.h"
+
namespace android {
// Convenience methods for constructing binder::Status objects for error returns
@@ -31,23 +40,17 @@
errorCode, \
String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, ##__VA_ARGS__))
-// Can MediaTranscoding service trust the caller based on the calling UID?
-// TODO(hkuang): Add MediaProvider's UID.
-static bool isTrustedCallingUid(uid_t uid) {
- switch (uid) {
- case AID_ROOT: // root user
- case AID_SYSTEM:
- case AID_SHELL:
- case AID_MEDIA: // mediaserver
- return true;
- default:
- return false;
- }
-}
-
-MediaTranscodingService::MediaTranscodingService()
- : mTranscodingClientManager(TranscodingClientManager::getInstance()) {
+MediaTranscodingService::MediaTranscodingService(
+ const std::shared_ptr<TranscoderInterface>& transcoder)
+ : mUidPolicy(new TranscodingUidPolicy()),
+ mResourcePolicy(new TranscodingResourcePolicy()),
+ mSessionController(
+ new TranscodingSessionController(transcoder, mUidPolicy, mResourcePolicy)),
+ mClientManager(new TranscodingClientManager(mSessionController)) {
ALOGV("MediaTranscodingService is created");
+ transcoder->setCallback(mSessionController);
+ mUidPolicy->setCallback(mSessionController);
+ mResourcePolicy->setCallback(mSessionController);
}
MediaTranscodingService::~MediaTranscodingService() {
@@ -56,6 +59,17 @@
binder_status_t MediaTranscodingService::dump(int fd, const char** /*args*/, uint32_t /*numArgs*/) {
String8 result;
+
+ // TODO(b/161549994): Remove libbinder dependencies for mainline.
+ if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+ result.format(
+ "Permission Denial: "
+ "can't dump MediaTranscodingService from pid=%d, uid=%d\n",
+ AIBinder_getCallingPid(), AIBinder_getCallingUid());
+ write(fd, result.string(), result.size());
+ return PERMISSION_DENIED;
+ }
+
const size_t SIZE = 256;
char buffer[SIZE];
@@ -64,14 +78,22 @@
write(fd, result.string(), result.size());
Vector<String16> args;
- mTranscodingClientManager.dumpAllClients(fd, args);
+ mClientManager->dumpAllClients(fd, args);
+ mSessionController->dumpAllSessions(fd, args);
return OK;
}
//static
void MediaTranscodingService::instantiate() {
+ std::shared_ptr<TranscoderInterface> transcoder;
+ if (property_get_bool("debug.transcoding.simulated_transcoder", false)) {
+ transcoder = std::make_shared<SimulatedTranscoder>();
+ } else {
+ transcoder = std::make_shared<TranscoderWrapper>();
+ }
+
std::shared_ptr<MediaTranscodingService> service =
- ::ndk::SharedRefBase::make<MediaTranscodingService>();
+ ::ndk::SharedRefBase::make<MediaTranscodingService>(transcoder);
binder_status_t status =
AServiceManager_addService(service->asBinder().get(), getServiceName());
if (status != STATUS_OK) {
@@ -80,118 +102,31 @@
}
Status MediaTranscodingService::registerClient(
- const std::shared_ptr<ITranscodingServiceClient>& in_client,
- const std::string& in_opPackageName, int32_t in_clientUid, int32_t in_clientPid,
- int32_t* _aidl_return) {
- if (in_client == nullptr) {
- ALOGE("Client can not be null");
- *_aidl_return = kInvalidJobId;
- return Status::fromServiceSpecificError(ERROR_ILLEGAL_ARGUMENT);
- }
-
- int32_t callingPid = AIBinder_getCallingPid();
- int32_t callingUid = AIBinder_getCallingUid();
-
- // Check if we can trust clientUid. Only privilege caller could forward the uid on app client's behalf.
- if (in_clientUid == USE_CALLING_UID) {
- in_clientUid = callingUid;
- } else if (!isTrustedCallingUid(callingUid)) {
- ALOGE("MediaTranscodingService::registerClient failed (calling PID %d, calling UID %d) "
- "rejected "
- "(don't trust clientUid %d)",
- in_clientPid, in_clientUid, in_clientUid);
- return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
- "Untrusted caller (calling PID %d, UID %d) trying to "
- "register client",
- in_clientPid, in_clientUid);
- }
-
- // Check if we can trust clientPid. Only privilege caller could forward the pid on app client's behalf.
- if (in_clientPid == USE_CALLING_PID) {
- in_clientPid = callingPid;
- } else if (!isTrustedCallingUid(callingUid)) {
- ALOGE("MediaTranscodingService::registerClient client failed (calling PID %d, calling UID "
- "%d) rejected "
- "(don't trust clientPid %d)",
- in_clientPid, in_clientUid, in_clientPid);
- return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
- "Untrusted caller (calling PID %d, UID %d) trying to "
- "register client",
- in_clientPid, in_clientUid);
- }
-
- // We know the clientId must be equal to its pid as we assigned client's pid as its clientId.
- int32_t clientId = in_clientPid;
-
- // Checks if the client already registers.
- if (mTranscodingClientManager.isClientIdRegistered(clientId)) {
- return Status::fromServiceSpecificError(ERROR_ALREADY_EXISTS);
+ const std::shared_ptr<ITranscodingClientCallback>& in_callback,
+ const std::string& in_clientName, const std::string& in_opPackageName,
+ std::shared_ptr<ITranscodingClient>* _aidl_return) {
+ if (in_callback == nullptr) {
+ *_aidl_return = nullptr;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "Client callback cannot be null!");
}
// Creates the client and uses its process id as client id.
- std::unique_ptr<TranscodingClientManager::ClientInfo> newClient =
- std::make_unique<TranscodingClientManager::ClientInfo>(
- in_client, clientId, in_clientPid, in_clientUid, in_opPackageName);
- status_t err = mTranscodingClientManager.addClient(std::move(newClient));
+ std::shared_ptr<ITranscodingClient> newClient;
+
+ status_t err =
+ mClientManager->addClient(in_callback, in_clientName, in_opPackageName, &newClient);
if (err != OK) {
- *_aidl_return = kInvalidClientId;
+ *_aidl_return = nullptr;
return STATUS_ERROR_FMT(err, "Failed to add client to TranscodingClientManager");
}
- ALOGD("Assign client: %s pid: %d, uid: %d with id: %d", in_opPackageName.c_str(), in_clientPid,
- in_clientUid, clientId);
-
- *_aidl_return = clientId;
- return Status::ok();
-}
-
-Status MediaTranscodingService::unregisterClient(int32_t clientId, bool* _aidl_return) {
- ALOGD("unregisterClient id: %d", clientId);
- int32_t callingUid = AIBinder_getCallingUid();
- int32_t callingPid = AIBinder_getCallingPid();
-
- // Only the client with clientId or the trusted caller could unregister the client.
- if (callingPid != clientId) {
- if (!isTrustedCallingUid(callingUid)) {
- ALOGE("Untrusted caller (calling PID %d, UID %d) trying to "
- "unregister client with id: %d",
- callingUid, callingPid, clientId);
- *_aidl_return = true;
- return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
- "Untrusted caller (calling PID %d, UID %d) trying to "
- "unregister client with id: %d",
- callingUid, callingPid, clientId);
- }
- }
-
- *_aidl_return = (mTranscodingClientManager.removeClient(clientId) == OK);
+ *_aidl_return = newClient;
return Status::ok();
}
Status MediaTranscodingService::getNumOfClients(int32_t* _aidl_return) {
ALOGD("MediaTranscodingService::getNumOfClients");
- *_aidl_return = mTranscodingClientManager.getNumOfClients();
- return Status::ok();
-}
-
-Status MediaTranscodingService::submitRequest(int32_t /*clientId*/,
- const TranscodingRequestParcel& /*request*/,
- TranscodingJobParcel* /*job*/,
- int32_t* /*_aidl_return*/) {
- // TODO(hkuang): Add implementation.
- return Status::ok();
-}
-
-Status MediaTranscodingService::cancelJob(int32_t /*in_clientId*/, int32_t /*in_jobId*/,
- bool* /*_aidl_return*/) {
- // TODO(hkuang): Add implementation.
- return Status::ok();
-}
-
-Status MediaTranscodingService::getJobWithId(int32_t /*in_jobId*/,
- TranscodingJobParcel* /*out_job*/,
- bool* /*_aidl_return*/) {
- // TODO(hkuang): Add implementation.
+ *_aidl_return = mClientManager->getNumOfClients();
return Status::ok();
}
diff --git a/services/mediatranscoding/MediaTranscodingService.h b/services/mediatranscoding/MediaTranscodingService.h
index cc69727..428f777 100644
--- a/services/mediatranscoding/MediaTranscodingService.h
+++ b/services/mediatranscoding/MediaTranscodingService.h
@@ -19,44 +19,39 @@
#include <aidl/android/media/BnMediaTranscodingService.h>
#include <binder/IServiceManager.h>
-#include <media/TranscodingClientManager.h>
namespace android {
using Status = ::ndk::ScopedAStatus;
using ::aidl::android::media::BnMediaTranscodingService;
-using ::aidl::android::media::ITranscodingServiceClient;
-using ::aidl::android::media::TranscodingJobParcel;
+using ::aidl::android::media::ITranscodingClient;
+using ::aidl::android::media::ITranscodingClientCallback;
using ::aidl::android::media::TranscodingRequestParcel;
+using ::aidl::android::media::TranscodingSessionParcel;
+class TranscodingClientManager;
+class TranscodingSessionController;
+class TranscoderInterface;
+class UidPolicyInterface;
+class ResourcePolicyInterface;
class MediaTranscodingService : public BnMediaTranscodingService {
public:
- static constexpr int32_t kInvalidJobId = -1;
+ static constexpr int32_t kInvalidSessionId = -1;
static constexpr int32_t kInvalidClientId = -1;
- MediaTranscodingService();
+ MediaTranscodingService(const std::shared_ptr<TranscoderInterface>& transcoder);
virtual ~MediaTranscodingService();
static void instantiate();
static const char* getServiceName() { return "media.transcoding"; }
- Status registerClient(const std::shared_ptr<ITranscodingServiceClient>& in_client,
- const std::string& in_opPackageName, int32_t in_clientUid,
- int32_t in_clientPid, int32_t* _aidl_return) override;
-
- Status unregisterClient(int32_t clientId, bool* _aidl_return) override;
+ Status registerClient(const std::shared_ptr<ITranscodingClientCallback>& in_callback,
+ const std::string& in_clientName, const std::string& in_opPackageName,
+ std::shared_ptr<ITranscodingClient>* _aidl_return) override;
Status getNumOfClients(int32_t* _aidl_return) override;
- Status submitRequest(int32_t in_clientId, const TranscodingRequestParcel& in_request,
- TranscodingJobParcel* out_job, int32_t* _aidl_return) override;
-
- Status cancelJob(int32_t in_clientId, int32_t in_jobId, bool* _aidl_return) override;
-
- Status getJobWithId(int32_t in_jobId, TranscodingJobParcel* out_job,
- bool* _aidl_return) override;
-
virtual inline binder_status_t dump(int /*fd*/, const char** /*args*/, uint32_t /*numArgs*/);
private:
@@ -64,7 +59,10 @@
mutable std::mutex mServiceLock;
- TranscodingClientManager& mTranscodingClientManager;
+ std::shared_ptr<UidPolicyInterface> mUidPolicy;
+ std::shared_ptr<ResourcePolicyInterface> mResourcePolicy;
+ std::shared_ptr<TranscodingSessionController> mSessionController;
+ std::shared_ptr<TranscodingClientManager> mClientManager;
};
} // namespace android
diff --git a/services/mediatranscoding/SimulatedTranscoder.cpp b/services/mediatranscoding/SimulatedTranscoder.cpp
new file mode 100644
index 0000000..03ee886
--- /dev/null
+++ b/services/mediatranscoding/SimulatedTranscoder.cpp
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimulatedTranscoder"
+#include "SimulatedTranscoder.h"
+
+#include <utils/Log.h>
+
+#include <thread>
+
+namespace android {
+
+//static
+const char* SimulatedTranscoder::toString(Event::Type type) {
+ switch (type) {
+ case Event::Start:
+ return "Start";
+ case Event::Pause:
+ return "Pause";
+ case Event::Resume:
+ return "Resume";
+ default:
+ break;
+ }
+ return "(unknown)";
+}
+
+SimulatedTranscoder::SimulatedTranscoder() {
+ std::thread(&SimulatedTranscoder::threadLoop, this).detach();
+}
+
+void SimulatedTranscoder::setCallback(const std::shared_ptr<TranscoderCallbackInterface>& cb) {
+ mCallback = cb;
+}
+
+void SimulatedTranscoder::start(
+ ClientIdType clientId, SessionIdType sessionId, const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& /*clientCallback*/) {
+ if (request.testConfig.has_value() && request.testConfig->processingTotalTimeMs > 0) {
+ mSessionProcessingTimeMs = request.testConfig->processingTotalTimeMs;
+ }
+ ALOGV("%s: session {%d}: processingTime: %lld", __FUNCTION__, sessionId,
+ (long long)mSessionProcessingTimeMs);
+ queueEvent(Event::Start, clientId, sessionId, [=] {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onStarted(clientId, sessionId);
+ }
+ });
+}
+
+void SimulatedTranscoder::pause(ClientIdType clientId, SessionIdType sessionId) {
+ queueEvent(Event::Pause, clientId, sessionId, [=] {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onPaused(clientId, sessionId);
+ }
+ });
+}
+
+void SimulatedTranscoder::resume(
+ ClientIdType clientId, SessionIdType sessionId, const TranscodingRequestParcel& /*request*/,
+ const std::shared_ptr<ITranscodingClientCallback>& /*clientCallback*/) {
+ queueEvent(Event::Resume, clientId, sessionId, [=] {
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ callback->onResumed(clientId, sessionId);
+ }
+ });
+}
+
+void SimulatedTranscoder::stop(ClientIdType clientId, SessionIdType sessionId) {
+ queueEvent(Event::Stop, clientId, sessionId, nullptr);
+}
+
+void SimulatedTranscoder::queueEvent(Event::Type type, ClientIdType clientId,
+ SessionIdType sessionId, std::function<void()> runnable) {
+ ALOGV("%s: session {%lld, %d}: %s", __FUNCTION__, (long long)clientId, sessionId,
+ toString(type));
+
+ auto lock = std::scoped_lock(mLock);
+
+ mQueue.push_back({type, clientId, sessionId, runnable});
+ mCondition.notify_one();
+}
+
+void SimulatedTranscoder::threadLoop() {
+ bool running = false;
+ std::chrono::microseconds remainingUs(kSessionDurationUs);
+ std::chrono::system_clock::time_point lastRunningTime;
+ Event lastRunningEvent;
+
+ std::unique_lock<std::mutex> lock(mLock);
+    // SimulatedTranscoder currently lives in the transcoding service, for as
+    // long as MediaTranscodingService itself does.
+ while (true) {
+ // Wait for the next event.
+ while (mQueue.empty()) {
+ if (!running) {
+ mCondition.wait(lock);
+ continue;
+ }
+ // If running, wait for the remaining life of this session. Report finish if timed out.
+ std::cv_status status = mCondition.wait_for(lock, remainingUs);
+ if (status == std::cv_status::timeout) {
+ running = false;
+
+ auto callback = mCallback.lock();
+ if (callback != nullptr) {
+ lock.unlock();
+ callback->onFinish(lastRunningEvent.clientId, lastRunningEvent.sessionId);
+ lock.lock();
+ }
+ } else {
+ // Advance last running time and remaining time. This is needed to guard
+ // against bad events (which will be ignored) or spurious wakeups, in that
+ // case we don't want to wait for the same time again.
+ auto now = std::chrono::system_clock::now();
+ remainingUs -= (now - lastRunningTime);
+ lastRunningTime = now;
+ }
+ }
+
+ // Handle the events, adjust state and send updates to client accordingly.
+ while (!mQueue.empty()) {
+ Event event = *mQueue.begin();
+ mQueue.pop_front();
+
+ ALOGV("%s: session {%lld, %d}: %s", __FUNCTION__, (long long)event.clientId,
+ event.sessionId, toString(event.type));
+
+ if (!running && (event.type == Event::Start || event.type == Event::Resume)) {
+ running = true;
+ lastRunningTime = std::chrono::system_clock::now();
+ lastRunningEvent = event;
+ if (event.type == Event::Start) {
+ remainingUs = std::chrono::milliseconds(mSessionProcessingTimeMs);
+ }
+ } else if (running && (event.type == Event::Pause || event.type == Event::Stop)) {
+ running = false;
+ remainingUs -= (std::chrono::system_clock::now() - lastRunningTime);
+ } else {
+ ALOGW("%s: discarding bad event: session {%lld, %d}: %s", __FUNCTION__,
+ (long long)event.clientId, event.sessionId, toString(event.type));
+ continue;
+ }
+
+ if (event.runnable != nullptr) {
+ lock.unlock();
+ event.runnable();
+ lock.lock();
+ }
+ }
+ }
+}
+
+} // namespace android
diff --git a/services/mediatranscoding/SimulatedTranscoder.h b/services/mediatranscoding/SimulatedTranscoder.h
new file mode 100644
index 0000000..ba2bba0
--- /dev/null
+++ b/services/mediatranscoding/SimulatedTranscoder.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SIMULATED_TRANSCODER_H
+#define ANDROID_MEDIA_SIMULATED_TRANSCODER_H
+
+#include <android-base/thread_annotations.h>
+#include <media/TranscoderInterface.h>
+
+#include <list>
+#include <mutex>
+
+namespace android {
+
+/**
+ * SimulatedTranscoder is currently used to instantiate MediaTranscodingService
+ * on service side for testing, so that we could actually test the IPC calls of
+ * MediaTranscodingService to expose issues that are observable only over IPC.
+ * SimulatedTranscoder is used when useSimulatedTranscoder in TranscodingTestConfig
+ * is set to true.
+ *
+ * SimulatedTranscoder simulates session execution by reporting finish after kSessionDurationUs.
+ * Session lifecycle events are reported via progress updates with special progress
+ * numbers (equal to the Event's type).
+ */
+class SimulatedTranscoder : public TranscoderInterface {
+public:
+ struct Event {
+ enum Type { NoEvent, Start, Pause, Resume, Stop, Finished, Failed } type;
+ ClientIdType clientId;
+ SessionIdType sessionId;
+ std::function<void()> runnable;
+ };
+
+ static constexpr int64_t kSessionDurationUs = 1000000;
+
+ SimulatedTranscoder();
+
+ // TranscoderInterface
+ void setCallback(const std::shared_ptr<TranscoderCallbackInterface>& cb) override;
+ void start(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCallback) override;
+ void pause(ClientIdType clientId, SessionIdType sessionId) override;
+ void resume(ClientIdType clientId, SessionIdType sessionId,
+ const TranscodingRequestParcel& request,
+ const std::shared_ptr<ITranscodingClientCallback>& clientCallback) override;
+ void stop(ClientIdType clientId, SessionIdType sessionId) override;
+ // ~TranscoderInterface
+
+private:
+ std::weak_ptr<TranscoderCallbackInterface> mCallback;
+ std::mutex mLock;
+ std::condition_variable mCondition;
+ std::list<Event> mQueue GUARDED_BY(mLock);
+
+    // Minimum time spent transcoding the video. This is used just for testing.
+ int64_t mSessionProcessingTimeMs = kSessionDurationUs / 1000;
+
+ static const char* toString(Event::Type type);
+ void queueEvent(Event::Type type, ClientIdType clientId, SessionIdType sessionId,
+ std::function<void()> runnable);
+ void threadLoop();
+};
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_SIMULATED_TRANSCODER_H
diff --git a/services/mediatranscoding/tests/Android.bp b/services/mediatranscoding/tests/Android.bp
index e0e040c..6497685 100644
--- a/services/mediatranscoding/tests/Android.bp
+++ b/services/mediatranscoding/tests/Android.bp
@@ -19,17 +19,40 @@
"liblog",
"libutils",
"libmediatranscodingservice",
+ "libcutils",
],
static_libs: [
"mediatranscoding_aidl_interface-ndk_platform",
],
+
+ required: [
+ "TranscodingUidPolicy_TestAppA",
+ "TranscodingUidPolicy_TestAppB",
+ "TranscodingUidPolicy_TestAppC",
+ ],
}
-// MediaTranscodingService unit test
+// MediaTranscodingService unit test using simulated transcoder
cc_test {
- name: "mediatranscodingservice_tests",
+ name: "mediatranscodingservice_simulated_tests",
defaults: ["mediatranscodingservice_test_defaults"],
- srcs: ["mediatranscodingservice_tests.cpp"],
-}
\ No newline at end of file
+ srcs: ["mediatranscodingservice_simulated_tests.cpp"],
+}
+
+// MediaTranscodingService unit test using real transcoder
+cc_test {
+ name: "mediatranscodingservice_real_tests",
+ defaults: ["mediatranscodingservice_test_defaults"],
+
+ srcs: ["mediatranscodingservice_real_tests.cpp"],
+}
+
+// MediaTranscodingService unit test related to resource management
+cc_test {
+ name: "mediatranscodingservice_resource_tests",
+ defaults: ["mediatranscodingservice_test_defaults"],
+
+ srcs: ["mediatranscodingservice_resource_tests.cpp"],
+}
diff --git a/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
new file mode 100644
index 0000000..5f4f645
--- /dev/null
+++ b/services/mediatranscoding/tests/MediaTranscodingServiceTestHelper.h
@@ -0,0 +1,495 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaTranscodingService.
+
+#include <aidl/android/media/BnTranscodingClientCallback.h>
+#include <aidl/android/media/IMediaTranscodingService.h>
+#include <aidl/android/media/ITranscodingClient.h>
+#include <aidl/android/media/ITranscodingClientCallback.h>
+#include <aidl/android/media/TranscodingRequestParcel.h>
+#include <aidl/android/media/TranscodingSessionParcel.h>
+#include <aidl/android/media/TranscodingSessionPriority.h>
+#include <android-base/logging.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <binder/PermissionController.h>
+#include <cutils/multiuser.h>
+#include <fcntl.h>
+#include <gtest/gtest.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <utils/Log.h>
+
+#include <iostream>
+#include <list>
+
+#include "SimulatedTranscoder.h"
+
+namespace android {
+
+namespace media {
+
+using Status = ::ndk::ScopedAStatus;
+using aidl::android::media::BnTranscodingClientCallback;
+using aidl::android::media::IMediaTranscodingService;
+using aidl::android::media::ITranscodingClient;
+using aidl::android::media::ITranscodingClientCallback;
+using aidl::android::media::TranscodingRequestParcel;
+using aidl::android::media::TranscodingSessionParcel;
+using aidl::android::media::TranscodingSessionPriority;
+using aidl::android::media::TranscodingVideoTrackFormat;
+
+constexpr int32_t kClientUseCallingPid = IMediaTranscodingService::USE_CALLING_PID;
+
+constexpr uid_t kClientUid = 5000;
+#define UID(n) (kClientUid + (n))
+
+constexpr pid_t kClientPid = 10000;
+#define PID(n) (kClientPid + (n))
+
+constexpr int32_t kClientId = 0;
+#define CLIENT(n) (kClientId + (n))
+
+constexpr const char* kClientName = "TestClient";
+constexpr const char* kClientPackageA = "com.android.tests.transcoding.testapp.A";
+constexpr const char* kClientPackageB = "com.android.tests.transcoding.testapp.B";
+constexpr const char* kClientPackageC = "com.android.tests.transcoding.testapp.C";
+
+constexpr const char* kTestActivityName = "/com.android.tests.transcoding.MainActivity";
+
+static status_t getUidForPackage(String16 packageName, userid_t userId, /*inout*/ uid_t& uid) {
+ PermissionController pc;
+ uid = pc.getPackageUid(packageName, 0);
+ if (uid <= 0) {
+ ALOGE("Unknown package: '%s'", String8(packageName).string());
+ return BAD_VALUE;
+ }
+
+ if (userId < 0) {
+ ALOGE("Invalid user: %d", userId);
+ return BAD_VALUE;
+ }
+
+ uid = multiuser_get_uid(userId, uid);
+ return NO_ERROR;
+}
+
+struct ShellHelper {
+ static bool RunCmd(const std::string& cmdStr) {
+ int ret = system(cmdStr.c_str());
+ if (ret != 0) {
+ ALOGE("Failed to run cmd: %s, exitcode %d", cmdStr.c_str(), ret);
+ return false;
+ }
+ return true;
+ }
+
+ static bool Start(const char* packageName, const char* activityName) {
+ return RunCmd("am start -W " + std::string(packageName) + std::string(activityName) +
+ " &> /dev/null");
+ }
+
+ static bool Stop(const char* packageName) {
+ return RunCmd("am force-stop " + std::string(packageName));
+ }
+};
+
+struct EventTracker {
+ struct Event {
+ enum { NoEvent, Start, Pause, Resume, Finished, Failed } type;
+ int64_t clientId;
+ int32_t sessionId;
+ };
+
+#define DECLARE_EVENT(action) \
+ static Event action(int32_t clientId, int32_t sessionId) { \
+ return {Event::action, clientId, sessionId}; \
+ }
+
+ DECLARE_EVENT(Start);
+ DECLARE_EVENT(Pause);
+ DECLARE_EVENT(Resume);
+ DECLARE_EVENT(Finished);
+ DECLARE_EVENT(Failed);
+
+ static constexpr Event NoEvent = {Event::NoEvent, 0, 0};
+
+ static std::string toString(const Event& event) {
+ std::string eventStr;
+ switch (event.type) {
+ case Event::Start:
+ eventStr = "Start";
+ break;
+ case Event::Pause:
+ eventStr = "Pause";
+ break;
+ case Event::Resume:
+ eventStr = "Resume";
+ break;
+ case Event::Finished:
+ eventStr = "Finished";
+ break;
+ case Event::Failed:
+ eventStr = "Failed";
+ break;
+ default:
+ return "NoEvent";
+ }
+ return "session {" + std::to_string(event.clientId) + ", " +
+ std::to_string(event.sessionId) + "}: " + eventStr;
+ }
+
+ // Pop 1 event from front, wait for up to timeoutUs if empty.
+ const Event& pop(int64_t timeoutUs = 0) {
+ std::unique_lock lock(mLock);
+
+ if (mEventQueue.empty() && timeoutUs > 0) {
+ mCondition.wait_for(lock, std::chrono::microseconds(timeoutUs));
+ }
+
+ if (mEventQueue.empty()) {
+ mPoppedEvent = NoEvent;
+ } else {
+ mPoppedEvent = *mEventQueue.begin();
+ mEventQueue.pop_front();
+ }
+
+ return mPoppedEvent;
+ }
+
+ bool waitForSpecificEventAndPop(const Event& target, std::list<Event>* outEvents,
+ int64_t timeoutUs = 0) {
+ std::unique_lock lock(mLock);
+
+ auto startTime = std::chrono::system_clock::now();
+
+ std::list<Event>::iterator it;
+ while (((it = std::find(mEventQueue.begin(), mEventQueue.end(), target)) ==
+ mEventQueue.end()) &&
+ timeoutUs > 0) {
+ std::cv_status status = mCondition.wait_for(lock, std::chrono::microseconds(timeoutUs));
+ if (status == std::cv_status::timeout) {
+ break;
+ }
+ std::chrono::microseconds elapsedTime = std::chrono::system_clock::now() - startTime;
+ timeoutUs -= elapsedTime.count();
+ }
+
+ if (it == mEventQueue.end()) {
+ return false;
+ }
+ *outEvents = std::list<Event>(mEventQueue.begin(), std::next(it));
+ mEventQueue.erase(mEventQueue.begin(), std::next(it));
+ return true;
+ }
+
+ // Push 1 event to back.
+ void append(const Event& event,
+ const TranscodingErrorCode err = TranscodingErrorCode::kNoError) {
+ ALOGD("%s", toString(event).c_str());
+
+ std::unique_lock lock(mLock);
+
+ mEventQueue.push_back(event);
+ mLastErr = err;
+ mCondition.notify_one();
+ }
+
+ void updateProgress(int progress) {
+ std::unique_lock lock(mLock);
+ mLastProgress = progress;
+ mUpdateCount++;
+ }
+
+ int getUpdateCount(int* lastProgress) {
+ std::unique_lock lock(mLock);
+ *lastProgress = mLastProgress;
+ return mUpdateCount;
+ }
+
+ TranscodingErrorCode getLastError() {
+ std::unique_lock lock(mLock);
+ return mLastErr;
+ }
+
+private:
+ std::mutex mLock;
+ std::condition_variable mCondition;
+ Event mPoppedEvent;
+ std::list<Event> mEventQueue;
+ TranscodingErrorCode mLastErr;
+ int mUpdateCount = 0;
+ int mLastProgress = -1;
+};
+
+// Operators for GTest macros.
+bool operator==(const EventTracker::Event& lhs, const EventTracker::Event& rhs) {
+ return lhs.type == rhs.type && lhs.clientId == rhs.clientId && lhs.sessionId == rhs.sessionId;
+}
+
+std::ostream& operator<<(std::ostream& str, const EventTracker::Event& v) {
+ str << EventTracker::toString(v);
+ return str;
+}
+
+static constexpr bool success = true;
+static constexpr bool fail = false;
+
+struct TestClientCallback : public BnTranscodingClientCallback,
+ public EventTracker,
+ public std::enable_shared_from_this<TestClientCallback> {
+ TestClientCallback(const char* packageName, int32_t id)
+ : mClientId(id), mClientPid(PID(id)), mClientUid(UID(id)), mPackageName(packageName) {
+ ALOGI("TestClientCallback %d created: pid %d, uid %d", id, PID(id), UID(id));
+
+ // Use package uid if that's available.
+ uid_t packageUid;
+ if (getUidForPackage(String16(packageName), 0 /*userId*/, packageUid) == NO_ERROR) {
+ mClientUid = packageUid;
+ }
+ }
+
+ virtual ~TestClientCallback() { ALOGI("TestClientCallback %d destroyed", mClientId); }
+
+ Status openFileDescriptor(const std::string& in_fileUri, const std::string& in_mode,
+ ::ndk::ScopedFileDescriptor* _aidl_return) override {
+ ALOGD("@@@ openFileDescriptor: %s", in_fileUri.c_str());
+ int fd;
+ if (in_mode == "w" || in_mode == "rw") {
+ int kOpenFlags;
+ if (in_mode == "w") {
+ // Write-only, create file if non-existent, truncate existing file.
+ kOpenFlags = O_WRONLY | O_CREAT | O_TRUNC;
+ } else {
+ // Read-Write, create if non-existent, no truncate (service will truncate if needed)
+ kOpenFlags = O_RDWR | O_CREAT;
+ }
+ // User R+W permission.
+ constexpr int kFileMode = S_IRUSR | S_IWUSR;
+ fd = open(in_fileUri.c_str(), kOpenFlags, kFileMode);
+ } else {
+ fd = open(in_fileUri.c_str(), O_RDONLY);
+ }
+ _aidl_return->set(fd);
+ return Status::ok();
+ }
+
+ Status onTranscodingStarted(int32_t in_sessionId) override {
+ append(EventTracker::Start(mClientId, in_sessionId));
+ return Status::ok();
+ }
+
+ Status onTranscodingPaused(int32_t in_sessionId) override {
+ append(EventTracker::Pause(mClientId, in_sessionId));
+ return Status::ok();
+ }
+
+ Status onTranscodingResumed(int32_t in_sessionId) override {
+ append(EventTracker::Resume(mClientId, in_sessionId));
+ return Status::ok();
+ }
+
+ Status onTranscodingFinished(
+ int32_t in_sessionId,
+ const ::aidl::android::media::TranscodingResultParcel& /* in_result */) override {
+ append(Finished(mClientId, in_sessionId));
+ return Status::ok();
+ }
+
+ Status onTranscodingFailed(int32_t in_sessionId,
+ ::aidl::android::media::TranscodingErrorCode in_errorCode) override {
+ append(Failed(mClientId, in_sessionId), in_errorCode);
+ return Status::ok();
+ }
+
+ Status onAwaitNumberOfSessionsChanged(int32_t /* in_sessionId */,
+ int32_t /* in_oldAwaitNumber */,
+ int32_t /* in_newAwaitNumber */) override {
+ return Status::ok();
+ }
+
+ Status onProgressUpdate(int32_t /* in_sessionId */, int32_t in_progress) override {
+ updateProgress(in_progress);
+ return Status::ok();
+ }
+
+ Status registerClient(const char* packageName,
+ const std::shared_ptr<IMediaTranscodingService>& service) {
+ // Override the default uid if the package uid is found.
+ uid_t uid;
+ if (getUidForPackage(String16(packageName), 0 /*userId*/, uid) == NO_ERROR) {
+ mClientUid = uid;
+ }
+
+ ALOGD("registering %s with uid %d", packageName, mClientUid);
+
+ std::shared_ptr<ITranscodingClient> client;
+ Status status =
+ service->registerClient(shared_from_this(), kClientName, packageName, &client);
+
+ mClient = status.isOk() ? client : nullptr;
+ return status;
+ }
+
+ Status unregisterClient() {
+ Status status;
+ if (mClient != nullptr) {
+ status = mClient->unregister();
+ mClient = nullptr;
+ }
+ return status;
+ }
+
+ template <bool expectation = success>
+ bool submit(int32_t sessionId, const char* sourceFilePath, const char* destinationFilePath,
+ TranscodingSessionPriority priority = TranscodingSessionPriority::kNormal,
+ int bitrateBps = -1, int overridePid = -1, int overrideUid = -1) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ TranscodingRequestParcel request;
+ TranscodingSessionParcel session;
+
+ request.sourceFilePath = sourceFilePath;
+ request.destinationFilePath = destinationFilePath;
+ request.priority = priority;
+ request.clientPid = (overridePid == -1) ? mClientPid : overridePid;
+ request.clientUid = (overrideUid == -1) ? mClientUid : overrideUid;
+ if (bitrateBps > 0) {
+ request.requestedVideoTrackFormat.emplace(TranscodingVideoTrackFormat());
+ request.requestedVideoTrackFormat->bitrateBps = bitrateBps;
+ }
+ Status status = mClient->submitRequest(request, &session, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+ if (shouldSucceed) {
+ EXPECT_EQ(session.sessionId, sessionId);
+ }
+
+ return status.isOk() && (result == shouldSucceed) &&
+ (!shouldSucceed || session.sessionId == sessionId);
+ }
+
+ template <bool expectation = success>
+ bool cancel(int32_t sessionId) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ Status status = mClient->cancelSession(sessionId, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+
+ return status.isOk() && (result == shouldSucceed);
+ }
+
+ template <bool expectation = success>
+ bool getSession(int32_t sessionId, const char* sourceFilePath,
+ const char* destinationFilePath) {
+ constexpr bool shouldSucceed = (expectation == success);
+ bool result;
+ TranscodingSessionParcel session;
+ Status status = mClient->getSessionWithId(sessionId, &session, &result);
+
+ EXPECT_TRUE(status.isOk());
+ EXPECT_EQ(result, shouldSucceed);
+ if (shouldSucceed) {
+ EXPECT_EQ(session.sessionId, sessionId);
+ EXPECT_EQ(session.request.sourceFilePath, sourceFilePath);
+ }
+
+ return status.isOk() && (result == shouldSucceed) &&
+ (!shouldSucceed || (session.sessionId == sessionId &&
+ session.request.sourceFilePath == sourceFilePath &&
+ session.request.destinationFilePath == destinationFilePath));
+ }
+
+ int32_t mClientId;
+ pid_t mClientPid;
+ uid_t mClientUid;
+ std::string mPackageName;
+ std::shared_ptr<ITranscodingClient> mClient;
+};
+
+class MediaTranscodingServiceTestBase : public ::testing::Test {
+public:
+ MediaTranscodingServiceTestBase() { ALOGI("MediaTranscodingServiceTestBase created"); }
+
+ virtual ~MediaTranscodingServiceTestBase() {
+ ALOGI("MediaTranscodingServiceTestBase destroyed");
+ }
+
+ void SetUp() override {
+ // Need thread pool to receive callbacks, otherwise oneway callbacks are
+ // silently ignored.
+ ABinderProcess_startThreadPool();
+ ::ndk::SpAIBinder binder(AServiceManager_getService("media.transcoding"));
+ mService = IMediaTranscodingService::fromBinder(binder);
+ if (mService == nullptr) {
+ ALOGE("Failed to connect to the media.trascoding service.");
+ return;
+ }
+
+ mClient1 = ::ndk::SharedRefBase::make<TestClientCallback>(kClientPackageA, 1);
+ mClient2 = ::ndk::SharedRefBase::make<TestClientCallback>(kClientPackageB, 2);
+ mClient3 = ::ndk::SharedRefBase::make<TestClientCallback>(kClientPackageC, 3);
+ }
+
+ Status registerOneClient(const std::shared_ptr<TestClientCallback>& callback) {
+ ALOGD("registering %s with uid %d", callback->mPackageName.c_str(), callback->mClientUid);
+
+ std::shared_ptr<ITranscodingClient> client;
+ Status status =
+ mService->registerClient(callback, kClientName, callback->mPackageName, &client);
+
+ if (status.isOk()) {
+ callback->mClient = client;
+ } else {
+ callback->mClient = nullptr;
+ }
+ return status;
+ }
+
+ void registerMultipleClients() {
+ // Register 3 clients.
+ EXPECT_TRUE(registerOneClient(mClient1).isOk());
+ EXPECT_TRUE(registerOneClient(mClient2).isOk());
+ EXPECT_TRUE(registerOneClient(mClient3).isOk());
+
+ // Check the number of clients.
+ int32_t numOfClients;
+ Status status = mService->getNumOfClients(&numOfClients);
+ EXPECT_TRUE(status.isOk());
+ EXPECT_GE(numOfClients, 3);
+ }
+
+ void unregisterMultipleClients() {
+ // Unregister the clients.
+ EXPECT_TRUE(mClient1->unregisterClient().isOk());
+ EXPECT_TRUE(mClient2->unregisterClient().isOk());
+ EXPECT_TRUE(mClient3->unregisterClient().isOk());
+ }
+
+ void deleteFile(const char* path) { unlink(path); }
+
+ std::shared_ptr<IMediaTranscodingService> mService;
+ std::shared_ptr<TestClientCallback> mClient1;
+ std::shared_ptr<TestClientCallback> mClient2;
+ std::shared_ptr<TestClientCallback> mClient3;
+};
+
+} // namespace media
+} // namespace android
diff --git a/services/mediatranscoding/tests/README.txt b/services/mediatranscoding/tests/README.txt
new file mode 100644
index 0000000..cde465e
--- /dev/null
+++ b/services/mediatranscoding/tests/README.txt
@@ -0,0 +1,8 @@
+mediatranscodingservice_simulated_tests:
+ Tests media transcoding service with simulated transcoder.
+
+mediatranscodingservice_real_tests:
+ Tests media transcoding service with real transcoder. Uses the same test assets
+ as the MediaTranscoder unit tests. Before running the test, please make sure
+ to push the test assets to /data/local/tmp:
+ adb push $TOP/frameworks/av/media/libmediatranscoding/tests/assets /data/local/tmp/TranscodingTestAssets
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/Android.bp b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/Android.bp
new file mode 100644
index 0000000..95a94fc
--- /dev/null
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/Android.bp
@@ -0,0 +1,23 @@
+android_test_helper_app {
+ name: "TranscodingUidPolicy_TestAppA",
+ manifest: "TestAppA.xml",
+ static_libs: ["androidx.test.rules"],
+ sdk_version: "test_current",
+ srcs: ["src/**/*.java"],
+}
+
+android_test_helper_app {
+ name: "TranscodingUidPolicy_TestAppB",
+ manifest: "TestAppB.xml",
+ static_libs: ["androidx.test.rules"],
+ sdk_version: "test_current",
+ srcs: ["src/**/*.java"],
+}
+
+android_test_helper_app {
+ name: "TranscodingUidPolicy_TestAppC",
+ manifest: "TestAppC.xml",
+ static_libs: ["androidx.test.rules"],
+ sdk_version: "test_current",
+ srcs: ["src/**/*.java"],
+}
\ No newline at end of file
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppA.xml b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppA.xml
new file mode 100644
index 0000000..0dff171
--- /dev/null
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppA.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.tests.transcoding.testapp.A"
+ android:versionCode="1"
+ android:versionName="1.0" >
+
+ <application android:label="TestAppA">
+ <activity android:name="com.android.tests.transcoding.MainActivity"
+ android:exported="true">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.DEFAULT"/>
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ <activity android:name="com.android.tests.transcoding.ResourcePolicyTestActivity"
+ android:exported="true">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.DEFAULT"/>
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+</manifest>
+
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppB.xml b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppB.xml
new file mode 100644
index 0000000..4baa35a
--- /dev/null
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppB.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.tests.transcoding.testapp.B"
+ android:versionCode="1"
+ android:versionName="1.0" >
+
+ <application android:label="TestAppB">
+ <activity android:name="com.android.tests.transcoding.MainActivity"
+ android:exported="true">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.DEFAULT"/>
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+</manifest>
+
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppC.xml b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppC.xml
new file mode 100644
index 0000000..3dde3af
--- /dev/null
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/TestAppC.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.tests.transcoding.testapp.C"
+ android:versionCode="1"
+ android:versionName="1.0" >
+
+ <application android:label="TestAppC">
+ <activity android:name="com.android.tests.transcoding.MainActivity"
+ android:exported="true">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.DEFAULT"/>
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+</manifest>
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/MainActivity.java b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/MainActivity.java
new file mode 100644
index 0000000..b79164d
--- /dev/null
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/MainActivity.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.tests.transcoding;
+
+import android.app.Activity;
+import android.content.Intent;
+import android.os.Bundle;
+import android.util.Log;
+
+/**
+ * This is an empty activity for testing the UID policy of media transcoding service.
+ */
+public class MainActivity extends Activity {
+ private static final String TAG = "MainActivity";
+
+ // Called at the start of the full lifetime.
+ @Override
+ public void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ // Initialize Activity and inflate the UI.
+ }
+
+ // Called after onCreate has finished, use to restore UI state
+ @Override
+ public void onRestoreInstanceState(Bundle savedInstanceState) {
+ super.onRestoreInstanceState(savedInstanceState);
+ // Restore UI state from the savedInstanceState.
+ // This bundle has also been passed to onCreate.
+ // Will only be called if the Activity has been
+ // killed by the system since it was last visible.
+ }
+
+ // Called before subsequent visible lifetimes
+ // for an activity process.
+ @Override
+ public void onRestart() {
+ super.onRestart();
+ // Load changes knowing that the Activity has already
+ // been visible within this process.
+ }
+
+ // Called at the start of the visible lifetime.
+ @Override
+ public void onStart() {
+ super.onStart();
+ // Apply any required UI change now that the Activity is visible.
+ }
+
+ // Called at the start of the active lifetime.
+ @Override
+ public void onResume() {
+ super.onResume();
+ // Resume any paused UI updates, threads, or processes required
+ // by the Activity but suspended when it was inactive.
+ }
+
+ // Called to save UI state changes at the
+ // end of the active lifecycle.
+ @Override
+ public void onSaveInstanceState(Bundle savedInstanceState) {
+ // Save UI state changes to the savedInstanceState.
+ // This bundle will be passed to onCreate and
+ // onRestoreInstanceState if the process is
+ // killed and restarted by the run time.
+ super.onSaveInstanceState(savedInstanceState);
+ }
+
+ // Called at the end of the active lifetime.
+ @Override
+ public void onPause() {
+ // Suspend UI updates, threads, or CPU intensive processes
+ // that don't need to be updated when the Activity isn't
+ // the active foreground Activity.
+ super.onPause();
+ }
+
+ // Called at the end of the visible lifetime.
+ @Override
+ public void onStop() {
+ // Suspend remaining UI updates, threads, or processing
+ // that aren't required when the Activity isn't visible.
+ // Persist all edits or state changes
+ // as after this call the process is likely to be killed.
+ super.onStop();
+ }
+
+ // Sometimes called at the end of the full lifetime.
+ @Override
+ public void onDestroy() {
+ // Clean up any resources including ending threads,
+ // closing database connections etc.
+ super.onDestroy();
+ }
+}
diff --git a/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/ResourcePolicyTestActivity.java b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/ResourcePolicyTestActivity.java
new file mode 100644
index 0000000..c9e2ddb
--- /dev/null
+++ b/services/mediatranscoding/tests/TranscodingUidPolicyTestApp/src/com/android/tests/transcoding/ResourcePolicyTestActivity.java
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.tests.transcoding;
+
+import android.app.Activity;
+import android.media.MediaCodec;
+import android.media.MediaCodecInfo;
+import android.media.MediaCodecInfo.CodecCapabilities;
+import android.media.MediaCodecInfo.VideoCapabilities;
+import android.media.MediaCodecList;
+import android.media.MediaFormat;
+import android.os.Bundle;
+import android.util.Log;
+import java.io.IOException;
+import java.util.Vector;
+
+public class ResourcePolicyTestActivity extends Activity {
+ public static final int TYPE_NONSECURE = 0;
+ public static final int TYPE_SECURE = 1;
+ public static final int TYPE_MIX = 2;
+
+ protected String TAG;
+ private static final int FRAME_RATE = 10;
+ private static final int IFRAME_INTERVAL = 10; // 10 seconds between I-frames
+ private static final String MIME = MediaFormat.MIMETYPE_VIDEO_AVC;
+ private static final int TIMEOUT_MS = 5000;
+
+ private Vector<MediaCodec> mCodecs = new Vector<MediaCodec>();
+
+ private class TestCodecCallback extends MediaCodec.Callback {
+ @Override
+ public void onInputBufferAvailable(MediaCodec codec, int index) {
+ Log.d(TAG, "onInputBufferAvailable " + codec.toString());
+ }
+
+ @Override
+ public void onOutputBufferAvailable(
+ MediaCodec codec, int index, MediaCodec.BufferInfo info) {
+ Log.d(TAG, "onOutputBufferAvailable " + codec.toString());
+ }
+
+ @Override
+ public void onError(MediaCodec codec, MediaCodec.CodecException e) {
+ Log.d(TAG, "onError " + codec.toString() + " errorCode " + e.getErrorCode());
+ }
+
+ @Override
+ public void onOutputFormatChanged(MediaCodec codec, MediaFormat format) {
+ Log.d(TAG, "onOutputFormatChanged " + codec.toString());
+ }
+ }
+
+ private MediaCodec.Callback mCallback = new TestCodecCallback();
+
+ private MediaFormat getTestFormat(CodecCapabilities caps, boolean securePlayback) {
+ VideoCapabilities vcaps = caps.getVideoCapabilities();
+ int width = vcaps.getSupportedWidths().getLower();
+ int height = vcaps.getSupportedHeightsFor(width).getLower();
+ int bitrate = vcaps.getBitrateRange().getLower();
+
+ MediaFormat format = MediaFormat.createVideoFormat(MIME, width, height);
+ format.setInteger(MediaFormat.KEY_COLOR_FORMAT, caps.colorFormats[0]);
+ format.setInteger(MediaFormat.KEY_BIT_RATE, bitrate);
+ format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
+ format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
+ format.setFeatureEnabled(CodecCapabilities.FEATURE_SecurePlayback, securePlayback);
+ return format;
+ }
+
+ private MediaCodecInfo getTestCodecInfo(boolean securePlayback) {
+ // Use avc decoder for testing.
+ boolean isEncoder = false;
+
+ MediaCodecList mcl = new MediaCodecList(MediaCodecList.ALL_CODECS);
+ for (MediaCodecInfo info : mcl.getCodecInfos()) {
+ if (info.isEncoder() != isEncoder) {
+ continue;
+ }
+ CodecCapabilities caps;
+ try {
+ caps = info.getCapabilitiesForType(MIME);
+ boolean securePlaybackSupported =
+ caps.isFeatureSupported(CodecCapabilities.FEATURE_SecurePlayback);
+ boolean securePlaybackRequired =
+ caps.isFeatureRequired(CodecCapabilities.FEATURE_SecurePlayback);
+ if ((securePlayback && securePlaybackSupported)
+ || (!securePlayback && !securePlaybackRequired)) {
+ Log.d(TAG, "securePlayback " + securePlayback + " will use " + info.getName());
+ } else {
+ Log.d(TAG, "securePlayback " + securePlayback + " skip " + info.getName());
+ continue;
+ }
+ } catch (IllegalArgumentException e) {
+ // mime is not supported
+ continue;
+ }
+ return info;
+ }
+
+ return null;
+ }
+
+ protected int allocateCodecs(int max) {
+ Bundle extras = getIntent().getExtras();
+ int type = TYPE_NONSECURE;
+ if (extras != null) {
+ type = extras.getInt("test-type", type);
+ Log.d(TAG, "type is: " + type);
+ }
+
+ boolean shouldSkip = false;
+ boolean securePlayback;
+ if (type == TYPE_NONSECURE || type == TYPE_MIX) {
+ securePlayback = false;
+ MediaCodecInfo info = getTestCodecInfo(securePlayback);
+ if (info != null) {
+ allocateCodecs(max, info, securePlayback);
+ } else {
+ shouldSkip = true;
+ }
+ }
+
+ if (!shouldSkip) {
+ if (type == TYPE_SECURE || type == TYPE_MIX) {
+ securePlayback = true;
+ MediaCodecInfo info = getTestCodecInfo(securePlayback);
+ if (info != null) {
+ allocateCodecs(max, info, securePlayback);
+ } else {
+ shouldSkip = true;
+ }
+ }
+ }
+
+ if (shouldSkip) {
+ Log.d(TAG, "test skipped as there's no supported codec.");
+ finishWithResult(RESULT_OK);
+ }
+
+ Log.d(TAG, "allocateCodecs returned " + mCodecs.size());
+ return mCodecs.size();
+ }
+
+ protected void allocateCodecs(int max, MediaCodecInfo info, boolean securePlayback) {
+ String name = info.getName();
+ CodecCapabilities caps = info.getCapabilitiesForType(MIME);
+ MediaFormat format = getTestFormat(caps, securePlayback);
+ MediaCodec codec = null;
+ for (int i = mCodecs.size(); i < max; ++i) {
+ try {
+ Log.d(TAG, "Create codec " + name + " #" + i);
+ codec = MediaCodec.createByCodecName(name);
+ codec.setCallback(mCallback);
+ Log.d(TAG, "Configure codec " + format);
+ codec.configure(format, null, null, 0);
+ Log.d(TAG, "Start codec " + format);
+ codec.start();
+ mCodecs.add(codec);
+ codec = null;
+ } catch (IllegalArgumentException e) {
+ Log.d(TAG, "IllegalArgumentException " + e.getMessage());
+ break;
+ } catch (IOException e) {
+ Log.d(TAG, "IOException " + e.getMessage());
+ break;
+ } catch (MediaCodec.CodecException e) {
+ Log.d(TAG, "CodecException 0x" + Integer.toHexString(e.getErrorCode()));
+ break;
+ } finally {
+ if (codec != null) {
+ Log.d(TAG, "release codec");
+ codec.release();
+ codec = null;
+ }
+ }
+ }
+ }
+
+ protected void finishWithResult(int result) {
+ for (int i = 0; i < mCodecs.size(); ++i) {
+ Log.d(TAG, "release codec #" + i);
+ mCodecs.get(i).release();
+ }
+ mCodecs.clear();
+ setResult(result);
+ finish();
+ Log.d(TAG, "activity finished");
+ }
+
+ private void doUseCodecs() {
+ int current = 0;
+ try {
+ for (current = 0; current < mCodecs.size(); ++current) {
+ mCodecs.get(current).getName();
+ }
+ } catch (MediaCodec.CodecException e) {
+ Log.d(TAG, "useCodecs got CodecException 0x" + Integer.toHexString(e.getErrorCode()));
+ if (e.getErrorCode() == MediaCodec.CodecException.ERROR_RECLAIMED) {
+ Log.d(TAG, "Remove codec " + current + " from the list");
+ mCodecs.get(current).release();
+ mCodecs.remove(current);
+ mGotReclaimedException = true;
+ mUseCodecs = false;
+ }
+ return;
+ }
+ }
+
+ private Thread mWorkerThread;
+ private volatile boolean mUseCodecs = true;
+ private volatile boolean mGotReclaimedException = false;
+ protected void useCodecs() {
+ mWorkerThread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ long start = System.currentTimeMillis();
+ long timeSinceStartedMs = 0;
+ while (mUseCodecs && (timeSinceStartedMs < TIMEOUT_MS)) {
+ doUseCodecs();
+ try {
+ Thread.sleep(50 /* millis */);
+ } catch (InterruptedException e) {
+ }
+ timeSinceStartedMs = System.currentTimeMillis() - start;
+ }
+ if (mGotReclaimedException) {
+ Log.d(TAG, "Got expected reclaim exception.");
+ }
+ finishWithResult(RESULT_OK);
+ }
+ });
+ mWorkerThread.start();
+ }
+
+ private static final int MAX_INSTANCES = 32;
+
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ TAG = "ResourcePolicyTestActivity";
+
+ Log.d(TAG, "onCreate called.");
+ super.onCreate(savedInstanceState);
+
+ if (allocateCodecs(MAX_INSTANCES) == MAX_INSTANCES) {
+ // haven't reached the limit with MAX_INSTANCES, no need to wait for reclaim exception.
+ //mWaitForReclaim = false;
+ Log.d(TAG, "Didn't hit resource limitation");
+ }
+
+ useCodecs();
+ }
+
+ @Override
+ protected void onDestroy() {
+ Log.d(TAG, "onDestroy called.");
+ super.onDestroy();
+ }
+}
diff --git a/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh b/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
old mode 100644
new mode 100755
index bcdc7f7..1b42a22
--- a/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
+++ b/services/mediatranscoding/tests/build_and_run_all_unit_tests.sh
@@ -13,11 +13,35 @@
mm
-echo "waiting for device"
+# Push the files onto the device.
+. $ANDROID_BUILD_TOP/frameworks/av/media/libmediatranscoding/tests/assets/push_assets.sh
-adb root && adb wait-for-device remount && adb sync
+echo "[==========] installing test apps"
+adb root
+adb install -t -r -g -d $ANDROID_TARGET_OUT_TESTCASES/TranscodingUidPolicy_TestAppA/arm64/TranscodingUidPolicy_TestAppA.apk
+adb install -t -r -g -d $ANDROID_TARGET_OUT_TESTCASES/TranscodingUidPolicy_TestAppB/arm64/TranscodingUidPolicy_TestAppB.apk
+adb install -t -r -g -d $ANDROID_TARGET_OUT_TESTCASES/TranscodingUidPolicy_TestAppC/arm64/TranscodingUidPolicy_TestAppC.apk
-echo "========================================"
+echo "[==========] waiting for device and sync"
+adb wait-for-device remount && adb sync
-echo "testing mediatranscodingservice"
-adb shell /data/nativetest64/mediatranscodingservice_tests/mediatranscodingservice_tests
+echo "[==========] running simulated tests"
+adb shell setprop debug.transcoding.simulated_transcoder true
+adb shell kill -9 `pid media.transcoding`
+#adb shell /data/nativetest64/mediatranscodingservice_simulated_tests/mediatranscodingservice_simulated_tests
+adb shell /data/nativetest/mediatranscodingservice_simulated_tests/mediatranscodingservice_simulated_tests
+
+echo "[==========] running real tests"
+adb shell setprop debug.transcoding.simulated_transcoder false
+adb shell kill -9 `pid media.transcoding`
+#adb shell /data/nativetest64/mediatranscodingservice_real_tests/mediatranscodingservice_real_tests
+adb shell /data/nativetest/mediatranscodingservice_real_tests/mediatranscodingservice_real_tests
+
+echo "[==========] running resource tests"
+adb shell kill -9 `pid media.transcoding`
+#adb shell /data/nativetest64/mediatranscodingservice_resource_tests/mediatranscodingservice_resource_tests
+adb shell /data/nativetest/mediatranscodingservice_resource_tests/mediatranscodingservice_resource_tests
+
+echo "[==========] removing debug properties"
+adb shell setprop debug.transcoding.simulated_transcoder \"\"
+adb shell kill -9 `pid media.transcoding`
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
new file mode 100644
index 0000000..0550d77
--- /dev/null
+++ b/services/mediatranscoding/tests/mediatranscodingservice_real_tests.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaTranscodingService.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaTranscodingServiceRealTest"
+
+#include "MediaTranscodingServiceTestHelper.h"
+
+/*
+ * Tests media transcoding service with real transcoder.
+ *
+ * Uses the same test assets as the MediaTranscoder unit tests. Before running the test,
+ * please make sure to push the test assets to /sdcard:
+ *
+ * adb push $TOP/frameworks/av/media/libmediatranscoding/transcoder/tests/assets /data/local/tmp/TranscodingTestAssets
+ */
+namespace android {
+
+namespace media {
+
+constexpr int64_t kPaddingUs = 400000;
+constexpr int64_t kSessionWithPaddingUs = 10000000 + kPaddingUs;
+constexpr int32_t kBitRate = 8 * 1000 * 1000; // 8Mbs
+
+constexpr const char* kShortSrcPath =
+ "/data/local/tmp/TranscodingTestAssets/cubicle_avc_480x240_aac_24KHz.mp4";
+constexpr const char* kLongSrcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
+
+#define OUTPATH(name) "/data/local/tmp/MediaTranscodingService_" #name ".MP4"
+
+class MediaTranscodingServiceRealTest : public MediaTranscodingServiceTestBase {
+public:
+ MediaTranscodingServiceRealTest() { ALOGI("MediaTranscodingServiceResourceTest created"); }
+
+ virtual ~MediaTranscodingServiceRealTest() {
+ ALOGI("MediaTranscodingServiceResourceTest destroyed");
+ }
+};
+
+TEST_F(MediaTranscodingServiceRealTest, TestInvalidSource) {
+ registerMultipleClients();
+
+ const char* srcPath = "bad_file_uri";
+ const char* dstPath = OUTPATH(TestInvalidSource);
+ deleteFile(dstPath);
+
+ // Submit one session.
+ EXPECT_TRUE(
+ mClient1->submit(0, srcPath, dstPath, TranscodingSessionPriority::kNormal, kBitRate));
+
+ // Check expected error.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Failed(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->getLastError(), TranscodingErrorCode::kErrorIO);
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestPassthru) {
+ registerMultipleClients();
+
+ const char* dstPath = OUTPATH(TestPassthru);
+ deleteFile(dstPath);
+
+ // Submit one session.
+ EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath));
+
+ // Wait for session to finish.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideo) {
+ registerMultipleClients();
+
+ const char* dstPath = OUTPATH(TestTranscodeVideo);
+ deleteFile(dstPath);
+
+ // Submit one session.
+ EXPECT_TRUE(mClient1->submit(0, kShortSrcPath, dstPath, TranscodingSessionPriority::kNormal,
+ kBitRate));
+
+ // Wait for session to finish.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestTranscodeVideoProgress) {
+ registerMultipleClients();
+
+ const char* dstPath = OUTPATH(TestTranscodeVideoProgress);
+ deleteFile(dstPath);
+
+ // Submit one session.
+ EXPECT_TRUE(mClient1->submit(0, kLongSrcPath, dstPath, TranscodingSessionPriority::kNormal,
+ kBitRate));
+
+ // Wait for session to finish.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+
+ // Check the progress update messages are received. For this clip (around ~15 second long),
+ // expect at least 10 updates, and the last update should be 100.
+ int lastProgress;
+ EXPECT_GE(mClient1->getUpdateCount(&lastProgress), 10);
+ EXPECT_EQ(lastProgress, 100);
+
+ unregisterMultipleClients();
+}
+
+/*
+ * Test cancel immediately after start.
+ */
+TEST_F(MediaTranscodingServiceRealTest, TestCancelImmediately) {
+ registerMultipleClients();
+
+ const char* srcPath0 = kLongSrcPath;
+ const char* srcPath1 = kShortSrcPath;
+ const char* dstPath0 = OUTPATH(TestCancelImmediately_Session0);
+ const char* dstPath1 = OUTPATH(TestCancelImmediately_Session1);
+
+ deleteFile(dstPath0);
+ deleteFile(dstPath1);
+ // Submit one session, should start immediately.
+ EXPECT_TRUE(
+ mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->getSession(0, srcPath0, dstPath0));
+
+ // Test cancel session immediately, getSession should fail after cancel.
+ EXPECT_TRUE(mClient1->cancel(0));
+ EXPECT_TRUE(mClient1->getSession<fail>(0, "", ""));
+
+ // Submit new session, new session should start immediately and finish.
+ EXPECT_TRUE(
+ mClient1->submit(1, srcPath1, dstPath1, TranscodingSessionPriority::kNormal, kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+
+ unregisterMultipleClients();
+}
+
+/*
+ * Test cancel in the middle of transcoding.
+ */
+TEST_F(MediaTranscodingServiceRealTest, TestCancelWhileRunning) {
+ registerMultipleClients();
+
+ const char* srcPath0 = kLongSrcPath;
+ const char* srcPath1 = kShortSrcPath;
+ const char* dstPath0 = OUTPATH(TestCancelWhileRunning_Session0);
+ const char* dstPath1 = OUTPATH(TestCancelWhileRunning_Session1);
+
+ deleteFile(dstPath0);
+ deleteFile(dstPath1);
+ // Submit two sessions, session 0 should start immediately, session 1 should be queued.
+ EXPECT_TRUE(
+ mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
+ EXPECT_TRUE(
+ mClient1->submit(1, srcPath1, dstPath1, TranscodingSessionPriority::kNormal, kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->getSession(0, srcPath0, dstPath0));
+ EXPECT_TRUE(mClient1->getSession(1, srcPath1, dstPath1));
+
+ // Session 0 (longtest) shouldn't finish in 1 seconds.
+ EXPECT_EQ(mClient1->pop(1000000), EventTracker::NoEvent);
+
+ // Now cancel session 0. Session 1 should start immediately and finish.
+ EXPECT_TRUE(mClient1->cancel(0));
+ EXPECT_TRUE(mClient1->getSession<fail>(0, "", ""));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceRealTest, TestPauseResumeSingleClient) {
+ registerMultipleClients();
+
+ const char* srcPath0 = kLongSrcPath;
+ const char* srcPath1 = kShortSrcPath;
+ const char* dstPath0 = OUTPATH(TestPauseResumeSingleClient_Session0);
+ const char* dstPath1 = OUTPATH(TestPauseResumeSingleClient_Session1);
+ deleteFile(dstPath0);
+ deleteFile(dstPath1);
+
+ // Submit one offline session, should start immediately.
+ EXPECT_TRUE(mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kUnspecified,
+ kBitRate));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ // Test get session after starts.
+ EXPECT_TRUE(mClient1->getSession(0, srcPath0, dstPath0));
+
+ // Submit one realtime session.
+ EXPECT_TRUE(
+ mClient1->submit(1, srcPath1, dstPath1, TranscodingSessionPriority::kNormal, kBitRate));
+
+ // Offline session should pause.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_TRUE(mClient1->getSession(0, srcPath0, dstPath0));
+
+ // Realtime session should start immediately, and run to finish.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+
+ // Test get session after finish fails.
+ EXPECT_TRUE(mClient1->getSession<fail>(1, "", ""));
+
+ // Then offline session should resume.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+ // Test get session after resume.
+ EXPECT_TRUE(mClient1->getSession(0, srcPath0, dstPath0));
+
+ // Offline session should finish.
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ // Test get session after finish fails.
+ EXPECT_TRUE(mClient1->getSession<fail>(0, "", ""));
+
+ unregisterMultipleClients();
+}
+
+/*
+ * Basic test for pause/resume with two clients, with one session each.
+ * Top app's session should preempt the other app's session.
+ */
+TEST_F(MediaTranscodingServiceRealTest, TestPauseResumeMultiClients) {
+ ALOGD("TestPauseResumeMultiClients starting...");
+
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ registerMultipleClients();
+
+ const char* srcPath0 = kLongSrcPath;
+ const char* srcPath1 = kShortSrcPath;
+ const char* dstPath0 = OUTPATH(TestPauseResumeMultiClients_Client0);
+ const char* dstPath1 = OUTPATH(TestPauseResumeMultiClients_Client1);
+ deleteFile(dstPath0);
+ deleteFile(dstPath1);
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit session to Client1.
+ ALOGD("Submitting session to client1 (app A) ...");
+ EXPECT_TRUE(
+ mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
+
+ // Client1's session should start immediately.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+
+ // Client1's session should continue to run, since Client2 (app B) doesn't have any session.
+ EXPECT_EQ(mClient1->pop(1000000), EventTracker::NoEvent);
+
+ // Submit session to Client2.
+ ALOGD("Submitting session to client2 (app B) ...");
+ EXPECT_TRUE(
+ mClient2->submit(0, srcPath1, dstPath1, TranscodingSessionPriority::kNormal, kBitRate));
+
+ // Client1's session should pause, client2's session should start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+
+ // Client2's session should finish, then Client1's session should resume.
+ EXPECT_EQ(mClient2->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+
+ // Client1's session should finish.
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+
+ unregisterMultipleClients();
+
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ ALOGD("TestPauseResumeMultiClients finished.");
+}
+
+} // namespace media
+} // namespace android
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_resource_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_resource_tests.cpp
new file mode 100644
index 0000000..bf99efc
--- /dev/null
+++ b/services/mediatranscoding/tests/mediatranscodingservice_resource_tests.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaTranscodingService.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaTranscodingServiceRealTest"
+
+#include "MediaTranscodingServiceTestHelper.h"
+
+/*
+ * Tests media transcoding service with real transcoder.
+ *
+ * Uses the same test assets as the MediaTranscoder unit tests. Before running the test,
+ * please make sure to push the test assets to /sdcard:
+ *
+ * adb push $TOP/frameworks/av/media/libmediatranscoding/transcoder/tests/assets /data/local/tmp/TranscodingTestAssets
+ */
+namespace android {
+
+namespace media {
+
+constexpr int64_t kPaddingUs = 400000;
+constexpr int32_t kBitRate = 8 * 1000 * 1000; // 8Mbs
+
+constexpr const char* kLongSrcPath = "/data/local/tmp/TranscodingTestAssets/longtest_15s.mp4";
+
+constexpr const char* kResourcePolicyTestActivity =
+ "/com.android.tests.transcoding.ResourcePolicyTestActivity";
+
+#define OUTPATH(name) "/data/local/tmp/MediaTranscodingService_" #name ".MP4"
+
+class MediaTranscodingServiceResourceTest : public MediaTranscodingServiceTestBase {
+public:
+ MediaTranscodingServiceResourceTest() { ALOGI("MediaTranscodingServiceResourceTest created"); }
+
+ virtual ~MediaTranscodingServiceResourceTest() {
+ ALOGI("MediaTranscodingServiceResourceTest destroyed");
+ }
+};
+
+/**
+ * Basic testing for handling resource lost.
+ *
+ * This test starts a transcoding session (that's somewhat long and takes several seconds),
+ * then launches an activity that allocates video codec instances until it hits insufficient
+ * resource error. Because the activity is running in foreground,
+ * ResourceManager would reclaim codecs from transcoding service which should
+ * cause the session to be paused. The activity will hold the codecs for a few seconds
+ * before releasing them, and the transcoding service should be able to resume
+ * and complete the session.
+ */
+TEST_F(MediaTranscodingServiceResourceTest, TestResourceLost) {
+ ALOGD("TestResourceLost starting...");
+
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+
+ registerMultipleClients();
+
+ const char* srcPath0 = kLongSrcPath;
+ const char* dstPath0 = OUTPATH(TestPauseResumeMultiClients_Client0);
+ deleteFile(dstPath0);
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit session to Client1.
+ ALOGD("Submitting session to client1 (app A) ...");
+ EXPECT_TRUE(
+ mClient1->submit(0, srcPath0, dstPath0, TranscodingSessionPriority::kNormal, kBitRate));
+
+ // Client1's session should start immediately.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ // Launch ResourcePolicyTestActivity, which will try to allocate up to 32
+ // instances, which should trigger insufficient resources on most devices.
+ // (Note that it's possible that the device supports a very high number of
+ // resource instances, in which case we'll simply require that the session completes.)
+ ALOGD("Launch ResourcePolicyTestActivity...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kResourcePolicyTestActivity));
+
+ // The basic requirement is that the session should complete. Wait for finish
+ // event to come and pop up all events received.
+ std::list<EventTracker::Event> events;
+ EXPECT_TRUE(mClient1->waitForSpecificEventAndPop(EventTracker::Finished(CLIENT(1), 0), &events,
+ 15000000));
+
+ // If there is only 1 event, it must be finish (otherwise waitForSpecificEventAndPop
+    // wouldn't pop up anything), and we're ok.
+ //
+ // TODO: If there is only 1 event (finish), and no pause/resume happened, we need
+ // to verify that the ResourcePolicyTestActivity actually was able to allocate
+ // all 32 instances without hitting insufficient resources. Otherwise, it could
+ // be that ResourceManager was not able to reclaim codecs from the transcoding
+ // service at all, which means the resource management is broken.
+ if (events.size() > 1) {
+ EXPECT_TRUE(events.size() >= 3);
+ size_t i = 0;
+ for (auto& event : events) {
+ if (i == 0) {
+ EXPECT_EQ(event, EventTracker::Pause(CLIENT(1), 0));
+ } else if (i == events.size() - 2) {
+ EXPECT_EQ(event, EventTracker::Resume(CLIENT(1), 0));
+ } else if (i == events.size() - 1) {
+ EXPECT_EQ(event, EventTracker::Finished(CLIENT(1), 0));
+ } else {
+ EXPECT_TRUE(event == EventTracker::Pause(CLIENT(1), 0) ||
+ event == EventTracker::Resume(CLIENT(1), 0));
+ }
+ i++;
+ }
+ }
+
+ unregisterMultipleClients();
+
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+}
+
+} // namespace media
+} // namespace android
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
new file mode 100644
index 0000000..7dfda44
--- /dev/null
+++ b/services/mediatranscoding/tests/mediatranscodingservice_simulated_tests.cpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Unit Test for MediaTranscodingService.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaTranscodingServiceSimulatedTest"
+
+#include <aidl/android/media/BnTranscodingClientCallback.h>
+#include <aidl/android/media/IMediaTranscodingService.h>
+#include <aidl/android/media/ITranscodingClient.h>
+#include <aidl/android/media/ITranscodingClientCallback.h>
+#include <aidl/android/media/TranscodingRequestParcel.h>
+#include <aidl/android/media/TranscodingSessionParcel.h>
+#include <aidl/android/media/TranscodingSessionPriority.h>
+#include <android-base/logging.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+#include <binder/PermissionController.h>
+#include <cutils/multiuser.h>
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+#include <iostream>
+#include <list>
+
+#include "MediaTranscodingServiceTestHelper.h"
+#include "SimulatedTranscoder.h"
+
+namespace android {
+
+namespace media {
+
+// Note that -1 is valid and means using calling pid/uid for the service. But only a privileged
+// caller could use them. This test is not a privileged caller.
+constexpr int32_t kInvalidClientPid = -5;
+constexpr int32_t kInvalidClientUid = -10;
+constexpr const char* kInvalidClientName = "";
+constexpr const char* kInvalidClientOpPackageName = "";
+
+constexpr int64_t kPaddingUs = 1000000;
+constexpr int64_t kSessionWithPaddingUs = SimulatedTranscoder::kSessionDurationUs + kPaddingUs;
+
+constexpr const char* kClientOpPackageName = "TestClientPackage";
+
+class MediaTranscodingServiceSimulatedTest : public MediaTranscodingServiceTestBase {
+public:
+ MediaTranscodingServiceSimulatedTest() { ALOGI("MediaTranscodingServiceResourceTest created"); }
+
+ virtual ~MediaTranscodingServiceSimulatedTest() {
+ ALOGI("MediaTranscodingServiceResourceTest destroyed");
+ }
+};
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterNullClient) {
+ std::shared_ptr<ITranscodingClient> client;
+
+ // Register the client with null callback.
+ Status status = mService->registerClient(nullptr, kClientName, kClientOpPackageName, &client);
+ EXPECT_FALSE(status.isOk());
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterClientWithInvalidClientName) {
+ std::shared_ptr<ITranscodingClient> client;
+
+ // Register the client with the service.
+ Status status = mService->registerClient(mClient1, kInvalidClientName,
+ kInvalidClientOpPackageName, &client);
+ EXPECT_FALSE(status.isOk());
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterClientWithInvalidClientPackageName) {
+ std::shared_ptr<ITranscodingClient> client;
+
+ // Register the client with the service.
+ Status status =
+ mService->registerClient(mClient1, kClientName, kInvalidClientOpPackageName, &client);
+ EXPECT_FALSE(status.isOk());
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterOneClient) {
+ std::shared_ptr<ITranscodingClient> client;
+
+ Status status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client);
+ EXPECT_TRUE(status.isOk());
+
+ // Validate the client.
+ EXPECT_TRUE(client != nullptr);
+
+ // Check the number of Clients.
+ int32_t numOfClients;
+ status = mService->getNumOfClients(&numOfClients);
+ EXPECT_TRUE(status.isOk());
+ EXPECT_GE(numOfClients, 1);
+
+ // Unregister the client.
+ status = client->unregister();
+ EXPECT_TRUE(status.isOk());
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterClientTwice) {
+ std::shared_ptr<ITranscodingClient> client;
+
+ Status status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client);
+ EXPECT_TRUE(status.isOk());
+
+ // Validate the client.
+ EXPECT_TRUE(client != nullptr);
+
+ // Register the client again and expects failure.
+ std::shared_ptr<ITranscodingClient> client1;
+ status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client1);
+ EXPECT_FALSE(status.isOk());
+
+ // Unregister the client.
+ status = client->unregister();
+ EXPECT_TRUE(status.isOk());
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestRegisterMultipleClients) {
+ registerMultipleClients();
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestSessionIdIndependence) {
+ registerMultipleClients();
+
+ // Submit 2 requests on client1 first.
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file", "test_destination_file"));
+
+ // Submit 2 requests on client2, sessionId should be independent for each client.
+ EXPECT_TRUE(mClient2->submit(0, "test_source_file", "test_destination_file"));
+ EXPECT_TRUE(mClient2->submit(1, "test_source_file", "test_destination_file"));
+
+ // Cancel all sessions.
+ EXPECT_TRUE(mClient1->cancel(0));
+ EXPECT_TRUE(mClient1->cancel(1));
+ EXPECT_TRUE(mClient2->cancel(0));
+ EXPECT_TRUE(mClient2->cancel(1));
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestSubmitCancelSessions) {
+ registerMultipleClients();
+
+ // Test sessionId assignment.
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file"));
+
+ // Test submit bad request (no valid sourceFilePath) fails.
+ EXPECT_TRUE(mClient1->submit<fail>(0, "", ""));
+
+ // Test submit bad request (no valid sourceFilePath) fails.
+ EXPECT_TRUE(mClient1->submit<fail>(0, "src", "dst", TranscodingSessionPriority::kNormal,
+ 1000000, kInvalidClientPid, kInvalidClientUid));
+
+ // Test cancel non-existent session fails.
+ EXPECT_TRUE(mClient1->cancel<fail>(100));
+
+ // Session 0 should start immediately and finish in 2 seconds, followed by Session 1 start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+
+ // Test cancel valid sessionId in random order.
+ // Test cancel finished session fails.
+ EXPECT_TRUE(mClient1->cancel(2));
+ EXPECT_TRUE(mClient1->cancel<fail>(0));
+ EXPECT_TRUE(mClient1->cancel(1));
+
+ // Test cancel session again fails.
+ EXPECT_TRUE(mClient1->cancel<fail>(1));
+
+ // Test no more events arriving after cancel.
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::NoEvent);
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestGetSessions) {
+ registerMultipleClients();
+
+ // Submit 3 requests.
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
+
+ // Test get sessions by id.
+ EXPECT_TRUE(mClient1->getSession(2, "test_source_file_2", "test_destination_file_2"));
+ EXPECT_TRUE(mClient1->getSession(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->getSession(0, "test_source_file_0", "test_destination_file_0"));
+
+ // Test get session by invalid id fails.
+ EXPECT_TRUE(mClient1->getSession<fail>(100, "", ""));
+ EXPECT_TRUE(mClient1->getSession<fail>(-1, "", ""));
+
+ // Test get session after cancel fails.
+ EXPECT_TRUE(mClient1->cancel(2));
+ EXPECT_TRUE(mClient1->getSession<fail>(2, "", ""));
+
+ // Session 0 should start immediately and finish in 2 seconds, followed by Session 1 start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+
+ // Test get session after finish fails.
+ EXPECT_TRUE(mClient1->getSession<fail>(0, "", ""));
+
+ // Test get the remaining session 1.
+ EXPECT_TRUE(mClient1->getSession(1, "test_source_file_1", "test_destination_file_1"));
+
+ // Cancel remaining session 1.
+ EXPECT_TRUE(mClient1->cancel(1));
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestSubmitCancelWithOfflineSessions) {
+ registerMultipleClients();
+
+ // Submit some offline sessions first.
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0",
+ TranscodingSessionPriority::kUnspecified));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1",
+ TranscodingSessionPriority::kUnspecified));
+
+ // Session 0 should start immediately.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ // Submit more real-time sessions.
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
+ EXPECT_TRUE(mClient1->submit(3, "test_source_file_3", "test_destination_file_3"));
+
+ // Session 0 should pause immediately and session 2 should start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 2));
+
+ // Session 2 should finish in 2 seconds and session 3 should start.
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 2));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 3));
+
+ // Cancel session 3 now
+ EXPECT_TRUE(mClient1->cancel(3));
+
+ // Session 0 should resume and finish in 2 seconds, followed by session 1 start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+
+ // Cancel remaining session 1.
+ EXPECT_TRUE(mClient1->cancel(1));
+
+ unregisterMultipleClients();
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestClientUseAfterUnregister) {
+ std::shared_ptr<ITranscodingClient> client;
+
+ // Register a client, then unregister.
+ Status status = mService->registerClient(mClient1, kClientName, kClientOpPackageName, &client);
+ EXPECT_TRUE(status.isOk());
+
+ status = client->unregister();
+ EXPECT_TRUE(status.isOk());
+
+ // Test various operations on the client, should fail with ERROR_DISCONNECTED.
+ TranscodingSessionParcel session;
+ bool result;
+ status = client->getSessionWithId(0, &session, &result);
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ status = client->cancelSession(0, &result);
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+
+ TranscodingRequestParcel request;
+ status = client->submitRequest(request, &session, &result);
+ EXPECT_EQ(status.getServiceSpecificError(), IMediaTranscodingService::ERROR_DISCONNECTED);
+}
+
+TEST_F(MediaTranscodingServiceSimulatedTest, TestTranscodingUidPolicy) {
+ ALOGD("TestTranscodingUidPolicy starting...");
+
+ EXPECT_TRUE(ShellHelper::RunCmd("input keyevent KEYCODE_WAKEUP"));
+ EXPECT_TRUE(ShellHelper::RunCmd("wm dismiss-keyguard"));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ registerMultipleClients();
+
+ ALOGD("Moving app A to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Submit 3 requests.
+ ALOGD("Submitting session to client1 (app A) ...");
+ EXPECT_TRUE(mClient1->submit(0, "test_source_file_0", "test_destination_file_0"));
+ EXPECT_TRUE(mClient1->submit(1, "test_source_file_1", "test_destination_file_1"));
+ EXPECT_TRUE(mClient1->submit(2, "test_source_file_2", "test_destination_file_2"));
+
+ // Session 0 should start immediately.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 0));
+
+ ALOGD("Moving app B to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageB, kTestActivityName));
+
+ // Session 0 should continue and finish in 2 seconds, then session 1 should start.
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 1));
+
+ ALOGD("Submitting session to client2 (app B) ...");
+ EXPECT_TRUE(mClient2->submit(0, "test_source_file_0", "test_destination_file_0"));
+
+ // Client1's session should pause, client2's session should start.
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Pause(CLIENT(1), 1));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Start(CLIENT(2), 0));
+
+ ALOGD("Moving app A back to top...");
+ EXPECT_TRUE(ShellHelper::Start(kClientPackageA, kTestActivityName));
+
+ // Client2's session should pause, client1's session 1 should resume.
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Pause(CLIENT(2), 0));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Resume(CLIENT(1), 1));
+
+    // Client1's session 1 should finish in 2 seconds, then its session 2 should start.
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 1));
+ EXPECT_EQ(mClient1->pop(kPaddingUs), EventTracker::Start(CLIENT(1), 2));
+
+    // After client1's sessions finish, client2's session should resume.
+ EXPECT_EQ(mClient1->pop(kSessionWithPaddingUs), EventTracker::Finished(CLIENT(1), 2));
+ EXPECT_EQ(mClient2->pop(kPaddingUs), EventTracker::Resume(CLIENT(2), 0));
+
+ unregisterMultipleClients();
+
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageA));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageB));
+ EXPECT_TRUE(ShellHelper::Stop(kClientPackageC));
+
+ ALOGD("TestTranscodingUidPolicy finished.");
+}
+
+} // namespace media
+} // namespace android
diff --git a/services/mediatranscoding/tests/mediatranscodingservice_tests.cpp b/services/mediatranscoding/tests/mediatranscodingservice_tests.cpp
deleted file mode 100644
index 5a791fe..0000000
--- a/services/mediatranscoding/tests/mediatranscodingservice_tests.cpp
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Unit Test for MediaTranscoding Service.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaTranscodingServiceTest"
-
-#include <aidl/android/media/BnTranscodingServiceClient.h>
-#include <aidl/android/media/IMediaTranscodingService.h>
-#include <aidl/android/media/ITranscodingServiceClient.h>
-#include <android-base/logging.h>
-#include <android-base/unique_fd.h>
-#include <android/binder_ibinder_jni.h>
-#include <android/binder_manager.h>
-#include <android/binder_process.h>
-#include <cutils/ashmem.h>
-#include <gtest/gtest.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <utils/Log.h>
-
-namespace android {
-
-namespace media {
-
-using Status = ::ndk::ScopedAStatus;
-using aidl::android::media::BnTranscodingServiceClient;
-using aidl::android::media::IMediaTranscodingService;
-using aidl::android::media::ITranscodingServiceClient;
-
-constexpr int32_t kInvalidClientId = -5;
-
-// Note that -1 is valid and means using calling pid/uid for the service. But only privilege caller could
-// use them. This test is not a privilege caller.
-constexpr int32_t kInvalidClientPid = -5;
-constexpr int32_t kInvalidClientUid = -5;
-constexpr const char* kInvalidClientOpPackageName = "";
-
-constexpr int32_t kClientUseCallingPid = -1;
-constexpr int32_t kClientUseCallingUid = -1;
-constexpr const char* kClientOpPackageName = "TestClient";
-
-class MediaTranscodingServiceTest : public ::testing::Test {
-public:
- MediaTranscodingServiceTest() { ALOGD("MediaTranscodingServiceTest created"); }
-
- void SetUp() override {
- ::ndk::SpAIBinder binder(AServiceManager_getService("media.transcoding"));
- mService = IMediaTranscodingService::fromBinder(binder);
- if (mService == nullptr) {
- ALOGE("Failed to connect to the media.trascoding service.");
- return;
- }
- }
-
- ~MediaTranscodingServiceTest() { ALOGD("MediaTranscodingingServiceTest destroyed"); }
-
- std::shared_ptr<IMediaTranscodingService> mService = nullptr;
-};
-
-struct TestClient : public BnTranscodingServiceClient {
- TestClient(const std::shared_ptr<IMediaTranscodingService>& service) : mService(service) {
- ALOGD("TestClient Created");
- }
-
- Status getName(std::string* _aidl_return) override {
- *_aidl_return = "test_client";
- return Status::ok();
- }
-
- Status onTranscodingFinished(
- int32_t /* in_jobId */,
- const ::aidl::android::media::TranscodingResultParcel& /* in_result */) override {
- return Status::ok();
- }
-
- Status onTranscodingFailed(
- int32_t /* in_jobId */,
- ::aidl::android::media::TranscodingErrorCode /*in_errorCode */) override {
- return Status::ok();
- }
-
- Status onAwaitNumberOfJobsChanged(int32_t /* in_jobId */, int32_t /* in_oldAwaitNumber */,
- int32_t /* in_newAwaitNumber */) override {
- return Status::ok();
- }
-
- Status onProgressUpdate(int32_t /* in_jobId */, int32_t /* in_progress */) override {
- return Status::ok();
- }
-
- virtual ~TestClient() { ALOGI("TestClient destroyed"); };
-
-private:
- std::shared_ptr<IMediaTranscodingService> mService;
-};
-
-TEST_F(MediaTranscodingServiceTest, TestRegisterNullClient) {
- std::shared_ptr<ITranscodingServiceClient> client = nullptr;
- int32_t clientId = 0;
- Status status = mService->registerClient(client, kClientOpPackageName, kClientUseCallingUid,
- kClientUseCallingPid, &clientId);
- EXPECT_FALSE(status.isOk());
-}
-
-TEST_F(MediaTranscodingServiceTest, TestRegisterClientWithInvalidClientPid) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
- EXPECT_TRUE(client != nullptr);
-
- // Register the client with the service.
- int32_t clientId = 0;
- Status status = mService->registerClient(client, kClientOpPackageName, kClientUseCallingUid,
- kInvalidClientPid, &clientId);
- EXPECT_FALSE(status.isOk());
-}
-
-TEST_F(MediaTranscodingServiceTest, TestRegisterClientWithInvalidClientUid) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
- EXPECT_TRUE(client != nullptr);
-
- // Register the client with the service.
- int32_t clientId = 0;
- Status status = mService->registerClient(client, kClientOpPackageName, kInvalidClientUid,
- kClientUseCallingPid, &clientId);
- EXPECT_FALSE(status.isOk());
-}
-
-TEST_F(MediaTranscodingServiceTest, TestRegisterClientWithInvalidClientPackageName) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
- EXPECT_TRUE(client != nullptr);
-
- // Register the client with the service.
- int32_t clientId = 0;
- Status status = mService->registerClient(client, kInvalidClientOpPackageName,
- kClientUseCallingUid, kClientUseCallingPid, &clientId);
- EXPECT_FALSE(status.isOk());
-}
-
-TEST_F(MediaTranscodingServiceTest, TestRegisterOneClient) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
- EXPECT_TRUE(client != nullptr);
-
- // Register the client with the service.
- int32_t clientId = 0;
- Status status = mService->registerClient(client, kClientOpPackageName, kClientUseCallingPid,
- kClientUseCallingUid, &clientId);
- ALOGD("client id is %d", clientId);
- EXPECT_TRUE(status.isOk());
-
- // Validate the clientId.
- EXPECT_TRUE(clientId > 0);
-
- // Check the number of Clients.
- int32_t numOfClients;
- status = mService->getNumOfClients(&numOfClients);
- EXPECT_TRUE(status.isOk());
- EXPECT_EQ(1, numOfClients);
-
- // Unregister the client.
- bool res;
- status = mService->unregisterClient(clientId, &res);
- EXPECT_TRUE(status.isOk());
- EXPECT_TRUE(res);
-}
-
-TEST_F(MediaTranscodingServiceTest, TestUnRegisterClientWithInvalidClientId) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
- EXPECT_TRUE(client != nullptr);
-
- // Register the client with the service.
- int32_t clientId = 0;
- Status status = mService->registerClient(client, kClientOpPackageName, kClientUseCallingUid,
- kClientUseCallingPid, &clientId);
- ALOGD("client id is %d", clientId);
- EXPECT_TRUE(status.isOk());
-
- // Validate the clientId.
- EXPECT_TRUE(clientId > 0);
-
- // Check the number of Clients.
- int32_t numOfClients;
- status = mService->getNumOfClients(&numOfClients);
- EXPECT_TRUE(status.isOk());
- EXPECT_EQ(1, numOfClients);
-
- // Unregister the client with invalid ID
- bool res;
- mService->unregisterClient(kInvalidClientId, &res);
- EXPECT_FALSE(res);
-
- // Unregister the valid client.
- mService->unregisterClient(clientId, &res);
-}
-
-TEST_F(MediaTranscodingServiceTest, TestRegisterClientTwice) {
- std::shared_ptr<ITranscodingServiceClient> client =
- ::ndk::SharedRefBase::make<TestClient>(mService);
- EXPECT_TRUE(client != nullptr);
-
- // Register the client with the service.
- int32_t clientId = 0;
- Status status = mService->registerClient(client, kClientOpPackageName, kClientUseCallingUid,
- kClientUseCallingPid, &clientId);
- EXPECT_TRUE(status.isOk());
-
- // Validate the clientId.
- EXPECT_TRUE(clientId > 0);
-
- // Register the client again and expects failure.
- status = mService->registerClient(client, kClientOpPackageName, kClientUseCallingUid,
- kClientUseCallingPid, &clientId);
- EXPECT_FALSE(status.isOk());
-
- // Unregister the valid client.
- bool res;
- mService->unregisterClient(clientId, &res);
-}
-
-} // namespace media
-} // namespace android
diff --git a/services/oboeservice/AAudioClientTracker.cpp b/services/oboeservice/AAudioClientTracker.cpp
index 9d9ca63..3ec8dea 100644
--- a/services/oboeservice/AAudioClientTracker.cpp
+++ b/services/oboeservice/AAudioClientTracker.cpp
@@ -198,7 +198,7 @@
for (const auto& serviceStream : streamsToClose) {
aaudio_handle_t handle = serviceStream->getHandle();
ALOGW("binderDied() close abandoned stream 0x%08X\n", handle);
- aaudioService->closeStream(handle);
+ aaudioService->asAAudioServiceInterface().closeStream(handle);
}
// mStreams should be empty now
}
diff --git a/services/oboeservice/AAudioClientTracker.h b/services/oboeservice/AAudioClientTracker.h
index 943b809..facfc3b 100644
--- a/services/oboeservice/AAudioClientTracker.h
+++ b/services/oboeservice/AAudioClientTracker.h
@@ -24,7 +24,7 @@
#include <utils/Singleton.h>
#include <aaudio/AAudio.h>
-#include "binding/IAAudioClient.h"
+#include <aaudio/IAAudioClient.h>
#include "AAudioService.h"
namespace aaudio {
@@ -46,7 +46,7 @@
*/
std::string dump() const;
- aaudio_result_t registerClient(pid_t pid, const android::sp<android::IAAudioClient>& client);
+ aaudio_result_t registerClient(pid_t pid, const android::sp<IAAudioClient>& client);
void unregisterClient(pid_t pid);
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 1c03b7f..ad4b830 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -33,25 +33,21 @@
using android::FifoBuffer;
using android::fifo_frames_t;
-AAudioMixer::~AAudioMixer() {
- delete[] mOutputBuffer;
-}
-
void AAudioMixer::allocate(int32_t samplesPerFrame, int32_t framesPerBurst) {
mSamplesPerFrame = samplesPerFrame;
mFramesPerBurst = framesPerBurst;
int32_t samplesPerBuffer = samplesPerFrame * framesPerBurst;
- mOutputBuffer = new float[samplesPerBuffer];
+ mOutputBuffer = std::make_unique<float[]>(samplesPerBuffer);
mBufferSizeInBytes = samplesPerBuffer * sizeof(float);
}
void AAudioMixer::clear() {
- memset(mOutputBuffer, 0, mBufferSizeInBytes);
+ memset(mOutputBuffer.get(), 0, mBufferSizeInBytes);
}
-int32_t AAudioMixer::mix(int streamIndex, FifoBuffer *fifo, bool allowUnderflow) {
+int32_t AAudioMixer::mix(int streamIndex, std::shared_ptr<FifoBuffer> fifo, bool allowUnderflow) {
WrappingBuffer wrappingBuffer;
- float *destination = mOutputBuffer;
+ float *destination = mOutputBuffer.get();
#if AAUDIO_MIXER_ATRACE_ENABLED
ATRACE_BEGIN("aaMix");
@@ -117,5 +113,5 @@
}
float *AAudioMixer::getOutputBuffer() {
- return mOutputBuffer;
+ return mOutputBuffer.get();
}
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index d5abc5b..1a120f2 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -25,7 +25,6 @@
class AAudioMixer {
public:
AAudioMixer() {}
- ~AAudioMixer();
void allocate(int32_t samplesPerFrame, int32_t framesPerBurst);
@@ -38,7 +37,7 @@
* @param allowUnderflow if true then allow mixer to advance read index past the write index
* @return frames read from this stream
*/
- int32_t mix(int streamIndex, android::FifoBuffer *fifo, bool allowUnderflow);
+ int32_t mix(int streamIndex, std::shared_ptr<android::FifoBuffer> fifo, bool allowUnderflow);
float *getOutputBuffer();
@@ -47,7 +46,7 @@
private:
void mixPart(float *destination, float *source, int32_t numFrames);
- float *mOutputBuffer = nullptr;
+ std::unique_ptr<float[]> mOutputBuffer;
int32_t mSamplesPerFrame = 0;
int32_t mFramesPerBurst = 0;
int32_t mBufferSizeInBytes = 0;
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 22cdb35..69e58f6 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -32,26 +32,26 @@
#include "AAudioService.h"
#include "AAudioServiceStreamMMAP.h"
#include "AAudioServiceStreamShared.h"
-#include "binding/IAAudioService.h"
using namespace android;
using namespace aaudio;
#define MAX_STREAMS_PER_PROCESS 8
+#define AIDL_RETURN(x) *_aidl_return = (x); return Status::ok();
+
using android::AAudioService;
+using binder::Status;
android::AAudioService::AAudioService()
- : BnAAudioService() {
+ : BnAAudioService(),
+ mAdapter(this) {
mAudioClient.clientUid = getuid(); // TODO consider using geteuid()
mAudioClient.clientPid = getpid();
mAudioClient.packageName = String16("");
AAudioClientTracker::getInstance().setAAudioService(this);
}
-AAudioService::~AAudioService() {
-}
-
status_t AAudioService::dump(int fd, const Vector<String16>& args) {
std::string result;
@@ -72,18 +72,21 @@
return NO_ERROR;
}
-void AAudioService::registerClient(const sp<IAAudioClient>& client) {
+Status AAudioService::registerClient(const sp<IAAudioClient> &client) {
pid_t pid = IPCThreadState::self()->getCallingPid();
AAudioClientTracker::getInstance().registerClient(pid, client);
+ return Status::ok();
}
-bool AAudioService::isCallerInService() {
- return mAudioClient.clientPid == IPCThreadState::self()->getCallingPid() &&
- mAudioClient.clientUid == IPCThreadState::self()->getCallingUid();
-}
+Status
+AAudioService::openStream(const StreamRequest &_request, StreamParameters* _paramsOut,
+ int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
-aaudio_handle_t AAudioService::openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput) {
+ // Create wrapper objects for simple usage of the parcelables.
+ const AAudioStreamRequest request(_request);
+ AAudioStreamConfiguration paramsOut;
+
// A lock in is used to order the opening of endpoints when an
// EXCLUSIVE endpoint is stolen. We want the order to be:
// 1) Thread A opens exclusive MMAP endpoint
@@ -108,13 +111,13 @@
if (count >= MAX_STREAMS_PER_PROCESS) {
ALOGE("openStream(): exceeded max streams per process %d >= %d",
count, MAX_STREAMS_PER_PROCESS);
- return AAUDIO_ERROR_UNAVAILABLE;
+ AIDL_RETURN(AAUDIO_ERROR_UNAVAILABLE);
}
}
if (sharingMode != AAUDIO_SHARING_MODE_EXCLUSIVE && sharingMode != AAUDIO_SHARING_MODE_SHARED) {
ALOGE("openStream(): unrecognized sharing mode = %d", sharingMode);
- return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ AIDL_RETURN(AAUDIO_ERROR_ILLEGAL_ARGUMENT);
}
if (sharingMode == AAUDIO_SHARING_MODE_EXCLUSIVE
@@ -147,29 +150,124 @@
if (result != AAUDIO_OK) {
serviceStream.clear();
- return result;
+ AIDL_RETURN(result);
} else {
aaudio_handle_t handle = mStreamTracker.addStreamForHandle(serviceStream.get());
serviceStream->setHandle(handle);
pid_t pid = request.getProcessId();
AAudioClientTracker::getInstance().registerClientStream(pid, serviceStream);
- configurationOutput.copyFrom(*serviceStream);
+ paramsOut.copyFrom(*serviceStream);
+ *_paramsOut = std::move(paramsOut).parcelable();
// Log open in MediaMetrics after we have the handle because we need the handle to
// create the metrics ID.
serviceStream->logOpen(handle);
ALOGV("%s(): return handle = 0x%08X", __func__, handle);
- return handle;
+ AIDL_RETURN(handle);
}
}
-aaudio_result_t AAudioService::closeStream(aaudio_handle_t streamHandle) {
+Status AAudioService::closeStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
// Check permission and ownership first.
sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("closeStream(0x%0x), illegal stream handle", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
}
- return closeStream(serviceStream);
+ AIDL_RETURN(closeStream(serviceStream));
+}
+
+Status AAudioService::getStreamDescription(int32_t streamHandle, Endpoint* endpoint,
+ int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AudioEndpointParcelable endpointParcelable;
+ aaudio_result_t result = serviceStream->getDescription(endpointParcelable);
+ if (result == AAUDIO_OK) {
+ *endpoint = std::move(endpointParcelable).parcelable();
+ }
+ AIDL_RETURN(result);
+}
+
+Status AAudioService::startStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->start());
+}
+
+Status AAudioService::pauseStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->pause());
+}
+
+Status AAudioService::stopStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->stop());
+}
+
+Status AAudioService::flushStream(int32_t streamHandle, int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->flush());
+}
+
+Status AAudioService::registerAudioThread(int32_t streamHandle, int32_t clientThreadId, int64_t periodNanoseconds,
+ int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ int32_t priority = isCallerInService()
+ ? kRealTimeAudioPriorityService : kRealTimeAudioPriorityClient;
+ AIDL_RETURN(serviceStream->registerAudioThread(clientThreadId, priority));
+}
+
+Status AAudioService::unregisterAudioThread(int32_t streamHandle, int32_t clientThreadId,
+ int32_t *_aidl_return) {
+ static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
+
+ sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ if (serviceStream.get() == nullptr) {
+ ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
+ AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
+ }
+ AIDL_RETURN(serviceStream->unregisterAudioThread(clientThreadId));
+}
+
+bool AAudioService::isCallerInService() {
+ return mAudioClient.clientPid == IPCThreadState::self()->getCallingPid() &&
+ mAudioClient.clientUid == IPCThreadState::self()->getCallingUid();
}
aaudio_result_t AAudioService::closeStream(sp<AAudioServiceStreamBase> serviceStream) {
@@ -205,76 +303,6 @@
return serviceStream;
}
-aaudio_result_t AAudioService::getStreamDescription(
- aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->getDescription(parcelable);
-}
-
-aaudio_result_t AAudioService::startStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->start();
-}
-
-aaudio_result_t AAudioService::pauseStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->pause();
-}
-
-aaudio_result_t AAudioService::stopStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->stop();
-}
-
-aaudio_result_t AAudioService::flushStream(aaudio_handle_t streamHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->flush();
-}
-
-aaudio_result_t AAudioService::registerAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId,
- int64_t /* periodNanoseconds */) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- int32_t priority = isCallerInService()
- ? kRealTimeAudioPriorityService : kRealTimeAudioPriorityClient;
- return serviceStream->registerAudioThread(clientThreadId, priority);
-}
-
-aaudio_result_t AAudioService::unregisterAudioThread(aaudio_handle_t streamHandle,
- pid_t clientThreadId) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
- if (serviceStream.get() == nullptr) {
- ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
- return AAUDIO_ERROR_INVALID_HANDLE;
- }
- return serviceStream->unregisterAudioThread(clientThreadId);
-}
-
aaudio_result_t AAudioService::startClient(aaudio_handle_t streamHandle,
const android::AudioClient& client,
const audio_attributes_t *attr,
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index caf48a5..7c1b796 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -24,69 +24,71 @@
#include <media/AudioClient.h>
#include <aaudio/AAudio.h>
+#include <aaudio/BnAAudioService.h>
#include "binding/AAudioCommon.h"
+#include "binding/AAudioBinderAdapter.h"
#include "binding/AAudioServiceInterface.h"
-#include "binding/IAAudioService.h"
#include "AAudioServiceStreamBase.h"
#include "AAudioStreamTracker.h"
namespace android {
+#define AAUDIO_SERVICE_NAME "media.aaudio"
+
class AAudioService :
public BinderService<AAudioService>,
- public BnAAudioService,
- public aaudio::AAudioServiceInterface
+ public aaudio::BnAAudioService
{
friend class BinderService<AAudioService>;
public:
AAudioService();
- virtual ~AAudioService();
+ virtual ~AAudioService() = default;
+
+ aaudio::AAudioServiceInterface& asAAudioServiceInterface() {
+ return mAdapter;
+ }
static const char* getServiceName() { return AAUDIO_SERVICE_NAME; }
virtual status_t dump(int fd, const Vector<String16>& args) override;
- virtual void registerClient(const sp<IAAudioClient>& client);
+ binder::Status registerClient(const ::android::sp<::aaudio::IAAudioClient>& client) override;
- aaudio::aaudio_handle_t openStream(const aaudio::AAudioStreamRequest &request,
- aaudio::AAudioStreamConfiguration &configurationOutput)
- override;
+ binder::Status openStream(const ::aaudio::StreamRequest& request,
+ ::aaudio::StreamParameters* paramsOut,
+ int32_t* _aidl_return) override;
- /*
- * This is called from Binder. It checks for permissions
- * and converts the handle passed through Binder to a stream pointer.
- */
- aaudio_result_t closeStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status closeStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t getStreamDescription(
- aaudio::aaudio_handle_t streamHandle,
- aaudio::AudioEndpointParcelable &parcelable) override;
+ binder::Status
+ getStreamDescription(int32_t streamHandle, ::aaudio::Endpoint* endpoint,
+ int32_t* _aidl_return) override;
- aaudio_result_t startStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status startStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t pauseStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status pauseStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t stopStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status stopStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t flushStream(aaudio::aaudio_handle_t streamHandle) override;
+ binder::Status flushStream(int32_t streamHandle, int32_t* _aidl_return) override;
- aaudio_result_t registerAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t tid,
- int64_t periodNanoseconds) override;
+ binder::Status
+ registerAudioThread(int32_t streamHandle, int32_t clientThreadId, int64_t periodNanoseconds,
+ int32_t* _aidl_return) override;
- aaudio_result_t unregisterAudioThread(aaudio::aaudio_handle_t streamHandle,
- pid_t tid) override;
+ binder::Status unregisterAudioThread(int32_t streamHandle, int32_t clientThreadId,
+ int32_t* _aidl_return) override;
aaudio_result_t startClient(aaudio::aaudio_handle_t streamHandle,
const android::AudioClient& client,
const audio_attributes_t *attr,
- audio_port_handle_t *clientHandle) override;
+ audio_port_handle_t *clientHandle);
aaudio_result_t stopClient(aaudio::aaudio_handle_t streamHandle,
- audio_port_handle_t clientHandle) override;
+ audio_port_handle_t clientHandle);
// ===============================================================================
// The following public methods are only called from the service and NOT by Binder.
@@ -101,6 +103,29 @@
aaudio_result_t closeStream(sp<aaudio::AAudioServiceStreamBase> serviceStream);
private:
+ class Adapter : public aaudio::AAudioBinderAdapter {
+ public:
+ explicit Adapter(AAudioService *service)
+ : aaudio::AAudioBinderAdapter(service),
+ mService(service) {}
+
+ aaudio_result_t startClient(aaudio::aaudio_handle_t streamHandle,
+ const android::AudioClient &client,
+ const audio_attributes_t *attr,
+ audio_port_handle_t *clientHandle) override {
+ return mService->startClient(streamHandle, client, attr, clientHandle);
+ }
+
+ aaudio_result_t stopClient(aaudio::aaudio_handle_t streamHandle,
+ audio_port_handle_t clientHandle) override {
+ return mService->stopClient(streamHandle, clientHandle);
+ }
+
+ private:
+ AAudioService* const mService;
+ };
+
+ Adapter mAdapter;
/** @return true if the client is the audioserver
*/
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index ceefe93..b139be1 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -182,11 +182,12 @@
: AUDIO_SOURCE_DEFAULT;
audio_flags_mask_t flags;
if (direction == AAUDIO_DIRECTION_OUTPUT) {
- flags = AUDIO_FLAG_LOW_LATENCY
- | AAudioConvert_allowCapturePolicyToAudioFlagsMask(params->getAllowedCapturePolicy());
+ flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
+ | AAudioConvert_allowCapturePolicyToAudioFlagsMask(
+ params->getAllowedCapturePolicy()));
} else {
- flags = AUDIO_FLAG_LOW_LATENCY
- | AAudioConvert_privacySensitiveToAudioFlagsMask(params->isPrivacySensitive());
+ flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
+ | AAudioConvert_privacySensitiveToAudioFlagsMask(params->isPrivacySensitive()));
}
return {
.content_type = contentType,
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index 220584c..bc769f0 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -35,22 +35,17 @@
using namespace android; // TODO just import names needed
using namespace aaudio; // TODO just import names needed
-AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService &audioService)
- : AAudioServiceEndpointShared(
- (AudioStreamInternal *)(new AudioStreamInternalCapture(audioService, true))) {
-}
-
-AAudioServiceEndpointCapture::~AAudioServiceEndpointCapture() {
- delete[] mDistributionBuffer;
+AAudioServiceEndpointCapture::AAudioServiceEndpointCapture(AAudioService& audioService)
+ : AAudioServiceEndpointShared(
+ new AudioStreamInternalCapture(audioService.asAAudioServiceInterface(), true)) {
}
aaudio_result_t AAudioServiceEndpointCapture::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAudioServiceEndpointShared::open(request);
if (result == AAUDIO_OK) {
- delete[] mDistributionBuffer;
int distributionBufferSizeBytes = getStreamInternal()->getFramesPerBurst()
* getStreamInternal()->getBytesPerFrame();
- mDistributionBuffer = new uint8_t[distributionBufferSizeBytes];
+ mDistributionBuffer = std::make_unique<uint8_t[]>(distributionBufferSizeBytes);
}
return result;
}
@@ -67,9 +62,12 @@
int64_t mmapFramesRead = getStreamInternal()->getFramesRead();
// Read audio data from stream using a blocking read.
- result = getStreamInternal()->read(mDistributionBuffer, getFramesPerBurst(), timeoutNanos);
+ result = getStreamInternal()->read(mDistributionBuffer.get(),
+ getFramesPerBurst(), timeoutNanos);
if (result == AAUDIO_ERROR_DISCONNECTED) {
- ALOGV("%s() read() returned AAUDIO_ERROR_DISCONNECTED, break", __func__);
+ ALOGD("%s() read() returned AAUDIO_ERROR_DISCONNECTED", __func__);
+ // We do not need the returned vector.
+ (void) AAudioServiceEndpointShared::disconnectRegisteredStreams();
break;
} else if (result != getFramesPerBurst()) {
ALOGW("callbackLoop() read %d / %d",
@@ -79,48 +77,14 @@
// Distribute data to each active stream.
{ // brackets are for lock_guard
-
std::lock_guard <std::mutex> lock(mLockStreams);
for (const auto& clientStream : mRegisteredStreams) {
if (clientStream->isRunning() && !clientStream->isSuspended()) {
- int64_t clientFramesWritten = 0;
-
sp<AAudioServiceStreamShared> streamShared =
static_cast<AAudioServiceStreamShared *>(clientStream.get());
-
- {
- // Lock the AudioFifo to protect against close.
- std::lock_guard <std::mutex> lock(streamShared->getAudioDataQueueLock());
-
- FifoBuffer *fifo = streamShared->getAudioDataFifoBuffer_l();
- if (fifo != nullptr) {
-
- // Determine offset between framePosition in client's stream
- // vs the underlying MMAP stream.
- clientFramesWritten = fifo->getWriteCounter();
- // There are two indices that refer to the same frame.
- int64_t positionOffset = mmapFramesRead - clientFramesWritten;
- streamShared->setTimestampPositionOffset(positionOffset);
-
- // Is the buffer too full to write a burst?
- if (fifo->getEmptyFramesAvailable() <
- getFramesPerBurst()) {
- streamShared->incrementXRunCount();
- } else {
- fifo->write(mDistributionBuffer, getFramesPerBurst());
- }
- clientFramesWritten = fifo->getWriteCounter();
- }
- }
-
- if (clientFramesWritten > 0) {
- // This timestamp represents the completion of data being written into the
- // client buffer. It is sent to the client and used in the timing model
- // to decide when data will be available to read.
- Timestamp timestamp(clientFramesWritten, AudioClock::getNanoseconds());
- streamShared->markTransferTime(timestamp);
- }
-
+ streamShared->writeDataIfRoom(mmapFramesRead,
+ mDistributionBuffer.get(),
+ getFramesPerBurst());
}
}
}
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.h b/services/oboeservice/AAudioServiceEndpointCapture.h
index 2bbe81d..2ca43cf 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.h
+++ b/services/oboeservice/AAudioServiceEndpointCapture.h
@@ -17,6 +17,8 @@
#ifndef AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
#define AAUDIO_SERVICE_ENDPOINT_CAPTURE_H
+#include <memory>
+
#include "client/AudioStreamInternal.h"
#include "client/AudioStreamInternalCapture.h"
@@ -28,15 +30,14 @@
class AAudioServiceEndpointCapture : public AAudioServiceEndpointShared {
public:
explicit AAudioServiceEndpointCapture(android::AAudioService &audioService);
- virtual ~AAudioServiceEndpointCapture();
+ virtual ~AAudioServiceEndpointCapture() = default;
aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
-
void *callbackLoop() override;
private:
- uint8_t *mDistributionBuffer = nullptr;
+ std::unique_ptr<uint8_t[]> mDistributionBuffer;
};
} /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 04c6453..5bccfd5 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -378,3 +378,18 @@
parcelable.mDownDataQueueParcelable.setCapacityInFrames(getBufferCapacity());
return AAUDIO_OK;
}
+
+aaudio_result_t AAudioServiceEndpointMMAP::getExternalPosition(uint64_t *positionFrames,
+ int64_t *timeNanos)
+{
+ if (!mExternalPositionSupported) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ status_t status = mMmapStream->getExternalPosition(positionFrames, timeNanos);
+ if (status == INVALID_OPERATION) {
+ // getExternalPosition is not supported. Set mExternalPositionSupported as false
+ // so that the call will not go to the HAL next time.
+ mExternalPositionSupported = false;
+ }
+ return AAudioConvert_androidToAAudioResult(status);
+}
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index b6003b6..a2a0922 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -85,6 +85,8 @@
return mHardwareTimeOffsetNanos;
}
+ aaudio_result_t getExternalPosition(uint64_t *positionFrames, int64_t *timeNanos);
+
private:
MonotonicCounter mFramesTransferred;
@@ -101,6 +103,8 @@
int64_t mHardwareTimeOffsetNanos = 0; // TODO get from HAL
+ bool mExternalPositionSupported = true;
+
};
} /* namespace aaudio */
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index dfe7193..6ddc30b 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -41,10 +41,9 @@
#define BURSTS_PER_BUFFER_DEFAULT 2
-AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService &audioService)
- : AAudioServiceEndpointShared(
- (AudioStreamInternal *)(new AudioStreamInternalPlay(audioService, true))) {
-}
+AAudioServiceEndpointPlay::AAudioServiceEndpointPlay(AAudioService& audioService)
+ : AAudioServiceEndpointShared(
+ new AudioStreamInternalPlay(audioService.asAAudioServiceInterface(), true)) {}
aaudio_result_t AAudioServiceEndpointPlay::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAudioServiceEndpointShared::open(request);
@@ -100,9 +99,10 @@
{
// Lock the AudioFifo to protect against close.
std::lock_guard <std::mutex> lock(streamShared->getAudioDataQueueLock());
-
- FifoBuffer *fifo = streamShared->getAudioDataFifoBuffer_l();
- if (fifo != nullptr) {
+ std::shared_ptr<SharedRingBuffer> audioDataQueue
+ = streamShared->getAudioDataQueue_l();
+ std::shared_ptr<FifoBuffer> fifo;
+ if (audioDataQueue && (fifo = audioDataQueue->getFifoBuffer())) {
// Determine offset between framePosition in client's stream
// vs the underlying MMAP stream.
@@ -145,7 +145,9 @@
result = getStreamInternal()->write(mMixer.getOutputBuffer(),
getFramesPerBurst(), timeoutNanos);
if (result == AAUDIO_ERROR_DISCONNECTED) {
- ALOGV("%s() write() returned AAUDIO_ERROR_DISCONNECTED, break", __func__);
+ ALOGD("%s() write() returned AAUDIO_ERROR_DISCONNECTED", __func__);
+ // We do not need the returned vector.
+ (void) AAudioServiceEndpointShared::disconnectRegisteredStreams();
break;
} else if (result != getFramesPerBurst()) {
ALOGW("callbackLoop() wrote %d / %d",
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 663dae2..9736091 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -26,7 +26,6 @@
#include <media/TypeConverter.h>
#include <mediautils/SchedulingPolicyService.h>
-#include "binding/IAAudioService.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioGlobal.h"
#include "utility/AudioClock.h"
@@ -46,8 +45,7 @@
*/
AAudioServiceStreamBase::AAudioServiceStreamBase(AAudioService &audioService)
- : mUpMessageQueue(nullptr)
- , mTimestampThread("AATime")
+ : mTimestampThread("AATime")
, mAtomicStreamTimestamp()
, mAudioService(audioService) {
mMmapClient.clientUid = -1;
@@ -56,6 +54,8 @@
}
AAudioServiceStreamBase::~AAudioServiceStreamBase() {
+ ALOGD("%s() called", __func__);
+
// May not be set if open failed.
if (mMetricsId.size() > 0) {
mediametrics::LogItem(mMetricsId)
@@ -140,7 +140,7 @@
return AAUDIO_ERROR_INVALID_STATE;
}
- mUpMessageQueue = new SharedRingBuffer();
+ mUpMessageQueue = std::make_shared<SharedRingBuffer>();
result = mUpMessageQueue->allocate(sizeof(AAudioServiceMessage),
QUEUE_UP_CAPACITY_COMMANDS);
if (result != AAUDIO_OK) {
@@ -179,6 +179,8 @@
return AAUDIO_OK;
}
+ // This will call stopTimestampThread() and also stop the stream,
+ // just in case it was not already stopped.
stop_l();
aaudio_result_t result = AAUDIO_OK;
@@ -194,13 +196,6 @@
mServiceEndpoint.clear(); // endpoint will hold the pointer after this method returns.
}
- {
- std::lock_guard<std::mutex> lock(mUpMessageQueueLock);
- stopTimestampThread();
- delete mUpMessageQueue;
- mUpMessageQueue = nullptr;
- }
-
setState(AAUDIO_STREAM_STATE_CLOSED);
mediametrics::LogItem(mMetricsId)
@@ -514,12 +509,8 @@
ALOGE("%s(): mUpMessageQueue null! - stream not open", __func__);
return true;
}
- int32_t framesAvailable = mUpMessageQueue->getFifoBuffer()
- ->getFullFramesAvailable();
- int32_t capacity = mUpMessageQueue->getFifoBuffer()
- ->getBufferCapacityInFrames();
// Is it half full or more
- return framesAvailable >= (capacity / 2);
+ return mUpMessageQueue->getFractionalFullness() >= 0.5;
}
aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessage *command) {
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index 94cc980..f9efc2a 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -24,9 +24,10 @@
#include <utils/RefBase.h>
#include "fifo/FifoBuffer.h"
-#include "binding/IAAudioService.h"
#include "binding/AudioEndpointParcelable.h"
#include "binding/AAudioServiceMessage.h"
+#include "binding/AAudioStreamRequest.h"
+#include "core/AAudioStreamParameters.h"
#include "utility/AAudioUtilities.h"
#include "utility/AudioClock.h"
@@ -284,8 +285,8 @@
pid_t mRegisteredClientThread = ILLEGAL_THREAD_ID;
- SharedRingBuffer* mUpMessageQueue;
std::mutex mUpMessageQueueLock;
+ std::shared_ptr<SharedRingBuffer> mUpMessageQueue;
AAudioThread mTimestampThread;
// This is used by one thread to tell another thread to exit. So it must be atomic.
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index 54d7d06..57dc1ab 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <atomic>
+#include <inttypes.h>
#include <iomanip>
#include <iostream>
#include <stdint.h>
@@ -162,7 +163,8 @@
return result;
}
-// Get timestamp that was written by getFreeRunningPosition()
+// Get timestamp from presentation position.
+// If it fails, get timestamp that was written by getFreeRunningPosition()
aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
int64_t *timeNanos) {
@@ -174,7 +176,17 @@
sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
- // TODO Get presentation timestamp from the HAL
+ // Disable this code temporarily because the HAL is not returning
+ // a useful result.
+#if 0
+ uint64_t position;
+ if (serviceEndpointMMAP->getExternalPosition(&position, timeNanos) == AAUDIO_OK) {
+ ALOGD("%s() getExternalPosition() says pos = %" PRIi64 ", time = %" PRIi64,
+ __func__, position, *timeNanos);
+ *positionFrames = (int64_t) position;
+ return AAUDIO_OK;
+ } else
+#endif
if (mAtomicStreamTimestamp.isValid()) {
Timestamp timestamp = mAtomicStreamTimestamp.read();
*positionFrames = timestamp.getPosition();
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index f2cf016..031468e 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -24,8 +24,6 @@
#include <aaudio/AAudio.h>
-#include "binding/IAAudioService.h"
-
#include "binding/AAudioServiceMessage.h"
#include "AAudioServiceStreamBase.h"
#include "AAudioServiceStreamShared.h"
@@ -59,12 +57,7 @@
result << AAudioServiceStreamBase::dump();
- auto fifo = mAudioDataQueue->getFifoBuffer();
- int32_t readCounter = fifo->getReadCounter();
- int32_t writeCounter = fifo->getWriteCounter();
- result << std::setw(10) << writeCounter;
- result << std::setw(10) << readCounter;
- result << std::setw(8) << (writeCounter - readCounter);
+ result << mAudioDataQueue->dump();
result << std::setw(8) << getXRunCount();
return result.str();
@@ -180,7 +173,7 @@
{
std::lock_guard<std::mutex> lock(mAudioDataQueueLock);
// Create audio data shared memory buffer for client.
- mAudioDataQueue = new SharedRingBuffer();
+ mAudioDataQueue = std::make_shared<SharedRingBuffer>();
result = mAudioDataQueue->allocate(calculateBytesPerFrame(), getBufferCapacity());
if (result != AAUDIO_OK) {
ALOGE("%s() could not allocate FIFO with %d frames",
@@ -203,18 +196,6 @@
return result;
}
-aaudio_result_t AAudioServiceStreamShared::close_l() {
- aaudio_result_t result = AAudioServiceStreamBase::close_l();
-
- {
- std::lock_guard<std::mutex> lock(mAudioDataQueueLock);
- delete mAudioDataQueue;
- mAudioDataQueue = nullptr;
- }
-
- return result;
-}
-
/**
* Get an immutable description of the data queue created by this service.
*/
@@ -273,3 +254,37 @@
*positionFrames = position;
return result;
}
+
+void AAudioServiceStreamShared::writeDataIfRoom(int64_t mmapFramesRead,
+ const void *buffer, int32_t numFrames) {
+ int64_t clientFramesWritten = 0;
+
+ // Lock the AudioFifo to protect against close.
+ std::lock_guard <std::mutex> lock(mAudioDataQueueLock);
+
+ if (mAudioDataQueue != nullptr) {
+ std::shared_ptr<FifoBuffer> fifo = mAudioDataQueue->getFifoBuffer();
+ // Determine offset between framePosition in client's stream
+ // vs the underlying MMAP stream.
+ clientFramesWritten = fifo->getWriteCounter();
+ // There are two indices that refer to the same frame.
+ int64_t positionOffset = mmapFramesRead - clientFramesWritten;
+ setTimestampPositionOffset(positionOffset);
+
+ // Is the buffer too full to write a burst?
+ if (fifo->getEmptyFramesAvailable() < getFramesPerBurst()) {
+ incrementXRunCount();
+ } else {
+ fifo->write(buffer, numFrames);
+ }
+ clientFramesWritten = fifo->getWriteCounter();
+ }
+
+ if (clientFramesWritten > 0) {
+ // This timestamp represents the completion of data being written into the
+ // client buffer. It is sent to the client and used in the timing model
+ // to decide when data will be available to read.
+ Timestamp timestamp(clientFramesWritten, AudioClock::getNanoseconds());
+ markTransferTime(timestamp);
+ }
+}
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index abcb782..5b1f8da 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -52,23 +52,23 @@
aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
- aaudio_result_t close_l() override;
-
/**
- * This must be locked when calling getAudioDataFifoBuffer_l() and while
- * using the FifoBuffer it returns.
+ * This must be locked when calling getAudioDataQueue_l() and while
+ * using the FifoBuffer it contains.
*/
std::mutex &getAudioDataQueueLock() {
return mAudioDataQueueLock;
}
+ void writeDataIfRoom(int64_t mmapFramesRead, const void *buffer, int32_t numFrames);
+
/**
- * This must only be call under getAudioDataQueueLock().
+ * This must only be called under getAudioDataQueueLock().
* @return
*/
- android::FifoBuffer *getAudioDataFifoBuffer_l() { return (mAudioDataQueue == nullptr)
- ? nullptr
- : mAudioDataQueue->getFifoBuffer(); }
+ std::shared_ptr<SharedRingBuffer> getAudioDataQueue_l() {
+ return mAudioDataQueue;
+ }
/* Keep a record of when a buffer transfer completed.
* This allows for a more accurate timing model.
@@ -106,7 +106,8 @@
int32_t framesPerBurst);
private:
- SharedRingBuffer *mAudioDataQueue = nullptr; // protected by mAudioDataQueueLock
+
+ std::shared_ptr<SharedRingBuffer> mAudioDataQueue; // protected by mAudioDataQueueLock
std::mutex mAudioDataQueueLock;
std::atomic<int64_t> mTimestampPositionOffset;
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 8b1e2c0..31e590e 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -55,6 +55,11 @@
"libcutils",
"liblog",
"libutils",
+ "aaudio-aidl-cpp",
+ ],
+
+ export_shared_lib_headers: [
+ "libaaudio_internal",
],
header_libs: [
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index 2454446..c1d4e16 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -18,6 +18,8 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <iomanip>
+#include <iostream>
#include <sys/mman.h>
#include "binding/RingBufferParcelable.h"
@@ -30,8 +32,8 @@
SharedRingBuffer::~SharedRingBuffer()
{
+ mFifoBuffer.reset(); // uses mSharedMemory
if (mSharedMemory != nullptr) {
- delete mFifoBuffer;
munmap(mSharedMemory, mSharedMemorySizeInBytes);
mSharedMemory = nullptr;
}
@@ -58,16 +60,18 @@
return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
}
- // Map the fd to memory addresses.
- mSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+ // Map the fd to memory addresses. Use a temporary pointer to keep the mmap result and update
+    // it to `mSharedMemory` only when mmap operates successfully.
+ uint8_t* tmpPtr = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
PROT_READ|PROT_WRITE,
MAP_SHARED,
mFileDescriptor.get(), 0);
- if (mSharedMemory == MAP_FAILED) {
+ if (tmpPtr == MAP_FAILED) {
ALOGE("allocate() mmap() failed %d", errno);
mFileDescriptor.reset();
return AAUDIO_ERROR_INTERNAL; // TODO convert errno to a better AAUDIO_ERROR;
}
+ mSharedMemory = tmpPtr;
// Get addresses for our counters and data from the shared memory.
fifo_counter_t *readCounterAddress =
@@ -76,7 +80,7 @@
(fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_WRITE_OFFSET];
uint8_t *dataAddress = &mSharedMemory[SHARED_RINGBUFFER_DATA_OFFSET];
- mFifoBuffer = new FifoBuffer(bytesPerFrame, capacityInFrames,
+ mFifoBuffer = std::make_shared<FifoBufferIndirect>(bytesPerFrame, capacityInFrames,
readCounterAddress, writeCounterAddress, dataAddress);
return AAUDIO_OK;
}
@@ -94,3 +98,19 @@
ringBufferParcelable.setFramesPerBurst(1);
ringBufferParcelable.setCapacityInFrames(mCapacityInFrames);
}
+
+double SharedRingBuffer::getFractionalFullness() const {
+ int32_t framesAvailable = mFifoBuffer->getFullFramesAvailable();
+ int32_t capacity = mFifoBuffer->getBufferCapacityInFrames();
+ return framesAvailable / (double) capacity;
+}
+
+std::string SharedRingBuffer::dump() const {
+ std::stringstream result;
+ int32_t readCounter = mFifoBuffer->getReadCounter();
+ int32_t writeCounter = mFifoBuffer->getWriteCounter();
+ result << std::setw(10) << writeCounter;
+ result << std::setw(10) << readCounter;
+ result << std::setw(8) << (writeCounter - readCounter);
+ return result.str();
+}
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index 79169bc..c3a9bb7 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -18,8 +18,9 @@
#define AAUDIO_SHARED_RINGBUFFER_H
#include <android-base/unique_fd.h>
-#include <stdint.h>
#include <cutils/ashmem.h>
+#include <stdint.h>
+#include <string>
#include <sys/mman.h>
#include "fifo/FifoBuffer.h"
@@ -47,15 +48,25 @@
void fillParcelable(AudioEndpointParcelable &endpointParcelable,
RingBufferParcelable &ringBufferParcelable);
- android::FifoBuffer * getFifoBuffer() {
+ /**
+ * Return available frames as a fraction of the capacity.
+ * @return fullness between 0.0 and 1.0
+ */
+ double getFractionalFullness() const;
+
+ // dump: write# read# available
+ std::string dump() const;
+
+ std::shared_ptr<android::FifoBuffer> getFifoBuffer() {
return mFifoBuffer;
}
private:
android::base::unique_fd mFileDescriptor;
- android::FifoBuffer *mFifoBuffer = nullptr;
- uint8_t *mSharedMemory = nullptr;
+ std::shared_ptr<android::FifoBufferIndirect> mFifoBuffer;
+ uint8_t *mSharedMemory = nullptr; // mmap
int32_t mSharedMemorySizeInBytes = 0;
+ // size of memory used for data vs counters
int32_t mDataMemorySizeInBytes = 0;
android::fifo_frames_t mCapacityInFrames = 0;
};