Merge "Add new parameter capability to onUidStateChanged()"
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index af8ec06..ec8f049 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -38,13 +38,18 @@
namespace.platform.isolated = true
namespace.platform.search.paths = /system/${LIB}
-namespace.platform.search.paths += /apex/com.android.runtime/${LIB}
namespace.platform.asan.search.paths = /data/asan/system/${LIB}
namespace.platform.asan.search.paths += /system/${LIB}
+
+# TODO(b/140790209): These directories are wrong in R and later because they
+# only contain Bionic internal libraries dependencies that should not be
+# accessed from the outside. However, they may be necessary for APEX builds that
+# are pushed to Q. Remove them as soon as Q compatibility is no longer required.
+namespace.platform.search.paths += /apex/com.android.runtime/${LIB}
namespace.platform.asan.search.paths += /apex/com.android.runtime/${LIB}
# /system/lib/libc.so, etc are symlinks to /apex/com.android.lib/lib/bionic/libc.so, etc.
-# Add /apex/... pat to the permitted paths because linker uses realpath(3)
+# Add /apex/... path to the permitted paths because linker uses realpath(3)
# to check the accessibility of the lib. We could add this to search.paths
# instead but that makes the resolution of bionic libs be dependent on
# the order of /system/lib and /apex/... in search.paths. If /apex/...
@@ -131,3 +136,9 @@
# Add a link for libz.so which is llndk on devices where VNDK is not enforced.
namespace.sphal.link.platform.shared_libs += libz.so
+
+# With VNDK APEX, /system/${LIB}/vndk-sp${VNDK_VER} is a symlink to the following.
+# Add /apex/... path to the permitted paths because linker uses realpath(3)
+# to check the accessibility of the lib.
+namespace.sphal.permitted.paths += /apex/com.android.vndk.${VNDK_APEX_VER}/${LIB}
+namespace.sphal.asan.permitted.paths += /apex/com.android.vndk.${VNDK_APEX_VER}/${LIB}
diff --git a/apex/manifest.json b/apex/manifest.json
index 3011ee8..ddd642e 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,4 +1,4 @@
{
"name": "com.android.media",
- "version": 290000000
+ "version": 300000000
}
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index 83a5178..2320fd7 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,4 +1,4 @@
{
"name": "com.android.media.swcodec",
- "version": 290000000
+ "version": 300000000
}
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index c6c35ef..84d1d93 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -347,6 +347,20 @@
return c->setPreviewCallbackTarget(callbackProducer);
}
+status_t Camera::setAudioRestriction(int32_t mode)
+{
+ sp <::android::hardware::ICamera> c = mCamera;
+ if (c == 0) return NO_INIT;
+ return c->setAudioRestriction(mode);
+}
+
+int32_t Camera::getGlobalAudioRestriction()
+{
+ sp <::android::hardware::ICamera> c = mCamera;
+ if (c == 0) return NO_INIT;
+ return c->getGlobalAudioRestriction();
+}
+
// callback from camera service
void Camera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
{
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index f0945c7..b83edf7 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -56,6 +56,8 @@
SET_VIDEO_BUFFER_TARGET,
RELEASE_RECORDING_FRAME_HANDLE,
RELEASE_RECORDING_FRAME_HANDLE_BATCH,
+ SET_AUDIO_RESTRICTION,
+ GET_GLOBAL_AUDIO_RESTRICTION,
};
class BpCamera: public BpInterface<ICamera>
@@ -191,6 +193,21 @@
}
}
+ status_t setAudioRestriction(int32_t mode) {
+ Parcel data, reply;
+ data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+ data.writeInt32(mode);
+ remote()->transact(SET_AUDIO_RESTRICTION, data, &reply);
+ return reply.readInt32();
+ }
+
+ int32_t getGlobalAudioRestriction() {
+ Parcel data, reply;
+ data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+ remote()->transact(GET_GLOBAL_AUDIO_RESTRICTION, data, &reply);
+ return reply.readInt32();
+ }
+
status_t setVideoBufferMode(int32_t videoBufferMode)
{
ALOGV("setVideoBufferMode: %d", videoBufferMode);
@@ -494,6 +511,17 @@
reply->writeInt32(setVideoTarget(st));
return NO_ERROR;
} break;
+ case SET_AUDIO_RESTRICTION: {
+ CHECK_INTERFACE(ICamera, data, reply);
+ int32_t mode = data.readInt32();
+ reply->writeInt32(setAudioRestriction(mode));
+ return NO_ERROR;
+ } break;
+ case GET_GLOBAL_AUDIO_RESTRICTION: {
+ CHECK_INTERFACE(ICamera, data, reply);
+ reply->writeInt32(getGlobalAudioRestriction());
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
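
The two new ICamera transactions above follow the existing BpCamera/BnCamera pattern: the proxy writes the interface token (plus the mode for SET_AUDIO_RESTRICTION) and returns the reply's int32, while the stub cases unpack the parcel and call into the implementation. A minimal client-side sketch using the Camera wrapper added in Camera.cpp (illustrative only, not part of the patch; assumes an already-connected sp<Camera>):

    #include <camera/Camera.h>

    using namespace android;

    // Hypothetical helper: ask the service to mute vibration while this
    // camera is open. Mode values mirror ICameraDeviceUser.aidl below
    // (0 = none, 1 = vibration, 3 = vibration + sound).
    status_t restrictVibration(const sp<Camera>& camera) {
        status_t err = camera->setAudioRestriction(1 /* AUDIO_RESTRICTION_VIBRATION */);
        if (err != NO_ERROR) {
            return err;  // e.g. NO_INIT when no ICamera is attached
        }
        // Returns the combined restriction currently applied across all
        // camera clients, or a negative status such as NO_INIT on error.
        int32_t global = camera->getGlobalAudioRestriction();
        return global < 0 ? global : NO_ERROR;
    }
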
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
index 49dfde8..93549e0 100644
--- a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -155,4 +155,26 @@
void updateOutputConfiguration(int streamId, in OutputConfiguration outputConfiguration);
void finalizeOutputConfigurations(int streamId, in OutputConfiguration outputConfiguration);
+
+
+ // Keep in sync with public API in
+ // frameworks/base/core/java/android/hardware/camera2/CameraDevice.java
+ const int AUDIO_RESTRICTION_NONE = 0;
+ const int AUDIO_RESTRICTION_VIBRATION = 1;
+ const int AUDIO_RESTRICTION_VIBRATION_SOUND = 3;
+
+ /**
+ * Set audio restriction mode for this camera device.
+ *
+ * @param mode the audio restriction mode ID as above
+ *
+ */
+ void setCameraAudioRestriction(int mode);
+
+ /**
+ * Get global audio restriction mode for all camera clients.
+ *
+ * @return the currently applied system-wide audio restriction mode
+ */
+ int getGlobalAudioRestriction();
}
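
As the values above suggest, AUDIO_RESTRICTION_VIBRATION_SOUND (3) is a superset of AUDIO_RESTRICTION_VIBRATION (1), so the modes behave like a small two-bit mask. A short helper sketch, assuming that reading (not defined anywhere in this change, purely illustrative):

    #include <cstdint>

    // Hypothetical constants mirroring the AIDL values above.
    constexpr int32_t kAudioRestrictionNone           = 0;
    constexpr int32_t kAudioRestrictionVibration      = 1;
    constexpr int32_t kAudioRestrictionVibrationSound = 3;

    // Vibration is restricted in both mode 1 and mode 3.
    inline bool mutesVibration(int32_t mode) {
        return (mode & kAudioRestrictionVibration) != 0;
    }

    // Sound is additionally restricted only in mode 3.
    inline bool mutesSound(int32_t mode) {
        return mode == kAudioRestrictionVibrationSound;
    }
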
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index ecaba3a..320c499 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -17,6 +17,10 @@
srcs: ["main_cameraserver.cpp"],
+ header_libs: [
+ "libmedia_headers",
+ ],
+
shared_libs: [
"libcameraservice",
"liblog",
@@ -25,7 +29,6 @@
"libgui",
"libbinder",
"libhidlbase",
- "libhidltransport",
"android.hardware.camera.common@1.0",
"android.hardware.camera.provider@2.4",
"android.hardware.camera.provider@2.5",
diff --git a/camera/cameraserver/main_cameraserver.cpp b/camera/cameraserver/main_cameraserver.cpp
index 53b3d84..cef8ef5 100644
--- a/camera/cameraserver/main_cameraserver.cpp
+++ b/camera/cameraserver/main_cameraserver.cpp
@@ -34,6 +34,7 @@
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
CameraService::instantiate();
+ ALOGI("ServiceManager: %p done instantiate", sm.get());
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
diff --git a/camera/include/camera/Camera.h b/camera/include/camera/Camera.h
index 430aa1c..2cdb617 100644
--- a/camera/include/camera/Camera.h
+++ b/camera/include/camera/Camera.h
@@ -167,6 +167,9 @@
sp<ICameraRecordingProxy> getRecordingProxy();
+ status_t setAudioRestriction(int32_t mode);
+ int32_t getGlobalAudioRestriction();
+
// ICameraClient interface
virtual void notifyCallback(int32_t msgType, int32_t ext, int32_t ext2);
virtual void dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
diff --git a/camera/include/camera/android/hardware/ICamera.h b/camera/include/camera/android/hardware/ICamera.h
index 80823d6..ec19e5d 100644
--- a/camera/include/camera/android/hardware/ICamera.h
+++ b/camera/include/camera/android/hardware/ICamera.h
@@ -140,6 +140,12 @@
// Set the video buffer producer for camera to use in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
virtual status_t setVideoTarget(
const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+
+ // Set the audio restriction mode
+ virtual status_t setAudioRestriction(int32_t mode) = 0;
+
+ // Get the global audio restriction mode
+ virtual int32_t getGlobalAudioRestriction() = 0;
};
// ----------------------------------------------------------------------------
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index a2ee65d..d8220eb 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -107,7 +107,6 @@
],
shared_libs: [
- "libhwbinder",
"libfmq",
"libhidlbase",
"libhardware",
@@ -143,7 +142,6 @@
vendor: true,
srcs: ["ndk_vendor/tests/AImageReaderVendorTest.cpp"],
shared_libs: [
- "libhwbinder",
"libcamera2ndk_vendor",
"libcamera_metadata",
"libmediandk",
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index d24cb81..46a8dae 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -29,7 +29,7 @@
#include "ACameraCaptureSession.inc"
ACameraDevice::~ACameraDevice() {
- mDevice->stopLooper();
+ mDevice->stopLooperAndDisconnect();
}
namespace android {
@@ -112,19 +112,7 @@
}
}
-// Device close implementaiton
-CameraDevice::~CameraDevice() {
- sp<ACameraCaptureSession> session = mCurrentSession.promote();
- {
- Mutex::Autolock _l(mDeviceLock);
- if (!isClosed()) {
- disconnectLocked(session);
- }
- LOG_ALWAYS_FATAL_IF(mCbLooper != nullptr,
- "CameraDevice looper should've been stopped before ~CameraDevice");
- mCurrentSession = nullptr;
- }
-}
+CameraDevice::~CameraDevice() { }
void
CameraDevice::postSessionMsgAndCleanup(sp<AMessage>& msg) {
@@ -892,8 +880,14 @@
return;
}
-void CameraDevice::stopLooper() {
+void CameraDevice::stopLooperAndDisconnect() {
Mutex::Autolock _l(mDeviceLock);
+ sp<ACameraCaptureSession> session = mCurrentSession.promote();
+ if (!isClosed()) {
+ disconnectLocked(session);
+ }
+ mCurrentSession = nullptr;
+
if (mCbLooper != nullptr) {
mCbLooper->unregisterHandler(mHandler->id());
mCbLooper->stop();
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 7a35bf0..6c2ceb3 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -40,6 +40,7 @@
#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraCaptureSession.h>
+
#include "ACameraMetadata.h"
namespace android {
@@ -110,7 +111,7 @@
inline ACameraDevice* getWrapper() const { return mWrapper; };
// Stop the looper thread and unregister the handler
- void stopLooper();
+ void stopLooperAndDisconnect();
private:
friend ACameraCaptureSession;
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 488641d..68fe045 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3543,11 +3543,19 @@
* output capture result.</p>
* <p>This control is only effective if ACAMERA_CONTROL_AE_MODE or ACAMERA_CONTROL_MODE is set to
* OFF; otherwise the auto-exposure algorithm will override this value.</p>
+ * <p>Note that for devices supporting postRawSensitivityBoost, the total sensitivity applied
+ * to the final processed image is the combination of ACAMERA_SENSOR_SENSITIVITY and
+ * ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST. In case the application uses the sensor
+ * sensitivity from last capture result of an auto request for a manual request, in order
+ * to achieve the same brightness in the output image, the application should also
+ * set postRawSensitivityBoost.</p>
*
* @see ACAMERA_CONTROL_AE_MODE
* @see ACAMERA_CONTROL_MODE
+ * @see ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST
* @see ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE
* @see ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY
+ * @see ACAMERA_SENSOR_SENSITIVITY
*/
ACAMERA_SENSOR_SENSITIVITY = // int32
ACAMERA_SENSOR_START + 2,
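
The new doc note is easiest to see in code: when reusing the exposure settings from an auto-exposed capture result in a manual request, the application should carry over ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST together with ACAMERA_SENSOR_SENSITIVITY, otherwise the output brightness changes. A small NDK sketch, illustrative only (error handling trimmed; both tags are assumed to be present in the result):

    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>
    #include <camera/NdkCaptureRequest.h>

    // Copy the sensitivity-related values from the last auto result into a
    // manual request so the processed image keeps the same brightness.
    void copySensitivity(const ACameraMetadata* lastAutoResult, ACaptureRequest* manualRequest) {
        ACameraMetadata_const_entry sensitivity = {};
        ACameraMetadata_const_entry boost = {};
        if (ACameraMetadata_getConstEntry(lastAutoResult,
                    ACAMERA_SENSOR_SENSITIVITY, &sensitivity) == ACAMERA_OK &&
            ACameraMetadata_getConstEntry(lastAutoResult,
                    ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST, &boost) == ACAMERA_OK) {
            // Setting only the sensor sensitivity is not enough; the post-RAW
            // boost applied by the auto result must be replicated as well.
            ACaptureRequest_setEntry_i32(manualRequest,
                    ACAMERA_SENSOR_SENSITIVITY, 1, sensitivity.data.i32);
            ACaptureRequest_setEntry_i32(manualRequest,
                    ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST, 1, boost.data.i32);
        }
    }
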
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index 35c8355..e511a3f 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -45,7 +45,7 @@
using namespace android;
ACameraDevice::~ACameraDevice() {
- mDevice->stopLooper();
+ mDevice->stopLooperAndDisconnect();
}
namespace android {
@@ -125,19 +125,7 @@
}
}
-// Device close implementaiton
-CameraDevice::~CameraDevice() {
- sp<ACameraCaptureSession> session = mCurrentSession.promote();
- {
- Mutex::Autolock _l(mDeviceLock);
- if (!isClosed()) {
- disconnectLocked(session);
- }
- mCurrentSession = nullptr;
- LOG_ALWAYS_FATAL_IF(mCbLooper != nullptr,
- "CameraDevice looper should've been stopped before ~CameraDevice");
- }
-}
+CameraDevice::~CameraDevice() { }
void
CameraDevice::postSessionMsgAndCleanup(sp<AMessage>& msg) {
@@ -1388,6 +1376,7 @@
// before cbh goes out of scope and causing we call the session
// destructor while holding device lock
cbh.mSession.clear();
+
postSessionMsgAndCleanup(msg);
}
@@ -1400,8 +1389,13 @@
}
}
-void CameraDevice::stopLooper() {
+void CameraDevice::stopLooperAndDisconnect() {
Mutex::Autolock _l(mDeviceLock);
+ sp<ACameraCaptureSession> session = mCurrentSession.promote();
+ if (!isClosed()) {
+ disconnectLocked(session);
+ }
+ mCurrentSession = nullptr;
if (mCbLooper != nullptr) {
mCbLooper->unregisterHandler(mHandler->id());
mCbLooper->stop();
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index 9e034c4..7fc699e 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -36,6 +36,7 @@
#include <camera/NdkCameraManager.h>
#include <camera/NdkCameraCaptureSession.h>
+
#include "ACameraMetadata.h"
#include "utils.h"
@@ -134,7 +135,7 @@
inline ACameraDevice* getWrapper() const { return mWrapper; };
// Stop the looper thread and unregister the handler
- void stopLooper();
+ void stopLooperAndDisconnect();
private:
friend ACameraCaptureSession;
diff --git a/cmds/screenrecord/Android.bp b/cmds/screenrecord/Android.bp
index 86476cd..6bdbab1 100644
--- a/cmds/screenrecord/Android.bp
+++ b/cmds/screenrecord/Android.bp
@@ -24,6 +24,10 @@
"Program.cpp",
],
+ header_libs: [
+ "libmediadrm_headers",
+ ],
+
shared_libs: [
"libstagefright",
"libmedia",
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index e39f885..4a24b96 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -53,7 +53,7 @@
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaMuxer.h>
#include <media/stagefright/PersistentSurface.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaCodecBuffer.h>
#include "screenrecord.h"
@@ -74,6 +74,7 @@
using android::MediaMuxer;
using android::Overlay;
using android::PersistentSurface;
+using android::PhysicalDisplayId;
using android::ProcessState;
using android::Rect;
using android::String8;
@@ -116,7 +117,7 @@
static uint32_t gBitRate = 20000000; // 20Mbps
static uint32_t gTimeLimitSec = kMaxTimeLimitSec;
static uint32_t gBframes = 0;
-
+static PhysicalDisplayId gPhysicalDisplayId;
// Set by signal handler to stop recording.
static volatile bool gStopRequested = false;
@@ -269,14 +270,14 @@
static status_t setDisplayProjection(
SurfaceComposerClient::Transaction& t,
const sp<IBinder>& dpy,
- const DisplayInfo& mainDpyInfo) {
+ const DisplayInfo& displayInfo) {
// Set the region of the layer stack we're interested in, which in our
// case is "all of it".
- Rect layerStackRect(mainDpyInfo.viewportW, mainDpyInfo.viewportH);
+ Rect layerStackRect(displayInfo.viewportW, displayInfo.viewportH);
// We need to preserve the aspect ratio of the display.
- float displayAspect = (float) mainDpyInfo.viewportH / (float) mainDpyInfo.viewportW;
+ float displayAspect = (float) displayInfo.viewportH / (float) displayInfo.viewportW;
// Set the way we map the output onto the display surface (which will
@@ -335,16 +336,15 @@
* Configures the virtual display. When this completes, virtual display
* frames will start arriving from the buffer producer.
*/
-static status_t prepareVirtualDisplay(const DisplayInfo& mainDpyInfo,
+static status_t prepareVirtualDisplay(const DisplayInfo& displayInfo,
const sp<IGraphicBufferProducer>& bufferProducer,
sp<IBinder>* pDisplayHandle) {
sp<IBinder> dpy = SurfaceComposerClient::createDisplay(
String8("ScreenRecorder"), false /*secure*/);
-
SurfaceComposerClient::Transaction t;
t.setDisplaySurface(dpy, bufferProducer);
- setDisplayProjection(t, dpy, mainDpyInfo);
- t.setDisplayLayerStack(dpy, 0); // default stack
+ setDisplayProjection(t, dpy, displayInfo);
+ t.setDisplayLayerStack(dpy, displayInfo.layerStack);
t.apply();
*pDisplayHandle = dpy;
@@ -406,7 +406,7 @@
* The muxer must *not* have been started before calling.
*/
static status_t runEncoder(const sp<MediaCodec>& encoder,
- const sp<MediaMuxer>& muxer, FILE* rawFp, const sp<IBinder>& mainDpy,
+ const sp<MediaMuxer>& muxer, FILE* rawFp, const sp<IBinder>& display,
const sp<IBinder>& virtualDpy, uint8_t orientation) {
static int kTimeout = 250000; // be responsive on signal
status_t err;
@@ -415,7 +415,7 @@
uint32_t debugNumFrames = 0;
int64_t startWhenNsec = systemTime(CLOCK_MONOTONIC);
int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec);
- DisplayInfo mainDpyInfo;
+ DisplayInfo displayInfo;
Vector<int64_t> timestamps;
bool firstFrame = true;
@@ -472,16 +472,16 @@
//
// Polling for changes is inefficient and wrong, but the
// useful stuff is hard to get at without a Dalvik VM.
- err = SurfaceComposerClient::getDisplayInfo(mainDpy,
- &mainDpyInfo);
+ err = SurfaceComposerClient::getDisplayInfo(display,
+ &displayInfo);
if (err != NO_ERROR) {
ALOGW("getDisplayInfo(main) failed: %d", err);
- } else if (orientation != mainDpyInfo.orientation) {
- ALOGD("orientation changed, now %d", mainDpyInfo.orientation);
+ } else if (orientation != displayInfo.orientation) {
+ ALOGD("orientation changed, now %d", displayInfo.orientation);
SurfaceComposerClient::Transaction t;
- setDisplayProjection(t, virtualDpy, mainDpyInfo);
+ setDisplayProjection(t, virtualDpy, displayInfo);
t.apply();
- orientation = mainDpyInfo.orientation;
+ orientation = displayInfo.orientation;
}
}
@@ -661,32 +661,33 @@
self->startThreadPool();
// Get main display parameters.
- const sp<IBinder> mainDpy = SurfaceComposerClient::getInternalDisplayToken();
- if (mainDpy == nullptr) {
+ sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(
+ gPhysicalDisplayId);
+ if (display == nullptr) {
fprintf(stderr, "ERROR: no display\n");
return NAME_NOT_FOUND;
}
- DisplayInfo mainDpyInfo;
- err = SurfaceComposerClient::getDisplayInfo(mainDpy, &mainDpyInfo);
+ DisplayInfo displayInfo;
+ err = SurfaceComposerClient::getDisplayInfo(display, &displayInfo);
if (err != NO_ERROR) {
fprintf(stderr, "ERROR: unable to get display characteristics\n");
return err;
}
if (gVerbose) {
- printf("Main display is %dx%d @%.2ffps (orientation=%u)\n",
- mainDpyInfo.viewportW, mainDpyInfo.viewportH, mainDpyInfo.fps,
- mainDpyInfo.orientation);
+ printf("Display is %dx%d @%.2ffps (orientation=%u), layerStack=%u\n",
+ displayInfo.viewportW, displayInfo.viewportH, displayInfo.fps,
+ displayInfo.orientation, displayInfo.layerStack);
fflush(stdout);
}
// Encoder can't take odd number as config
if (gVideoWidth == 0) {
- gVideoWidth = floorToEven(mainDpyInfo.viewportW);
+ gVideoWidth = floorToEven(displayInfo.viewportW);
}
if (gVideoHeight == 0) {
- gVideoHeight = floorToEven(mainDpyInfo.viewportH);
+ gVideoHeight = floorToEven(displayInfo.viewportH);
}
// Configure and start the encoder.
@@ -694,7 +695,7 @@
sp<FrameOutput> frameOutput;
sp<IGraphicBufferProducer> encoderInputSurface;
if (gOutputFormat != FORMAT_FRAMES && gOutputFormat != FORMAT_RAW_FRAMES) {
- err = prepareEncoder(mainDpyInfo.fps, &encoder, &encoderInputSurface);
+ err = prepareEncoder(displayInfo.fps, &encoder, &encoderInputSurface);
if (err != NO_ERROR && !gSizeSpecified) {
// fallback is defined for landscape; swap if we're in portrait
@@ -707,7 +708,7 @@
gVideoWidth, gVideoHeight, newWidth, newHeight);
gVideoWidth = newWidth;
gVideoHeight = newHeight;
- err = prepareEncoder(mainDpyInfo.fps, &encoder,
+ err = prepareEncoder(displayInfo.fps, &encoder,
&encoderInputSurface);
}
}
@@ -755,7 +756,7 @@
// Configure virtual display.
sp<IBinder> dpy;
- err = prepareVirtualDisplay(mainDpyInfo, bufferProducer, &dpy);
+ err = prepareVirtualDisplay(displayInfo, bufferProducer, &dpy);
if (err != NO_ERROR) {
if (encoder != NULL) encoder->release();
return err;
@@ -838,8 +839,8 @@
}
} else {
// Main encoder loop.
- err = runEncoder(encoder, muxer, rawFp, mainDpy, dpy,
- mainDpyInfo.orientation);
+ err = runEncoder(encoder, muxer, rawFp, display, dpy,
+ displayInfo.orientation);
if (err != NO_ERROR) {
fprintf(stderr, "Encoder failed (err=%d)\n", err);
// fall through to cleanup
@@ -1005,6 +1006,9 @@
" in videos captured to illustrate bugs.\n"
"--time-limit TIME\n"
" Set the maximum recording time, in seconds. Default / maximum is %d.\n"
+ "--display-id ID\n"
+ " specify the physical display ID to record. Default is the primary display.\n"
+ " see \"dumpsys SurfaceFlinger --display-id\" for valid display IDs.\n"
"--verbose\n"
" Display interesting information on stdout.\n"
"--help\n"
@@ -1036,9 +1040,18 @@
{ "monotonic-time", no_argument, NULL, 'm' },
{ "persistent-surface", no_argument, NULL, 'p' },
{ "bframes", required_argument, NULL, 'B' },
+ { "display-id", required_argument, NULL, 'd' },
{ NULL, 0, NULL, 0 }
};
+ std::optional<PhysicalDisplayId> displayId = SurfaceComposerClient::getInternalDisplayId();
+ if (!displayId) {
+ fprintf(stderr, "Failed to get token for internal display\n");
+ return 1;
+ }
+
+ gPhysicalDisplayId = *displayId;
+
while (true) {
int optionIndex = 0;
int ic = getopt_long(argc, argv, "", longOptions, &optionIndex);
@@ -1133,6 +1146,18 @@
return 2;
}
break;
+ case 'd':
+ gPhysicalDisplayId = atoll(optarg);
+ if (gPhysicalDisplayId == 0) {
+ fprintf(stderr, "Please specify a valid physical display id\n");
+ return 2;
+ } else if (SurfaceComposerClient::
+ getPhysicalDisplayToken(gPhysicalDisplayId) == nullptr) {
+ fprintf(stderr, "Invalid physical display id: %"
+ ANDROID_PHYSICAL_DISPLAY_ID_FORMAT "\n", gPhysicalDisplayId);
+ return 2;
+ }
+ break;
default:
if (ic != '?') {
fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
diff --git a/cmds/screenrecord/screenrecord.h b/cmds/screenrecord/screenrecord.h
index 9b058c2..cec7c13 100644
--- a/cmds/screenrecord/screenrecord.h
+++ b/cmds/screenrecord/screenrecord.h
@@ -18,6 +18,6 @@
#define SCREENRECORD_SCREENRECORD_H
#define kVersionMajor 1
-#define kVersionMinor 2
+#define kVersionMinor 3
#endif /*SCREENRECORD_SCREENRECORD_H*/
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 0c8d44a..defc94f 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -3,14 +3,15 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
+ AudioPlayer.cpp \
stagefright.cpp \
jpeg.cpp \
SineSource.cpp
LOCAL_SHARED_LIBRARIES := \
- libstagefright libmedia libmedia_omx libutils libbinder \
+ libstagefright libmedia libmedia_codeclist libutils libbinder \
libstagefright_foundation libjpeg libui libgui libcutils liblog \
- libhidlbase \
+ libhidlbase libdatasource libaudioclient \
android.hardware.media.omx@1.0 \
LOCAL_C_INCLUDES:= \
@@ -31,14 +32,16 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
+ AudioPlayer.cpp \
SineSource.cpp \
record.cpp
LOCAL_SHARED_LIBRARIES := \
libstagefright libmedia liblog libutils libbinder \
- libstagefright_foundation
+ libstagefright_foundation libdatasource libaudioclient
LOCAL_C_INCLUDES:= \
+ frameworks/av/camera/include \
frameworks/av/media/libstagefright \
frameworks/native/include/media/openmax \
frameworks/native/include/media/hardware
@@ -56,12 +59,12 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
- SineSource.cpp \
+ AudioPlayer.cpp \
recordvideo.cpp
LOCAL_SHARED_LIBRARIES := \
libstagefright libmedia liblog libutils libbinder \
- libstagefright_foundation
+ libstagefright_foundation libaudioclient
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -82,12 +85,13 @@
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \
+ AudioPlayer.cpp \
SineSource.cpp \
audioloop.cpp
LOCAL_SHARED_LIBRARIES := \
libstagefright libmedia liblog libutils libbinder \
- libstagefright_foundation
+ libstagefright_foundation libaudioclient
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -110,7 +114,7 @@
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libui libgui \
- libstagefright_foundation libmedia libcutils
+ libstagefright_foundation libmedia libcutils libdatasource
LOCAL_C_INCLUDES:= \
frameworks/av/media/libstagefright \
@@ -132,6 +136,9 @@
codec.cpp \
SimplePlayer.cpp \
+LOCAL_HEADER_LIBRARIES := \
+ libmediadrm_headers \
+
LOCAL_SHARED_LIBRARIES := \
libstagefright liblog libutils libbinder libstagefright_foundation \
libmedia libmedia_omx libaudioclient libui libgui libcutils
@@ -158,17 +165,18 @@
filters/saturation.rscript \
mediafilter.cpp \
+LOCAL_HEADER_LIBRARIES := \
+ libmediadrm_headers \
+
LOCAL_SHARED_LIBRARIES := \
libstagefright \
liblog \
libutils \
libbinder \
libstagefright_foundation \
- libmedia \
libmedia_omx \
libui \
libgui \
- libcutils \
libRScpp \
LOCAL_C_INCLUDES:= \
diff --git a/media/libstagefright/AudioPlayer.cpp b/cmds/stagefright/AudioPlayer.cpp
similarity index 99%
rename from media/libstagefright/AudioPlayer.cpp
rename to cmds/stagefright/AudioPlayer.cpp
index 199b57b..208713d 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/cmds/stagefright/AudioPlayer.cpp
@@ -28,12 +28,13 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/AudioPlayer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include "AudioPlayer.h"
+
namespace android {
AudioPlayer::AudioPlayer(
diff --git a/media/libstagefright/include/media/stagefright/AudioPlayer.h b/cmds/stagefright/AudioPlayer.h
similarity index 100%
rename from media/libstagefright/include/media/stagefright/AudioPlayer.h
rename to cmds/stagefright/AudioPlayer.h
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index afb7db3..f4b8164 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -23,7 +23,7 @@
#include <gui/Surface.h>
#include <media/AudioTrack.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/IMediaHTTPService.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index d4f2e8d..bd274d8 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -29,11 +29,11 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/AMRWriter.h>
-#include <media/stagefright/AudioPlayer.h>
#include <media/stagefright/AudioSource.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/SimpleDecodingSource.h>
+#include "AudioPlayer.h"
#include "SineSource.h"
using namespace android;
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index e5a4337..f2d1c29 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -23,7 +23,7 @@
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/IMediaHTTPService.h>
#include <media/IMediaPlayerService.h>
#include <media/MediaCodecBuffer.h>
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
index 2cf6955..66302b0 100644
--- a/cmds/stagefright/mediafilter.cpp
+++ b/cmds/stagefright/mediafilter.cpp
@@ -24,9 +24,9 @@
#include <gui/ISurfaceComposer.h>
#include <gui/SurfaceComposerClient.h>
#include <gui/Surface.h>
-#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
#include <media/MediaCodecBuffer.h>
+#include <mediadrm/ICrypto.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 95a16f3..37091c4 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -17,12 +17,11 @@
#include "SineSource.h"
#include <binder/ProcessState.h>
+#include <datasource/FileSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/AudioPlayer.h>
#include <media/stagefright/CameraSource.h>
-#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaCodecSource.h>
@@ -33,6 +32,8 @@
#include <media/stagefright/SimpleDecodingSource.h>
#include <media/MediaPlayerInterface.h>
+#include "AudioPlayer.h"
+
using namespace android;
static const int32_t kAudioBitRate = 12200;
diff --git a/cmds/stagefright/recordvideo.cpp b/cmds/stagefright/recordvideo.cpp
index a63b9b9..01a178e 100644
--- a/cmds/stagefright/recordvideo.cpp
+++ b/cmds/stagefright/recordvideo.cpp
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include "SineSource.h"
-
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -25,8 +23,8 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/AudioPlayer.h>
#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index bf36be0..9ae87d8 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -31,18 +31,15 @@
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
+#include <datasource/DataSourceFactory.h>
#include <media/DataSource.h>
#include <media/MediaSource.h>
-#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
#include <media/IMediaPlayerService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
-#include "include/NuCachedSource2.h"
-#include <media/stagefright/AudioPlayer.h>
-#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/JPEGSource.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaCodec.h>
@@ -69,6 +66,8 @@
#include <android/hardware/media/omx/1.0/IOmx.h>
+#include "AudioPlayer.h"
+
using namespace android;
static long gNumRepetitions;
@@ -989,7 +988,7 @@
failed = false;
printf("getFrameAtTime(%s) => OK\n", filename);
- VideoFrame *frame = (VideoFrame *)mem->pointer();
+ VideoFrame *frame = (VideoFrame *)mem->unsecurePointer();
CHECK_EQ(writeJpegFile("/sdcard/out.jpg",
frame->getFlattenedData(),
@@ -1086,7 +1085,7 @@
const char *filename = argv[k];
sp<DataSource> dataSource =
- DataSourceFactory::CreateFromURI(NULL /* httpService */, filename);
+ DataSourceFactory::getInstance()->CreateFromURI(NULL /* httpService */, filename);
if (strncasecmp(filename, "sine:", 5) && dataSource == NULL) {
fprintf(stderr, "Unable to create data source.\n");
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 35bdbc0..fe613a8 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -21,6 +21,7 @@
#include <binder/ProcessState.h>
#include <cutils/properties.h> // for property_get
+#include <datasource/DataSourceFactory.h>
#include <media/DataSource.h>
#include <media/IMediaHTTPService.h>
#include <media/IStreamSource.h>
@@ -28,7 +29,6 @@
#include <media/MediaSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MPEG2TSWriter.h>
#include <media/stagefright/MediaExtractor.h>
@@ -116,7 +116,7 @@
sp<IMemory> mem = mBuffers.itemAt(index);
- ssize_t n = read(mFd, mem->pointer(), mem->size());
+ ssize_t n = read(mFd, mem->unsecurePointer(), mem->size());
if (n <= 0) {
mListener->issueCommand(IStreamListener::EOS, false /* synchronous */);
} else {
@@ -164,7 +164,7 @@
: mCurrentBufferIndex(-1),
mCurrentBufferOffset(0) {
sp<DataSource> dataSource =
- DataSourceFactory::CreateFromURI(NULL /* httpService */, filename);
+ DataSourceFactory::getInstance()->CreateFromURI(NULL /* httpService */, filename);
CHECK(dataSource != NULL);
@@ -238,7 +238,7 @@
copy = mem->size() - mCurrentBufferOffset;
}
- memcpy((uint8_t *)mem->pointer() + mCurrentBufferOffset, data, copy);
+ memcpy((uint8_t *)mem->unsecurePointer() + mCurrentBufferOffset, data, copy);
mCurrentBufferOffset += copy;
if (mCurrentBufferOffset == mem->size()) {
diff --git a/drm/drmserver/Android.bp b/drm/drmserver/Android.bp
index c25a0a1..fd71837 100644
--- a/drm/drmserver/Android.bp
+++ b/drm/drmserver/Android.bp
@@ -25,6 +25,7 @@
shared_libs: [
"libmedia",
+ "libmediametrics",
"libutils",
"liblog",
"libbinder",
diff --git a/drm/drmserver/DrmManager.cpp b/drm/drmserver/DrmManager.cpp
index afbcb39..e2ea83a 100644
--- a/drm/drmserver/DrmManager.cpp
+++ b/drm/drmserver/DrmManager.cpp
@@ -19,7 +19,10 @@
#include "utils/Log.h"
#include <utils/String8.h>
+
+#include <binder/IPCThreadState.h>
#include <drm/DrmInfo.h>
+
#include <drm/DrmInfoEvent.h>
#include <drm/DrmRights.h>
#include <drm/DrmConstraints.h>
@@ -28,6 +31,7 @@
#include <drm/DrmInfoRequest.h>
#include <drm/DrmSupportInfo.h>
#include <drm/DrmConvertedStatus.h>
+#include <media/MediaAnalyticsItem.h>
#include <IDrmEngine.h>
#include "DrmManager.h"
@@ -50,6 +54,39 @@
}
+void DrmManager::reportEngineMetrics(
+ const char func[], const String8& plugInId, const String8& mimeType) {
+ IDrmEngine& engine = mPlugInManager.getPlugIn(plugInId);
+
+ std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("drmmanager"));
+ item->setUid(IPCThreadState::self()->getCallingUid());
+ item->setCString("function_name", func);
+ item->setCString("plugin_id", plugInId.getPathLeaf().getBasePath().c_str());
+
+ std::unique_ptr<DrmSupportInfo> info(engine.getSupportInfo(0));
+ if (NULL != info) {
+ item->setCString("description", info->getDescription().c_str());
+ }
+
+ if (!mimeType.isEmpty()) {
+ item->setCString("mime_types", mimeType.c_str());
+ } else if (NULL != info) {
+ DrmSupportInfo::MimeTypeIterator mimeIter = info->getMimeTypeIterator();
+ String8 mimes;
+ while (mimeIter.hasNext()) {
+ mimes += mimeIter.next();
+ if (mimeIter.hasNext()) {
+ mimes += ",";
+ }
+ }
+ item->setCString("mime_types", mimes.c_str());
+ }
+
+ if (!item->selfrecord()) {
+ ALOGE("Failed to record metrics");
+ }
+}
+
int DrmManager::addUniqueId(bool isNative) {
Mutex::Autolock _l(mLock);
@@ -147,27 +184,36 @@
for (size_t index = 0; index < plugInIdList.size(); index++) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInIdList.itemAt(index));
rDrmEngine.terminate(uniqueId);
+ reportEngineMetrics(__func__, plugInIdList[index]);
}
}
DrmConstraints* DrmManager::getConstraints(int uniqueId, const String8* path, const int action) {
Mutex::Autolock _l(mLock);
+ DrmConstraints *constraints = NULL;
const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, *path);
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
- return rDrmEngine.getConstraints(uniqueId, path, action);
+ constraints = rDrmEngine.getConstraints(uniqueId, path, action);
}
- return NULL;
+ if (NULL != constraints) {
+ reportEngineMetrics(__func__, plugInId);
+ }
+ return constraints;
}
DrmMetadata* DrmManager::getMetadata(int uniqueId, const String8* path) {
Mutex::Autolock _l(mLock);
+ DrmMetadata *meta = NULL;
const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, *path);
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
- return rDrmEngine.getMetadata(uniqueId, path);
+ meta = rDrmEngine.getMetadata(uniqueId, path);
}
- return NULL;
+ if (NULL != meta) {
+ reportEngineMetrics(__func__, plugInId);
+ }
+ return meta;
}
bool DrmManager::canHandle(int uniqueId, const String8& path, const String8& mimeType) {
@@ -175,6 +221,10 @@
const String8 plugInId = getSupportedPlugInId(mimeType);
bool result = (EMPTY_STRING != plugInId) ? true : false;
+ if (result) {
+ reportEngineMetrics(__func__, plugInId, mimeType);
+ }
+
if (0 < path.length()) {
if (result) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
@@ -191,12 +241,17 @@
DrmInfoStatus* DrmManager::processDrmInfo(int uniqueId, const DrmInfo* drmInfo) {
Mutex::Autolock _l(mLock);
- const String8 plugInId = getSupportedPlugInId(drmInfo->getMimeType());
+ DrmInfoStatus *infoStatus = NULL;
+ const String8 mimeType = drmInfo->getMimeType();
+ const String8 plugInId = getSupportedPlugInId(mimeType);
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
- return rDrmEngine.processDrmInfo(uniqueId, drmInfo);
+ infoStatus = rDrmEngine.processDrmInfo(uniqueId, drmInfo);
}
- return NULL;
+ if (NULL != infoStatus) {
+ reportEngineMetrics(__func__, plugInId, mimeType);
+ }
+ return infoStatus;
}
bool DrmManager::canHandle(int uniqueId, const String8& path) {
@@ -208,6 +263,7 @@
result = rDrmEngine.canHandle(uniqueId, path);
if (result) {
+ reportEngineMetrics(__func__, plugInPathList[i]);
break;
}
}
@@ -216,54 +272,75 @@
DrmInfo* DrmManager::acquireDrmInfo(int uniqueId, const DrmInfoRequest* drmInfoRequest) {
Mutex::Autolock _l(mLock);
- const String8 plugInId = getSupportedPlugInId(drmInfoRequest->getMimeType());
+ DrmInfo *info = NULL;
+ const String8 mimeType = drmInfoRequest->getMimeType();
+ const String8 plugInId = getSupportedPlugInId(mimeType);
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
- return rDrmEngine.acquireDrmInfo(uniqueId, drmInfoRequest);
+ info = rDrmEngine.acquireDrmInfo(uniqueId, drmInfoRequest);
}
- return NULL;
+ if (NULL != info) {
+ reportEngineMetrics(__func__, plugInId, mimeType);
+ }
+ return info;
}
status_t DrmManager::saveRights(int uniqueId, const DrmRights& drmRights,
const String8& rightsPath, const String8& contentPath) {
Mutex::Autolock _l(mLock);
- const String8 plugInId = getSupportedPlugInId(drmRights.getMimeType());
+ const String8 mimeType = drmRights.getMimeType();
+ const String8 plugInId = getSupportedPlugInId(mimeType);
status_t result = DRM_ERROR_UNKNOWN;
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
result = rDrmEngine.saveRights(uniqueId, drmRights, rightsPath, contentPath);
}
+ if (DRM_NO_ERROR == result) {
+ reportEngineMetrics(__func__, plugInId, mimeType);
+ }
return result;
}
String8 DrmManager::getOriginalMimeType(int uniqueId, const String8& path, int fd) {
Mutex::Autolock _l(mLock);
+ String8 mimeType(EMPTY_STRING);
const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, path);
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
- return rDrmEngine.getOriginalMimeType(uniqueId, path, fd);
+ mimeType = rDrmEngine.getOriginalMimeType(uniqueId, path, fd);
}
- return EMPTY_STRING;
+ if (!mimeType.isEmpty()) {
+ reportEngineMetrics(__func__, plugInId, mimeType);
+ }
+ return mimeType;
}
int DrmManager::getDrmObjectType(int uniqueId, const String8& path, const String8& mimeType) {
Mutex::Autolock _l(mLock);
+ int type = DrmObjectType::UNKNOWN;
const String8 plugInId = getSupportedPlugInId(uniqueId, path, mimeType);
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
- return rDrmEngine.getDrmObjectType(uniqueId, path, mimeType);
+ type = rDrmEngine.getDrmObjectType(uniqueId, path, mimeType);
}
- return DrmObjectType::UNKNOWN;
+ if (DrmObjectType::UNKNOWN != type) {
+ reportEngineMetrics(__func__, plugInId, mimeType);
+ }
+ return type;
}
int DrmManager::checkRightsStatus(int uniqueId, const String8& path, int action) {
Mutex::Autolock _l(mLock);
+ int rightsStatus = RightsStatus::RIGHTS_INVALID;
const String8 plugInId = getSupportedPlugInIdFromPath(uniqueId, path);
if (EMPTY_STRING != plugInId) {
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
- return rDrmEngine.checkRightsStatus(uniqueId, path, action);
+ rightsStatus = rDrmEngine.checkRightsStatus(uniqueId, path, action);
}
- return RightsStatus::RIGHTS_INVALID;
+ if (RightsStatus::RIGHTS_INVALID != rightsStatus) {
+ reportEngineMetrics(__func__, plugInId);
+ }
+ return rightsStatus;
}
status_t DrmManager::consumeRights(
@@ -307,6 +384,9 @@
IDrmEngine& rDrmEngine = mPlugInManager.getPlugIn(plugInId);
result = rDrmEngine.removeRights(uniqueId, path);
}
+ if (DRM_NO_ERROR == result) {
+ reportEngineMetrics(__func__, plugInId);
+ }
return result;
}
@@ -319,6 +399,7 @@
if (DRM_NO_ERROR != result) {
break;
}
+ reportEngineMetrics(__func__, plugInIdList[index]);
}
return result;
}
@@ -335,6 +416,7 @@
++mConvertId;
convertId = mConvertId;
mConvertSessionMap.add(convertId, &rDrmEngine);
+ reportEngineMetrics(__func__, plugInId, mimeType);
}
}
return convertId;
@@ -415,6 +497,7 @@
if (DRM_NO_ERROR == result) {
++mDecryptSessionId;
mDecryptSessionMap.add(mDecryptSessionId, &rDrmEngine);
+ reportEngineMetrics(__func__, plugInId, String8(mime));
break;
}
}
@@ -443,6 +526,7 @@
if (DRM_NO_ERROR == result) {
++mDecryptSessionId;
mDecryptSessionMap.add(mDecryptSessionId, &rDrmEngine);
+ reportEngineMetrics(__func__, plugInId, String8(mime));
break;
}
}
@@ -472,6 +556,7 @@
if (DRM_NO_ERROR == result) {
++mDecryptSessionId;
mDecryptSessionMap.add(mDecryptSessionId, &rDrmEngine);
+ reportEngineMetrics(__func__, plugInId, mimeType);
break;
}
}
diff --git a/drm/drmserver/DrmManager.h b/drm/drmserver/DrmManager.h
index 26222bc..75fc1a8 100644
--- a/drm/drmserver/DrmManager.h
+++ b/drm/drmserver/DrmManager.h
@@ -143,6 +143,9 @@
bool canHandle(int uniqueId, const String8& path);
+ void reportEngineMetrics(const char func[],
+ const String8& plugInId, const String8& mimeType = String8(""));
+
private:
enum {
kMaxNumUniqueIds = 0x1000,
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index d6db1d4..52c7438 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -2,9 +2,16 @@
// libmediadrm
//
-// TODO: change it back to cc_library_shared when MediaPlayer2 switches to
-// using NdkMediaDrm, instead of MediaDrm.java.
-cc_library {
+cc_library_headers {
+ name: "libmediadrm_headers",
+
+ export_include_dirs: [
+ "interface"
+ ],
+
+}
+
+cc_library_shared {
name: "libmediadrm",
srcs: [
@@ -19,14 +26,29 @@
"CryptoHal.cpp",
],
+ local_include_dirs: [
+ "include",
+ "interface"
+ ],
+
+ export_include_dirs: [
+ "include"
+ ],
+
+ header_libs: [
+ "libmedia_headers",
+ ],
+
shared_libs: [
"libbinder",
"libcutils",
"libdl",
"liblog",
+ "libmedia",
"libmediadrmmetrics_lite",
"libmediametrics",
"libmediautils",
+ "libresourcemanagerservice",
"libstagefright_foundation",
"libutils",
"android.hardware.drm@1.0",
@@ -34,7 +56,6 @@
"android.hardware.drm@1.2",
"libhidlallocatorutils",
"libhidlbase",
- "libhidltransport",
],
cflags: [
@@ -52,10 +73,17 @@
"protos/metrics.proto",
],
+ local_include_dirs: [
+ "include"
+ ],
+
proto: {
export_proto_headers: true,
type: "lite",
},
+ header_libs: [
+ "libmedia_headers",
+ ],
shared_libs: [
"android.hardware.drm@1.0",
"android.hardware.drm@1.1",
@@ -83,10 +111,17 @@
"protos/metrics.proto",
],
+ local_include_dirs: [
+ "include"
+ ],
+
proto: {
export_proto_headers: true,
type: "full",
},
+ header_libs: [
+ "libmedia_headers",
+ ],
shared_libs: [
"android.hardware.drm@1.0",
"android.hardware.drm@1.1",
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index 954608f..6a0e75e 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -118,7 +118,6 @@
CryptoHal::CryptoHal()
: mFactories(makeCryptoFactories()),
mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT),
- mNextBufferId(0),
mHeapSeqNum(0) {
}
@@ -260,17 +259,18 @@
using ::android::hardware::fromHeap;
using ::android::hardware::HidlMemory;
- if (heap == NULL) {
- ALOGE("setHeapBase(): heap is NULL");
+ if (heap == NULL || mHeapSeqNum < 0) {
+ ALOGE("setHeapBase(): heap %p mHeapSeqNum %d", heap.get(), mHeapSeqNum);
return -1;
}
Mutex::Autolock autoLock(mLock);
int32_t seqNum = mHeapSeqNum++;
+ uint32_t bufferId = static_cast<uint32_t>(seqNum);
sp<HidlMemory> hidlMemory = fromHeap(heap);
- mHeapBases.add(seqNum, HeapBase(mNextBufferId, heap->getSize()));
- Return<void> hResult = mPlugin->setSharedBufferBase(*hidlMemory, mNextBufferId++);
+ mHeapBases.add(seqNum, HeapBase(bufferId, heap->getSize()));
+ Return<void> hResult = mPlugin->setSharedBufferBase(*hidlMemory, bufferId);
ALOGE_IF(!hResult.isOk(), "setSharedBufferBase(): remote call failed");
return seqNum;
}
@@ -321,7 +321,11 @@
}
// memory must be within the address space of the heap
- if (memory->pointer() != static_cast<uint8_t *>(heap->getBase()) + memory->offset() ||
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ if (memory->unsecurePointer() != static_cast<uint8_t *>(heap->getBase()) + memory->offset() ||
heap->getSize() < memory->offset() + memory->size() ||
SIZE_MAX - memory->offset() < memory->size()) {
android_errorWriteLog(0x534e4554, "76221123");
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index 7cfe900..8a08a7b 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -25,6 +25,7 @@
#include <android/hardware/drm/1.2/types.h>
#include <android/hidl/manager/1.2/IServiceManager.h>
+#include <android/media/BnResourceManagerClient.h>
#include <hidl/ServiceManagement.h>
#include <media/EventMetric.h>
#include <media/PluginMetricsReporting.h>
@@ -293,28 +294,20 @@
}
}
-
Mutex DrmHal::mLock;
-struct DrmSessionClient : public DrmSessionClientInterface {
- explicit DrmSessionClient(DrmHal* drm) : mDrm(drm) {}
+struct DrmHal::DrmSessionClient : public android::media::BnResourceManagerClient {
+ explicit DrmSessionClient(DrmHal* drm, const Vector<uint8_t>& sessionId)
+ : mSessionId(sessionId),
+ mDrm(drm) {}
- virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
- sp<DrmHal> drm = mDrm.promote();
- if (drm == NULL) {
- return true;
- }
- status_t err = drm->closeSession(sessionId);
- if (err != OK) {
- return false;
- }
- drm->sendEvent(EventType::SESSION_RECLAIMED,
- toHidlVec(sessionId), hidl_vec<uint8_t>());
- return true;
- }
+ ::android::binder::Status reclaimResource(bool* _aidl_return) override;
+ ::android::binder::Status getName(::std::string* _aidl_return) override;
+
+ const Vector<uint8_t> mSessionId;
protected:
- virtual ~DrmSessionClient() {}
+ virtual ~DrmSessionClient();
private:
wp<DrmHal> mDrm;
@@ -322,9 +315,47 @@
DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
};
+::android::binder::Status DrmHal::DrmSessionClient::reclaimResource(bool* _aidl_return) {
+ sp<DrmHal> drm = mDrm.promote();
+ if (drm == NULL) {
+ *_aidl_return = true;
+ return ::android::binder::Status::ok();
+ }
+ status_t err = drm->closeSession(mSessionId);
+ if (err != OK) {
+ *_aidl_return = false;
+ return ::android::binder::Status::ok();
+ }
+ drm->sendEvent(EventType::SESSION_RECLAIMED,
+ toHidlVec(mSessionId), hidl_vec<uint8_t>());
+ *_aidl_return = true;
+ return ::android::binder::Status::ok();
+}
+
+::android::binder::Status DrmHal::DrmSessionClient::getName(::std::string* _aidl_return) {
+ String8 name;
+ sp<DrmHal> drm = mDrm.promote();
+ if (drm == NULL) {
+ name.append("<deleted>");
+ } else if (drm->getPropertyStringInternal(String8("vendor"), name) != OK
+ || name.isEmpty()) {
+ name.append("<Get vendor failed or is empty>");
+ }
+ name.append("[");
+ for (size_t i = 0; i < mSessionId.size(); ++i) {
+ name.appendFormat("%02x", mSessionId[i]);
+ }
+ name.append("]");
+ *_aidl_return = name;
+ return ::android::binder::Status::ok();
+}
+
+DrmHal::DrmSessionClient::~DrmSessionClient() {
+ DrmSessionManager::Instance()->removeSession(mSessionId);
+}
+
DrmHal::DrmHal()
- : mDrmSessionClient(new DrmSessionClient(this)),
- mFactories(makeDrmFactories()),
+ : mFactories(makeDrmFactories()),
mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT) {
}
@@ -333,14 +364,13 @@
auto openSessions = mOpenSessions;
for (size_t i = 0; i < openSessions.size(); i++) {
mLock.unlock();
- closeSession(openSessions[i]);
+ closeSession(openSessions[i]->mSessionId);
mLock.lock();
}
mOpenSessions.clear();
}
DrmHal::~DrmHal() {
- DrmSessionManager::Instance()->removeDrm(mDrmSessionClient);
}
void DrmHal::cleanup() {
@@ -746,9 +776,9 @@
} while (retry);
if (err == OK) {
- DrmSessionManager::Instance()->addSession(getCallingPid(),
- mDrmSessionClient, sessionId);
- mOpenSessions.push(sessionId);
+ sp<DrmSessionClient> client(new DrmSessionClient(this, sessionId));
+ DrmSessionManager::Instance()->addSession(getCallingPid(), client, sessionId);
+ mOpenSessions.push(client);
mMetrics.SetSessionStart(sessionId);
}
@@ -765,7 +795,7 @@
if (status == Status::OK) {
DrmSessionManager::Instance()->removeSession(sessionId);
for (size_t i = 0; i < mOpenSessions.size(); i++) {
- if (mOpenSessions[i] == sessionId) {
+ if (isEqualSessionId(mOpenSessions[i]->mSessionId, sessionId)) {
mOpenSessions.removeAt(i);
break;
}
@@ -893,9 +923,8 @@
status_t DrmHal::provideKeyResponse(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &response, Vector<uint8_t> &keySetId) {
Mutex::Autolock autoLock(mLock);
- EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
-
INIT_CHECK();
+ EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
DrmSessionManager::Instance()->useSession(sessionId);
@@ -1569,7 +1598,6 @@
void DrmHal::reportFrameworkMetrics() const
{
std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("mediadrm"));
- item->generateSessionID();
item->setPkgName(mMetrics.GetAppPackageName().c_str());
String8 vendor;
String8 description;
diff --git a/drm/libmediadrm/DrmSessionManager.cpp b/drm/libmediadrm/DrmSessionManager.cpp
index 375644c..0b91b85 100644
--- a/drm/libmediadrm/DrmSessionManager.cpp
+++ b/drm/libmediadrm/DrmSessionManager.cpp
@@ -18,16 +18,23 @@
#define LOG_TAG "DrmSessionManager"
#include <utils/Log.h>
+#include <android/media/IResourceManagerClient.h>
+#include <android/media/IResourceManagerService.h>
#include <binder/IPCThreadState.h>
#include <binder/IProcessInfoService.h>
#include <binder/IServiceManager.h>
-#include <media/stagefright/ProcessInfo.h>
-#include <mediadrm/DrmSessionClientInterface.h>
+#include <cutils/properties.h>
+#include <media/MediaResource.h>
#include <mediadrm/DrmSessionManager.h>
#include <unistd.h>
#include <utils/String8.h>
+#include <vector>
+
+#include "ResourceManagerService.h"
+
namespace android {
+using android::binder::Status;
static String8 GetSessionIdString(const Vector<uint8_t> &sessionId) {
String8 sessionIdStr;
@@ -37,6 +44,35 @@
return sessionIdStr;
}
+static std::vector<uint8_t> toStdVec(const Vector<uint8_t> &vector) {
+ const uint8_t *v = vector.array();
+ std::vector<uint8_t> vec(v, v + vector.size());
+ return vec;
+}
+
+static uint64_t toClientId(const sp<IResourceManagerClient>& drm) {
+ return reinterpret_cast<int64_t>(drm.get());
+}
+
+static std::vector<MediaResourceParcel> toResourceVec(
+ const Vector<uint8_t> &sessionId, int64_t value) {
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource::DrmSessionResource(toStdVec(sessionId), value));
+ return resources;
+}
+
+static sp<IResourceManagerService> getResourceManagerService() {
+ if (property_get_bool("persist.device_config.media_native.mediadrmserver", 1)) {
+ return new android::media::ResourceManagerService();
+ }
+ sp<IServiceManager> sm = defaultServiceManager();
+ if (sm == NULL) {
+ return NULL;
+ }
+ sp<IBinder> binder = sm->getService(String16("media.resource_manager"));
+ return interface_cast<IResourceManagerService>(binder);
+}
+
bool isEqualSessionId(const Vector<uint8_t> &sessionId1, const Vector<uint8_t> &sessionId2) {
if (sessionId1.size() != sessionId2.size()) {
return false;
@@ -51,189 +87,116 @@
sp<DrmSessionManager> DrmSessionManager::Instance() {
static sp<DrmSessionManager> drmSessionManager = new DrmSessionManager();
+ drmSessionManager->init();
return drmSessionManager;
}
DrmSessionManager::DrmSessionManager()
- : mProcessInfo(new ProcessInfo()),
- mTime(0) {}
+ : DrmSessionManager(getResourceManagerService()) {
+}
-DrmSessionManager::DrmSessionManager(sp<ProcessInfoInterface> processInfo)
- : mProcessInfo(processInfo),
- mTime(0) {}
+DrmSessionManager::DrmSessionManager(const sp<IResourceManagerService> &service)
+ : mService(service),
+ mInitialized(false) {
+ if (mService == NULL) {
+ ALOGE("Failed to init ResourceManagerService");
+ }
+}
-DrmSessionManager::~DrmSessionManager() {}
+DrmSessionManager::~DrmSessionManager() {
+ if (mService != NULL) {
+ IInterface::asBinder(mService)->unlinkToDeath(this);
+ }
+}
-void DrmSessionManager::addSession(
- int pid, const sp<DrmSessionClientInterface>& drm, const Vector<uint8_t> &sessionId) {
- ALOGV("addSession(pid %d, drm %p, sessionId %s)", pid, drm.get(),
+void DrmSessionManager::init() {
+ Mutex::Autolock lock(mLock);
+ if (mInitialized) {
+ return;
+ }
+ mInitialized = true;
+ if (mService != NULL) {
+ IInterface::asBinder(mService)->linkToDeath(this);
+ }
+}
+
+void DrmSessionManager::addSession(int pid,
+ const sp<IResourceManagerClient>& drm, const Vector<uint8_t> &sessionId) {
+ uid_t uid = IPCThreadState::self()->getCallingUid();
+ ALOGV("addSession(pid %d, uid %d, drm %p, sessionId %s)", pid, uid, drm.get(),
GetSessionIdString(sessionId).string());
Mutex::Autolock lock(mLock);
- SessionInfo info;
- info.drm = drm;
- info.sessionId = sessionId;
- info.timeStamp = getTime_l();
- ssize_t index = mSessionMap.indexOfKey(pid);
- if (index < 0) {
- // new pid
- SessionInfos infosForPid;
- infosForPid.push_back(info);
- mSessionMap.add(pid, infosForPid);
- } else {
- mSessionMap.editValueAt(index).push_back(info);
+ if (mService == NULL) {
+ return;
}
+
+ int64_t clientId = toClientId(drm);
+ mSessionMap[toStdVec(sessionId)] = (SessionInfo){pid, uid, clientId};
+ mService->addResource(pid, uid, clientId, drm, toResourceVec(sessionId, INT64_MAX));
}
void DrmSessionManager::useSession(const Vector<uint8_t> &sessionId) {
ALOGV("useSession(%s)", GetSessionIdString(sessionId).string());
Mutex::Autolock lock(mLock);
- for (size_t i = 0; i < mSessionMap.size(); ++i) {
- SessionInfos& infos = mSessionMap.editValueAt(i);
- for (size_t j = 0; j < infos.size(); ++j) {
- SessionInfo& info = infos.editItemAt(j);
- if (isEqualSessionId(sessionId, info.sessionId)) {
- info.timeStamp = getTime_l();
- return;
- }
- }
+ auto it = mSessionMap.find(toStdVec(sessionId));
+ if (mService == NULL || it == mSessionMap.end()) {
+ return;
}
+
+ auto info = it->second;
+ mService->addResource(info.pid, info.uid, info.clientId, NULL, toResourceVec(sessionId, -1));
}
void DrmSessionManager::removeSession(const Vector<uint8_t> &sessionId) {
ALOGV("removeSession(%s)", GetSessionIdString(sessionId).string());
Mutex::Autolock lock(mLock);
- for (size_t i = 0; i < mSessionMap.size(); ++i) {
- SessionInfos& infos = mSessionMap.editValueAt(i);
- for (size_t j = 0; j < infos.size(); ++j) {
- if (isEqualSessionId(sessionId, infos[j].sessionId)) {
- infos.removeAt(j);
- return;
- }
- }
+ auto it = mSessionMap.find(toStdVec(sessionId));
+ if (mService == NULL || it == mSessionMap.end()) {
+ return;
}
-}
-void DrmSessionManager::removeDrm(const sp<DrmSessionClientInterface>& drm) {
- ALOGV("removeDrm(%p)", drm.get());
-
- Mutex::Autolock lock(mLock);
- bool found = false;
- for (size_t i = 0; i < mSessionMap.size(); ++i) {
- SessionInfos& infos = mSessionMap.editValueAt(i);
- for (size_t j = 0; j < infos.size();) {
- if (infos[j].drm == drm) {
- ALOGV("removed session (%s)", GetSessionIdString(infos[j].sessionId).string());
- j = infos.removeAt(j);
- found = true;
- } else {
- ++j;
- }
- }
- if (found) {
- break;
- }
- }
+ auto info = it->second;
+ mService->removeResource(info.pid, info.clientId, toResourceVec(sessionId, INT64_MAX));
+ mSessionMap.erase(it);
}
bool DrmSessionManager::reclaimSession(int callingPid) {
ALOGV("reclaimSession(%d)", callingPid);
- sp<DrmSessionClientInterface> drm;
- Vector<uint8_t> sessionId;
- int lowestPriorityPid;
- int lowestPriority;
- {
- Mutex::Autolock lock(mLock);
- int callingPriority;
- if (!mProcessInfo->getPriority(callingPid, &callingPriority)) {
- return false;
- }
- if (!getLowestPriority_l(&lowestPriorityPid, &lowestPriority)) {
- return false;
- }
- if (lowestPriority <= callingPriority) {
- return false;
- }
+ // unlock early because reclaimResource might call back into removeSession
+ mLock.lock();
+ sp<IResourceManagerService> service(mService);
+ mLock.unlock();
- if (!getLeastUsedSession_l(lowestPriorityPid, &drm, &sessionId)) {
- return false;
- }
- }
-
- if (drm == NULL) {
+ if (service == NULL) {
return false;
}
- ALOGV("reclaim session(%s) opened by pid %d",
- GetSessionIdString(sessionId).string(), lowestPriorityPid);
-
- return drm->reclaimSession(sessionId);
+ // We cannot update mSessionMap here because we do not know which sessionId is reclaimed;
+ // we rely on the IResourceManagerClient to call removeSession from within reclaimResource.
+ Vector<uint8_t> dummy;
+ bool success;
+ Status status = service->reclaimResource(callingPid, toResourceVec(dummy, INT64_MAX), &success);
+ return status.isOk() && success;
}
-int64_t DrmSessionManager::getTime_l() {
- return mTime++;
+size_t DrmSessionManager::getSessionCount() const {
+ Mutex::Autolock lock(mLock);
+ return mSessionMap.size();
}
-bool DrmSessionManager::getLowestPriority_l(int* lowestPriorityPid, int* lowestPriority) {
- int pid = -1;
- int priority = -1;
- for (size_t i = 0; i < mSessionMap.size(); ++i) {
- if (mSessionMap.valueAt(i).size() == 0) {
- // no opened session by this process.
- continue;
- }
- int tempPid = mSessionMap.keyAt(i);
- int tempPriority;
- if (!mProcessInfo->getPriority(tempPid, &tempPriority)) {
- // shouldn't happen.
- return false;
- }
- if (pid == -1) {
- pid = tempPid;
- priority = tempPriority;
- } else {
- if (tempPriority > priority) {
- pid = tempPid;
- priority = tempPriority;
- }
- }
- }
- if (pid != -1) {
- *lowestPriorityPid = pid;
- *lowestPriority = priority;
- }
- return (pid != -1);
+bool DrmSessionManager::containsSession(const Vector<uint8_t>& sessionId) const {
+ Mutex::Autolock lock(mLock);
+ return mSessionMap.count(toStdVec(sessionId));
}
-bool DrmSessionManager::getLeastUsedSession_l(
- int pid, sp<DrmSessionClientInterface>* drm, Vector<uint8_t>* sessionId) {
- ssize_t index = mSessionMap.indexOfKey(pid);
- if (index < 0) {
- return false;
- }
-
- int leastUsedIndex = -1;
- int64_t minTs = LLONG_MAX;
- const SessionInfos& infos = mSessionMap.valueAt(index);
- for (size_t j = 0; j < infos.size(); ++j) {
- if (leastUsedIndex == -1) {
- leastUsedIndex = j;
- minTs = infos[j].timeStamp;
- } else {
- if (infos[j].timeStamp < minTs) {
- leastUsedIndex = j;
- minTs = infos[j].timeStamp;
- }
- }
- }
- if (leastUsedIndex != -1) {
- *drm = infos[leastUsedIndex].drm;
- *sessionId = infos[leastUsedIndex].sessionId;
- }
- return (leastUsedIndex != -1);
+void DrmSessionManager::binderDied(const wp<IBinder>& /*who*/) {
+ ALOGW("ResourceManagerService died.");
+ Mutex::Autolock lock(mLock);
+ mService.clear();
}
} // namespace android
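
For orientation, here is a minimal sketch of the bookkeeping shape the rewrite above moves to: a std::map keyed by the raw session-id bytes, with one addResource()/removeResource() call per map mutation. The toStdVec() helper is referenced but not defined in this hunk, so the definition below is an assumption, not the plugin's actual code.

    #include <sys/types.h>
    #include <cstdint>
    #include <map>
    #include <vector>

    struct SessionInfoSketch {
        pid_t pid;
        uid_t uid;
        int64_t clientId;
    };

    // Keying by the byte vector gives value-based find()/erase() and removes the
    // nested pid -> session-list scans the old KeyedVector layout required.
    using SessionInfoMapSketch = std::map<std::vector<uint8_t>, SessionInfoSketch>;

    // Assumed stand-in for toStdVec(): copy the session-id bytes into a
    // std::vector so they can serve as an ordered map key.
    inline std::vector<uint8_t> toStdVecSketch(const uint8_t* data, size_t size) {
        return std::vector<uint8_t>(data, data + size);
    }

With this layout, addSession() reduces to one map insert plus one addResource() call, and removeSession() to one find()/erase() plus one removeResource() call.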
diff --git a/drm/libmediadrm/PluginMetricsReporting.cpp b/drm/libmediadrm/PluginMetricsReporting.cpp
index 8cd6f96..098f07b 100644
--- a/drm/libmediadrm/PluginMetricsReporting.cpp
+++ b/drm/libmediadrm/PluginMetricsReporting.cpp
@@ -35,8 +35,6 @@
const String8& name,
const String8& appPackageName) {
std::unique_ptr<MediaAnalyticsItem> analyticsItem(MediaAnalyticsItem::create(name.c_str()));
- analyticsItem->generateSessionID();
-
std::string app_package_name(appPackageName.c_str(), appPackageName.size());
analyticsItem->setPkgName(app_package_name);
if (metrics.size() > 0) {
@@ -44,7 +42,7 @@
}
if (!analyticsItem->selfrecord()) {
- ALOGE("selfrecord() returned false. sessioId %" PRId64, analyticsItem->getSessionID());
+ ALOGE("%s: selfrecord() returned false", __func__);
}
return OK;
diff --git a/drm/libmediadrm/TEST_MAPPING b/drm/libmediadrm/TEST_MAPPING
new file mode 100644
index 0000000..bc15879
--- /dev/null
+++ b/drm/libmediadrm/TEST_MAPPING
@@ -0,0 +1,26 @@
+{
+ "presubmit": [
+ {
+ "name": "GtsMediaTestCases",
+ "options" : [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "include-filter": "com.google.android.media.gts.WidevineGenericOpsTests"
+ },
+ {
+ "include-filter": "com.google.android.media.gts.MediaDrmTest"
+ },
+ {
+ "include-filter": "com.google.android.media.gts.WidevineDashPolicyTests"
+ }
+ ]
+ }
+ ],
+ "imports": [
+ {
+ "path": "frameworks/av/drm/mediadrm/plugins"
+ }
+ ]
+}
diff --git a/media/libmedia/include/media/CryptoHal.h b/drm/libmediadrm/include/mediadrm/CryptoHal.h
similarity index 98%
rename from media/libmedia/include/media/CryptoHal.h
rename to drm/libmediadrm/include/mediadrm/CryptoHal.h
index 73c029f..9e61777 100644
--- a/media/libmedia/include/media/CryptoHal.h
+++ b/drm/libmediadrm/include/mediadrm/CryptoHal.h
@@ -97,7 +97,6 @@
};
KeyedVector<int32_t, HeapBase> mHeapBases;
- uint32_t mNextBufferId;
int32_t mHeapSeqNum;
Vector<sp<ICryptoFactory>> makeCryptoFactories();
diff --git a/media/libmedia/include/media/DrmHal.h b/drm/libmediadrm/include/mediadrm/DrmHal.h
similarity index 98%
rename from media/libmedia/include/media/DrmHal.h
rename to drm/libmediadrm/include/mediadrm/DrmHal.h
index bdf1b30..0431c93 100644
--- a/media/libmedia/include/media/DrmHal.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHal.h
@@ -28,6 +28,7 @@
#include <media/MediaAnalyticsItem.h>
#include <mediadrm/DrmMetrics.h>
+#include <mediadrm/DrmSessionManager.h>
#include <mediadrm/IDrm.h>
#include <mediadrm/IDrmClient.h>
#include <utils/threads.h>
@@ -59,6 +60,9 @@
struct DrmHal : public BnDrm,
public IBinder::DeathRecipient,
public IDrmPluginListener_V1_2 {
+
+ struct DrmSessionClient;
+
DrmHal();
virtual ~DrmHal();
@@ -193,8 +197,6 @@
private:
static Mutex mLock;
- sp<DrmSessionClientInterface> mDrmSessionClient;
-
sp<IDrmClient> mListener;
mutable Mutex mEventLock;
mutable Mutex mNotifyLock;
@@ -208,7 +210,7 @@
// Mutable to allow modification within GetPropertyByteArray.
mutable MediaDrmMetrics mMetrics;
- Vector<Vector<uint8_t>> mOpenSessions;
+ Vector<sp<DrmSessionClient>> mOpenSessions;
void closeOpenSessions();
void cleanup();
diff --git a/media/libmedia/include/media/DrmMetrics.h b/drm/libmediadrm/include/mediadrm/DrmMetrics.h
similarity index 100%
rename from media/libmedia/include/media/DrmMetrics.h
rename to drm/libmediadrm/include/mediadrm/DrmMetrics.h
diff --git a/media/libmedia/include/media/DrmPluginPath.h b/drm/libmediadrm/include/mediadrm/DrmPluginPath.h
similarity index 100%
rename from media/libmedia/include/media/DrmPluginPath.h
rename to drm/libmediadrm/include/mediadrm/DrmPluginPath.h
diff --git a/media/libmedia/include/media/DrmSessionClientInterface.h b/drm/libmediadrm/include/mediadrm/DrmSessionClientInterface.h
similarity index 100%
rename from media/libmedia/include/media/DrmSessionClientInterface.h
rename to drm/libmediadrm/include/mediadrm/DrmSessionClientInterface.h
diff --git a/media/libmedia/include/media/DrmSessionManager.h b/drm/libmediadrm/include/mediadrm/DrmSessionManager.h
similarity index 60%
rename from media/libmedia/include/media/DrmSessionManager.h
rename to drm/libmediadrm/include/mediadrm/DrmSessionManager.h
index ba27199..3258f7a 100644
--- a/media/libmedia/include/media/DrmSessionManager.h
+++ b/drm/libmediadrm/include/mediadrm/DrmSessionManager.h
@@ -18,56 +18,66 @@
#define DRM_SESSION_MANAGER_H_
+#include <binder/IBinder.h>
#include <media/stagefright/foundation/ABase.h>
#include <utils/RefBase.h>
#include <utils/KeyedVector.h>
#include <utils/threads.h>
#include <utils/Vector.h>
+#include <map>
+#include <utility>
+#include <vector>
+
namespace android {
class DrmSessionManagerTest;
-struct DrmSessionClientInterface;
-struct ProcessInfoInterface;
+
+namespace media {
+class IResourceManagerClient;
+class IResourceManagerService;
+}
+using android::media::IResourceManagerClient;
+using android::media::IResourceManagerService;
bool isEqualSessionId(const Vector<uint8_t> &sessionId1, const Vector<uint8_t> &sessionId2);
struct SessionInfo {
- sp<DrmSessionClientInterface> drm;
- Vector<uint8_t> sessionId;
- int64_t timeStamp;
+ pid_t pid;
+ uid_t uid;
+ int64_t clientId;
};
-typedef Vector<SessionInfo > SessionInfos;
-typedef KeyedVector<int, SessionInfos > PidSessionInfosMap;
+typedef std::map<std::vector<uint8_t>, SessionInfo> SessionInfoMap;
-struct DrmSessionManager : public RefBase {
+struct DrmSessionManager : public IBinder::DeathRecipient {
static sp<DrmSessionManager> Instance();
DrmSessionManager();
- explicit DrmSessionManager(sp<ProcessInfoInterface> processInfo);
+ explicit DrmSessionManager(const sp<IResourceManagerService> &service);
- void addSession(int pid, const sp<DrmSessionClientInterface>& drm, const Vector<uint8_t>& sessionId);
+ void addSession(int pid, const sp<IResourceManagerClient>& drm, const Vector<uint8_t>& sessionId);
void useSession(const Vector<uint8_t>& sessionId);
void removeSession(const Vector<uint8_t>& sessionId);
- void removeDrm(const sp<DrmSessionClientInterface>& drm);
bool reclaimSession(int callingPid);
+ // sanity check APIs
+ size_t getSessionCount() const;
+ bool containsSession(const Vector<uint8_t>& sessionId) const;
+
+ // implements DeathRecipient
+ virtual void binderDied(const wp<IBinder>& /*who*/);
+
protected:
virtual ~DrmSessionManager();
private:
- friend class DrmSessionManagerTest;
+ void init();
- int64_t getTime_l();
- bool getLowestPriority_l(int* lowestPriorityPid, int* lowestPriority);
- bool getLeastUsedSession_l(
- int pid, sp<DrmSessionClientInterface>* drm, Vector<uint8_t>* sessionId);
-
- sp<ProcessInfoInterface> mProcessInfo;
+ sp<IResourceManagerService> mService;
mutable Mutex mLock;
- PidSessionInfosMap mSessionMap;
- int64_t mTime;
+ SessionInfoMap mSessionMap;
+ bool mInitialized;
DISALLOW_EVIL_CONSTRUCTORS(DrmSessionManager);
};
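
The header now derives from IBinder::DeathRecipient instead of plain RefBase, which is what lets init() call linkToDeath() and binderDied() clear the cached service. A hedged sketch of that pattern, with illustrative names only:

    #include <binder/IBinder.h>
    #include <binder/IInterface.h>
    #include <utils/StrongPointer.h>

    // Sketch: register once for a death notification on a remote service and
    // drop the cached proxy when the hosting process dies.
    struct ServiceWatcherSketch : public android::IBinder::DeathRecipient {
        android::sp<android::IInterface> mService;

        void watch(const android::sp<android::IInterface>& service) {
            mService = service;
            // binderDied() will run if the process behind this proxy exits.
            android::IInterface::asBinder(service)->linkToDeath(this);
        }

        void binderDied(const android::wp<android::IBinder>& /*who*/) override {
            mService.clear();  // later calls see a null proxy and fail gracefully
        }
    };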
diff --git a/media/libmedia/include/media/IDrm.h b/drm/libmediadrm/include/mediadrm/IDrm.h
similarity index 100%
rename from media/libmedia/include/media/IDrm.h
rename to drm/libmediadrm/include/mediadrm/IDrm.h
diff --git a/media/libmedia/include/media/IDrmClient.h b/drm/libmediadrm/include/mediadrm/IDrmClient.h
similarity index 100%
rename from media/libmedia/include/media/IDrmClient.h
rename to drm/libmediadrm/include/mediadrm/IDrmClient.h
diff --git a/media/libmedia/include/media/IMediaDrmService.h b/drm/libmediadrm/include/mediadrm/IMediaDrmService.h
similarity index 100%
rename from media/libmedia/include/media/IMediaDrmService.h
rename to drm/libmediadrm/include/mediadrm/IMediaDrmService.h
diff --git a/media/libmedia/include/media/SharedLibrary.h b/drm/libmediadrm/include/mediadrm/SharedLibrary.h
similarity index 100%
rename from media/libmedia/include/media/SharedLibrary.h
rename to drm/libmediadrm/include/mediadrm/SharedLibrary.h
diff --git a/media/libmedia/include/media/ICrypto.h b/drm/libmediadrm/interface/mediadrm/ICrypto.h
similarity index 100%
rename from media/libmedia/include/media/ICrypto.h
rename to drm/libmediadrm/interface/mediadrm/ICrypto.h
diff --git a/drm/libmediadrm/tests/Android.bp b/drm/libmediadrm/tests/Android.bp
index 9e0115e..2e39943 100644
--- a/drm/libmediadrm/tests/Android.bp
+++ b/drm/libmediadrm/tests/Android.bp
@@ -3,8 +3,8 @@
cc_test {
name: "CounterMetric_test",
srcs: ["CounterMetric_test.cpp"],
+ header_libs: ["libmedia_headers"],
shared_libs: ["libmediadrm"],
- include_dirs: ["frameworks/av/include/media"],
cflags: [
"-Werror",
"-Wall",
@@ -14,6 +14,9 @@
cc_test {
name: "DrmMetrics_test",
srcs: ["DrmMetrics_test.cpp"],
+ header_libs: [
+ "libmedia_headers"
+ ],
shared_libs: [
"android.hardware.drm@1.0",
"android.hardware.drm@1.1",
@@ -28,7 +31,7 @@
],
static_libs: ["libgmock"],
include_dirs: [
- "frameworks/av/include/media",
+ "frameworks/av/drm/libmediadrm/include",
],
cflags: [
// Suppress unused parameter and no error options. These cause problems
@@ -40,12 +43,14 @@
cc_test {
name: "EventMetric_test",
srcs: ["EventMetric_test.cpp"],
+ header_libs: [
+ "libmedia_headers"
+ ],
shared_libs: [
"liblog",
"libmediadrm",
"libutils",
],
- include_dirs: ["frameworks/av/include/media"],
cflags: [
"-Werror",
"-Wall",
diff --git a/drm/libmediadrm/tests/CounterMetric_test.cpp b/drm/libmediadrm/tests/CounterMetric_test.cpp
index 6bca0da..c2becb4 100644
--- a/drm/libmediadrm/tests/CounterMetric_test.cpp
+++ b/drm/libmediadrm/tests/CounterMetric_test.cpp
@@ -16,7 +16,7 @@
#include <gtest/gtest.h>
-#include "CounterMetric.h"
+#include <media/CounterMetric.h>
namespace android {
diff --git a/drm/libmediadrm/tests/EventMetric_test.cpp b/drm/libmediadrm/tests/EventMetric_test.cpp
index eb6c4f6..b3c3f62 100644
--- a/drm/libmediadrm/tests/EventMetric_test.cpp
+++ b/drm/libmediadrm/tests/EventMetric_test.cpp
@@ -16,7 +16,7 @@
#include <gtest/gtest.h>
-#include "EventMetric.h"
+#include <media/EventMetric.h>
namespace android {
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index bf35224..af7c367 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -97,7 +97,8 @@
///////////////////////////////////////////////////////////////////////////////
ClearKeyCasPlugin::ClearKeyCasPlugin(
void *appData, CasPluginCallback callback)
- : mCallback(callback), mCallbackExt(NULL), mAppData(appData) {
+ : mCallback(callback), mCallbackExt(NULL), mStatusCallback(NULL),
+ mAppData(appData) {
ALOGV("CTOR");
}
@@ -112,6 +113,13 @@
ClearKeySessionLibrary::get()->destroyPlugin(this);
}
+status_t ClearKeyCasPlugin::setStatusCallback(
+ CasPluginStatusCallback callback) {
+ ALOGV("setStatusCallback");
+ mStatusCallback = callback;
+ return OK;
+}
+
status_t ClearKeyCasPlugin::setPrivateData(const CasData &/*data*/) {
ALOGV("setPrivateData");
@@ -135,6 +143,19 @@
return ClearKeySessionLibrary::get()->addSession(this, sessionId);
}
+status_t ClearKeyCasPlugin::openSession(uint32_t intent, uint32_t mode,
+ CasSessionId* sessionId) {
+ ALOGV("openSession with intent=%d, mode=%d", intent, mode);
+ // Echo the received information to the callback.
+ // The clear key plugin doesn't use any events; echoing here is for testing only.
+ if (mStatusCallback != NULL) {
+ mStatusCallback((void*)mAppData, intent, mode);
+ }
+
+ // The clear key plugin doesn't use intent or mode.
+ return ClearKeySessionLibrary::get()->addSession(this, sessionId);
+}
+
status_t ClearKeyCasPlugin::closeSession(const CasSessionId &sessionId) {
ALOGV("closeSession: sessionId=%s", sessionIdToString(sessionId).string());
std::shared_ptr<ClearKeyCasSession> session =
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
index f48d5b1..c6938e6 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.h
@@ -71,11 +71,17 @@
ClearKeyCasPlugin(void *appData, CasPluginCallbackExt callback);
virtual ~ClearKeyCasPlugin();
+ virtual status_t setStatusCallback(
+ CasPluginStatusCallback callback) override;
+
virtual status_t setPrivateData(
const CasData &data) override;
virtual status_t openSession(CasSessionId *sessionId) override;
+ virtual status_t openSession(uint32_t intent, uint32_t mode,
+ CasSessionId *sessionId) override;
+
virtual status_t closeSession(
const CasSessionId &sessionId) override;
@@ -105,6 +111,7 @@
std::unique_ptr<KeyFetcher> mKeyFetcher;
CasPluginCallback mCallback;
CasPluginCallbackExt mCallbackExt;
+ CasPluginStatusCallback mStatusCallback;
void* mAppData;
};
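
The new setStatusCallback()/openSession(intent, mode) pair above simply stores a function pointer and, in the clearkey test plugin, echoes the caller's intent and mode back through it. A reduced sketch of that flow; the callback signature is inferred from the call site and is not the real CasPluginStatusCallback typedef from the CAS headers:

    #include <cstdint>

    // Assumed callback shape, for illustration only.
    using StatusCallbackSketch = void (*)(void* appData, uint32_t event, uint32_t arg);

    struct CasPluginSketch {
        StatusCallbackSketch mStatusCallback = nullptr;
        void* mAppData = nullptr;

        void setStatusCallback(StatusCallbackSketch cb) { mStatusCallback = cb; }

        void openSession(uint32_t intent, uint32_t mode) {
            // Echo intent/mode to the registered callback (test-only behaviour in
            // clearkey); a production plugin could act on the values instead.
            if (mStatusCallback != nullptr) {
                mStatusCallback(mAppData, intent, mode);
            }
            // ... then create the session as the parameterless overload does.
        }
    };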
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
index eaa3390..cb69f91 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
@@ -89,7 +89,7 @@
// asset_id change. If it sends an EcmContainer with 2 Ecms with different
// asset_ids (old and new) then it might be best to prefetch the Emm.
if ((asset_.id() != 0) && (*asset_id != asset_.id())) {
- ALOGW("Asset_id change from %llu to %" PRIu64, asset_.id(), *asset_id);
+ ALOGW("Asset_id change from %" PRIu64 " to %" PRIu64, asset_.id(), *asset_id);
asset_.Clear();
}
diff --git a/drm/mediacas/plugins/clearkey/ecm.cpp b/drm/mediacas/plugins/clearkey/ecm.cpp
index 9fde13a..b3b5218 100644
--- a/drm/mediacas/plugins/clearkey/ecm.cpp
+++ b/drm/mediacas/plugins/clearkey/ecm.cpp
@@ -17,6 +17,8 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "ecm"
+#include <inttypes.h>
+
#include "ecm.h"
#include "ecm_generator.h"
#include "protos/license_protos.pb.h"
@@ -76,7 +78,7 @@
return status;
}
if (asset.id() != asset_from_emm.id()) {
- ALOGE("Asset_id from Emm (%llu) does not match asset_id from Ecm (%llu).",
+ ALOGE("Asset_id from Emm (%" PRIu64 ") does not match asset_id from Ecm (%" PRIu64 ").",
asset_from_emm.id(), asset.id());
return CLEARKEY_STATUS_INVALID_PARAMETER;
}
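
Both %llu fixes above follow the same rule: the protobuf id() accessor returns a fixed-width 64-bit integer, and the portable way to print one is the <inttypes.h> PRIu64 macro rather than a hard-coded length modifier. A small self-contained illustration:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t asset_id = 0x1122334455667788ULL;
        // PRIu64 expands to the right length modifier for uint64_t on every ABI
        // (e.g. "llu" on 32-bit, "lu" on LP64), so -Wformat stays clean.
        std::printf("asset_id=%" PRIu64 "\n", asset_id);
        return 0;
    }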
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.cpp b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
index 2964791..f8bab0a 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.cpp
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.cpp
@@ -111,6 +111,12 @@
MockSessionLibrary::get()->destroyPlugin(this);
}
+status_t MockCasPlugin::setStatusCallback(
+ CasPluginStatusCallback /*callback*/) {
+ ALOGV("setStatusCallback");
+ return OK;
+}
+
status_t MockCasPlugin::setPrivateData(const CasData& /*data*/) {
ALOGV("setPrivateData");
return OK;
@@ -121,6 +127,13 @@
return MockSessionLibrary::get()->addSession(this, sessionId);
}
+status_t MockCasPlugin::openSession(uint32_t intent, uint32_t mode,
+ CasSessionId* sessionId) {
+ ALOGV("openSession with intent=%d, mode=%d", intent, mode);
+ // The mock plugin doesn't use intent or mode.
+ return MockSessionLibrary::get()->addSession(this, sessionId);
+}
+
status_t MockCasPlugin::closeSession(const CasSessionId &sessionId) {
ALOGV("closeSession: sessionId=%s", arrayToString(sessionId).string());
Mutex::Autolock lock(mLock);
diff --git a/drm/mediacas/plugins/mock/MockCasPlugin.h b/drm/mediacas/plugins/mock/MockCasPlugin.h
index 74b540c..660fd44 100644
--- a/drm/mediacas/plugins/mock/MockCasPlugin.h
+++ b/drm/mediacas/plugins/mock/MockCasPlugin.h
@@ -65,11 +65,17 @@
MockCasPlugin();
virtual ~MockCasPlugin();
+ virtual status_t setStatusCallback(
+ CasPluginStatusCallback callback) override;
+
virtual status_t setPrivateData(
const CasData &data) override;
virtual status_t openSession(CasSessionId *sessionId) override;
+ virtual status_t openSession(uint32_t intent, uint32_t mode,
+ CasSessionId *sessionId) override;
+
virtual status_t closeSession(
const CasSessionId &sessionId) override;
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
new file mode 100644
index 0000000..7bd1568
--- /dev/null
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -0,0 +1,18 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsMediaTestCases",
+ "options" : [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "include-filter": "android.media.cts.MediaDrmClearkeyTest"
+ },
+ {
+ "include-filter": "android.media.cts.MediaDrmMetricsTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drm/mediadrm/plugins/clearkey/default/InitDataParser.cpp b/drm/mediadrm/plugins/clearkey/default/InitDataParser.cpp
index caff393..121a4e2 100644
--- a/drm/mediadrm/plugins/clearkey/default/InitDataParser.cpp
+++ b/drm/mediadrm/plugins/clearkey/default/InitDataParser.cpp
@@ -76,10 +76,21 @@
android::status_t InitDataParser::parsePssh(const Vector<uint8_t>& initData,
Vector<const uint8_t*>* keyIds) {
+ // Description of PSSH format:
+ // https://w3c.github.io/encrypted-media/format-registry/initdata/cenc.html
size_t readPosition = 0;
- // Validate size field
uint32_t expectedSize = initData.size();
+ const char psshIdentifier[4] = {'p', 's', 's', 'h'};
+ const uint8_t psshVersion1[4] = {1, 0, 0, 0};
+ uint32_t keyIdCount = 0;
+ size_t headerSize = sizeof(expectedSize) + sizeof(psshIdentifier) +
+ sizeof(psshVersion1) + kSystemIdSize + sizeof(keyIdCount);
+ if (initData.size() < headerSize) {
+ return android::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ // Validate size field
expectedSize = htonl(expectedSize);
if (memcmp(&initData[readPosition], &expectedSize,
sizeof(expectedSize)) != 0) {
@@ -88,7 +99,6 @@
readPosition += sizeof(expectedSize);
// Validate PSSH box identifier
- const char psshIdentifier[4] = {'p', 's', 's', 'h'};
if (memcmp(&initData[readPosition], psshIdentifier,
sizeof(psshIdentifier)) != 0) {
return android::ERROR_DRM_CANNOT_HANDLE;
@@ -96,7 +106,6 @@
readPosition += sizeof(psshIdentifier);
// Validate EME version number
- const uint8_t psshVersion1[4] = {1, 0, 0, 0};
if (memcmp(&initData[readPosition], psshVersion1,
sizeof(psshVersion1)) != 0) {
return android::ERROR_DRM_CANNOT_HANDLE;
@@ -110,12 +119,14 @@
readPosition += kSystemIdSize;
// Read key ID count
- uint32_t keyIdCount;
memcpy(&keyIdCount, &initData[readPosition], sizeof(keyIdCount));
keyIdCount = ntohl(keyIdCount);
readPosition += sizeof(keyIdCount);
- if (readPosition + ((uint64_t)keyIdCount * kKeyIdSize) !=
- initData.size() - sizeof(uint32_t)) {
+
+ uint64_t psshSize = 0;
+ if (__builtin_mul_overflow(keyIdCount, kKeyIdSize, &psshSize) ||
+ __builtin_add_overflow(readPosition, psshSize, &psshSize) ||
+ psshSize != initData.size() - sizeof(uint32_t) /* DataSize(0) */) {
return android::ERROR_DRM_CANNOT_HANDLE;
}
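
The hardened parsePssh() above does two things: it rejects init data shorter than the fixed PSSH header before reading any field, and it computes keyIdCount * kKeyIdSize with overflow detection so a hostile count cannot wrap the final size comparison. A stand-alone sketch of the size check under assumed constants (not the plugin's real values):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kKeyIdSizeSketch = 16;  // assumed key-id width

    // Assumes the caller has already verified initData covers the fixed header,
    // as the hunk above does before reaching this point.
    bool keyIdSizesLookValid(const std::vector<uint8_t>& initData,
                             size_t readPosition, uint32_t keyIdCount) {
        uint64_t keyIdsSize = 0;
        uint64_t totalSize = 0;
        // The __builtin_*_overflow helpers return true when the arithmetic wraps,
        // so any overflow fails the check instead of corrupting totalSize.
        if (__builtin_mul_overflow(keyIdCount, kKeyIdSizeSketch, &keyIdsSize) ||
            __builtin_add_overflow(readPosition, keyIdsSize, &totalSize)) {
            return false;
        }
        // A v1 PSSH box ends with a 4-byte DataSize field (expected to be 0).
        return totalSize == initData.size() - sizeof(uint32_t);
    }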
diff --git a/drm/mediadrm/plugins/clearkey/hidl/Android.bp b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
index e91e918..a153ce2 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/hidl/Android.bp
@@ -48,7 +48,6 @@
"libcrypto",
"libhidlbase",
"libhidlmemory",
- "libhidltransport",
"liblog",
"libprotobuf-cpp-lite",
"libutils",
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index 23a35e5..f164f28 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -62,10 +62,8 @@
secure, keyId, iv, mode, pattern, subSamples, source, offset, destination,
[&](Status_V1_2 hStatus, uint32_t hBytesWritten, hidl_string hDetailedError) {
status = toStatus_1_0(hStatus);
- if (status == Status::OK) {
- bytesWritten = hBytesWritten;
- detailedError = hDetailedError;
- }
+ bytesWritten = hBytesWritten;
+ detailedError = hDetailedError;
}
);
@@ -109,6 +107,10 @@
"destination decrypt buffer base not set");
return Void();
}
+ } else {
+ _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0,
+ "destination type not supported");
+ return Void();
}
sp<IMemory> sourceBase = mSharedBufferMap[source.bufferId];
@@ -126,38 +128,45 @@
(static_cast<void *>(sourceBase->getPointer()));
uint8_t* srcPtr = static_cast<uint8_t *>(base + source.offset + offset);
void* destPtr = NULL;
- if (destination.type == BufferType::SHARED_MEMORY) {
- const SharedBuffer& destBuffer = destination.nonsecureMemory;
- sp<IMemory> destBase = mSharedBufferMap[destBuffer.bufferId];
- if (destBase == nullptr) {
- _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "destination is a nullptr");
- return Void();
- }
-
- if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
- _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
- return Void();
- }
- destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
- } else if (destination.type == BufferType::NATIVE_HANDLE) {
- native_handle_t *handle = const_cast<native_handle_t *>(
- destination.secureMemory.getNativeHandle());
- destPtr = static_cast<void *>(handle);
+ // destination.type == BufferType::SHARED_MEMORY
+ const SharedBuffer& destBuffer = destination.nonsecureMemory;
+ sp<IMemory> destBase = mSharedBufferMap[destBuffer.bufferId];
+ if (destBase == nullptr) {
+ _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "destination is a nullptr");
+ return Void();
}
+ if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
+ _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
+ return Void();
+ }
+ destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
+
+
// Calculate the output buffer size and determine if any subsamples are
// encrypted.
size_t destSize = 0;
bool haveEncryptedSubsamples = false;
for (size_t i = 0; i < subSamples.size(); i++) {
const SubSample &subSample = subSamples[i];
- destSize += subSample.numBytesOfClearData;
- destSize += subSample.numBytesOfEncryptedData;
+ if (__builtin_add_overflow(destSize, subSample.numBytesOfClearData, &destSize)) {
+ _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "subsample clear size overflow");
+ return Void();
+ }
+ if (__builtin_add_overflow(destSize, subSample.numBytesOfEncryptedData, &destSize)) {
+ _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "subsample encrypted size overflow");
+ return Void();
+ }
if (subSample.numBytesOfEncryptedData > 0) {
haveEncryptedSubsamples = true;
}
}
+ if (destSize > destBuffer.size) {
+ _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "subsample sum too large");
+ return Void();
+ }
+
if (mode == Mode::UNENCRYPTED) {
if (haveEncryptedSubsamples) {
_hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0,
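
Accumulating the subsample sizes with __builtin_add_overflow, as above, closes two holes at once: an attacker-controlled subsample list can no longer wrap destSize, and the explicit destSize > destBuffer.size check then bounds the write into the shared-memory destination. A reduced sketch of that accumulation with simplified types:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct SubSampleSketch {
        uint32_t numBytesOfClearData;
        uint32_t numBytesOfEncryptedData;
    };

    // Returns false if the total wraps or exceeds the destination capacity.
    bool computeDestSize(const std::vector<SubSampleSketch>& subSamples,
                         size_t destCapacity, size_t* outSize) {
        size_t destSize = 0;
        for (const auto& ss : subSamples) {
            if (__builtin_add_overflow(destSize, ss.numBytesOfClearData, &destSize) ||
                __builtin_add_overflow(destSize, ss.numBytesOfEncryptedData, &destSize)) {
                return false;  // overflow: reject the frame
            }
        }
        if (destSize > destCapacity) {
            return false;  // would overrun the destination buffer
        }
        *outSize = destSize;
        return true;
    }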
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index d74bc53..942ea7d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -111,6 +111,8 @@
// The content in this secure stop is implementation dependent, the clearkey
// secureStop does not serve as a reference implementation.
void DrmPlugin::installSecureStop(const hidl_vec<uint8_t>& sessionId) {
+ Mutex::Autolock lock(mSecureStopLock);
+
ClearkeySecureStop clearkeySecureStop;
clearkeySecureStop.id = uint32ToVector(++mNextSecureStopId);
clearkeySecureStop.data.assign(sessionId.begin(), sessionId.end());
@@ -744,6 +746,7 @@
}
Return<void> DrmPlugin::getSecureStops(getSecureStops_cb _hidl_cb) {
+ mSecureStopLock.lock();
std::vector<SecureStop> stops;
for (auto itr = mSecureStops.begin(); itr != mSecureStops.end(); ++itr) {
ClearkeySecureStop clearkeyStop = itr->second;
@@ -755,26 +758,32 @@
stop.opaqueData = toHidlVec(stopVec);
stops.push_back(stop);
}
+ mSecureStopLock.unlock();
+
_hidl_cb(Status::OK, stops);
return Void();
}
Return<void> DrmPlugin::getSecureStop(const hidl_vec<uint8_t>& secureStopId,
getSecureStop_cb _hidl_cb) {
- SecureStop stop;
+ std::vector<uint8_t> stopVec;
+
+ mSecureStopLock.lock();
auto itr = mSecureStops.find(toVector(secureStopId));
if (itr != mSecureStops.end()) {
ClearkeySecureStop clearkeyStop = itr->second;
- std::vector<uint8_t> stopVec;
stopVec.insert(stopVec.end(), clearkeyStop.id.begin(), clearkeyStop.id.end());
stopVec.insert(stopVec.end(), clearkeyStop.data.begin(), clearkeyStop.data.end());
+ }
+ mSecureStopLock.unlock();
+ SecureStop stop;
+ if (!stopVec.empty()) {
stop.opaqueData = toHidlVec(stopVec);
_hidl_cb(Status::OK, stop);
} else {
_hidl_cb(Status::BAD_VALUE, stop);
}
-
return Void();
}
@@ -787,51 +796,73 @@
}
Return<void> DrmPlugin::getSecureStopIds(getSecureStopIds_cb _hidl_cb) {
+ mSecureStopLock.lock();
std::vector<SecureStopId> ids;
for (auto itr = mSecureStops.begin(); itr != mSecureStops.end(); ++itr) {
ids.push_back(itr->first);
}
+ mSecureStopLock.unlock();
_hidl_cb(Status::OK, toHidlVec(ids));
return Void();
}
Return<Status> DrmPlugin::releaseSecureStops(const SecureStopRelease& ssRelease) {
- if (ssRelease.opaqueData.size() == 0) {
+ // opaqueData starts with a 4-byte decimal integer string
+ const size_t kFourBytesOffset = 4;
+ if (ssRelease.opaqueData.size() < kFourBytesOffset) {
+ ALOGE("Invalid secureStopRelease length");
return Status::BAD_VALUE;
}
Status status = Status::OK;
std::vector<uint8_t> input = toVector(ssRelease.opaqueData);
+ if (input.size() < kSecureStopIdSize + kFourBytesOffset) {
+ // A valid SecureStopRelease must contain at least
+ // the 4-byte count and one secureStop id
+ ALOGE("Total size of secureStops is too short");
+ return Status::BAD_VALUE;
+ }
+
// The format of opaqueData is shared between the server
// and the drm service. The clearkey implementation consists of:
// count - number of secure stops
// list of fixed length secure stops
- size_t countBufferSize = sizeof(uint32_t);
uint32_t count = 0;
sscanf(reinterpret_cast<char*>(input.data()), "%04" PRIu32, &count);
// Avoid divide by 0 below.
if (count == 0) {
+ ALOGE("Invalid 0 secureStop count");
return Status::BAD_VALUE;
}
- size_t secureStopSize = (input.size() - countBufferSize) / count;
- uint8_t buffer[secureStopSize];
- size_t offset = countBufferSize; // skip the count
+ // Computes the fixed length secureStop size
+ size_t secureStopSize = (input.size() - kFourBytesOffset) / count;
+ if (secureStopSize < kSecureStopIdSize) {
+ // A valid secureStop contains the id plus data
+ ALOGE("Invalid secureStop size");
+ return Status::BAD_VALUE;
+ }
+ uint8_t* buffer = new uint8_t[secureStopSize];
+ size_t offset = kFourBytesOffset; // skip the count
for (size_t i = 0; i < count; ++i, offset += secureStopSize) {
memcpy(buffer, input.data() + offset, secureStopSize);
- std::vector<uint8_t> id(buffer, buffer + kSecureStopIdSize);
+ // A secureStop contains id+data; only the id is used for removal
+ std::vector<uint8_t> id(buffer, buffer + kSecureStopIdSize);
status = removeSecureStop(toHidlVec(id));
if (Status::OK != status) break;
}
+ delete[] buffer;
return status;
}
Return<Status> DrmPlugin::removeSecureStop(const hidl_vec<uint8_t>& secureStopId) {
+ Mutex::Autolock lock(mSecureStopLock);
+
if (1 != mSecureStops.erase(toVector(secureStopId))) {
return Status::BAD_VALUE;
}
@@ -839,6 +870,8 @@
}
Return<Status> DrmPlugin::removeAllSecureStops() {
+ Mutex::Autolock lock(mSecureStopLock);
+
mSecureStops.clear();
mNextSecureStopId = kSecureStopIdStart;
return Status::OK;
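
The secure-stop changes above all follow one locking discipline: mSecureStopLock is held only while touching the mSecureStops container, and any HIDL callback is invoked after the lock is released, using a local copy of the data. A hedged sketch of that copy-under-lock pattern with simplified container and callback types:

    #include <cstdint>
    #include <functional>
    #include <map>
    #include <mutex>
    #include <vector>

    struct SecureStopStoreSketch {
        std::mutex mLock;
        std::map<std::vector<uint8_t>, std::vector<uint8_t>> mStops;  // id -> data

        void getStops(
                const std::function<void(const std::vector<std::vector<uint8_t>>&)>& cb) {
            std::vector<std::vector<uint8_t>> copy;
            {
                std::lock_guard<std::mutex> lock(mLock);  // held only while copying
                for (const auto& entry : mStops) {
                    copy.push_back(entry.second);
                }
            }
            cb(copy);  // callback runs without the lock held
        }
    };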
diff --git a/drm/mediadrm/plugins/clearkey/hidl/InitDataParser.cpp b/drm/mediadrm/plugins/clearkey/hidl/InitDataParser.cpp
index b988ce0..8513434 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/InitDataParser.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/InitDataParser.cpp
@@ -85,10 +85,21 @@
Status InitDataParser::parsePssh(const std::vector<uint8_t>& initData,
std::vector<const uint8_t*>* keyIds) {
+ // Description of PSSH format:
+ // https://w3c.github.io/encrypted-media/format-registry/initdata/cenc.html
size_t readPosition = 0;
- // Validate size field
uint32_t expectedSize = initData.size();
+ const char psshIdentifier[4] = {'p', 's', 's', 'h'};
+ const uint8_t psshVersion1[4] = {1, 0, 0, 0};
+ uint32_t keyIdCount = 0;
+ size_t headerSize = sizeof(expectedSize) + sizeof(psshIdentifier) +
+ sizeof(psshVersion1) + kSystemIdSize + sizeof(keyIdCount);
+ if (initData.size() < headerSize) {
+ return Status::ERROR_DRM_CANNOT_HANDLE;
+ }
+
+ // Validate size field
expectedSize = htonl(expectedSize);
if (memcmp(&initData[readPosition], &expectedSize,
sizeof(expectedSize)) != 0) {
@@ -97,7 +108,6 @@
readPosition += sizeof(expectedSize);
// Validate PSSH box identifier
- const char psshIdentifier[4] = {'p', 's', 's', 'h'};
if (memcmp(&initData[readPosition], psshIdentifier,
sizeof(psshIdentifier)) != 0) {
return Status::ERROR_DRM_CANNOT_HANDLE;
@@ -105,7 +115,6 @@
readPosition += sizeof(psshIdentifier);
// Validate EME version number
- const uint8_t psshVersion1[4] = {1, 0, 0, 0};
if (memcmp(&initData[readPosition], psshVersion1,
sizeof(psshVersion1)) != 0) {
return Status::ERROR_DRM_CANNOT_HANDLE;
@@ -119,12 +128,14 @@
readPosition += kSystemIdSize;
// Read key ID count
- uint32_t keyIdCount;
memcpy(&keyIdCount, &initData[readPosition], sizeof(keyIdCount));
keyIdCount = ntohl(keyIdCount);
readPosition += sizeof(keyIdCount);
- if (readPosition + ((uint64_t)keyIdCount * kKeyIdSize) !=
- initData.size() - sizeof(uint32_t)) {
+
+ uint64_t psshSize = 0;
+ if (__builtin_mul_overflow(keyIdCount, kKeyIdSize, &psshSize) ||
+ __builtin_add_overflow(readPosition, psshSize, &psshSize) ||
+ psshSize != initData.size() - sizeof(uint32_t) /* DataSize(0) */) {
return Status::ERROR_DRM_CANNOT_HANDLE;
}
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
index f294d4d..3de7589 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/DrmPlugin.h
@@ -416,6 +416,7 @@
}
DeviceFiles mFileHandle;
+ Mutex mSecureStopLock;
CLEARKEY_DISALLOW_COPY_AND_ASSIGN_AND_NEW(DrmPlugin);
};
diff --git a/include/camera b/include/camera
deleted file mode 120000
index 00848e3..0000000
--- a/include/camera
+++ /dev/null
@@ -1 +0,0 @@
-../camera/include/camera/
\ No newline at end of file
diff --git a/include/cpustats b/include/cpustats
deleted file mode 120000
index 4a02d41..0000000
--- a/include/cpustats
+++ /dev/null
@@ -1 +0,0 @@
-../media/libcpustats/include/cpustats/
\ No newline at end of file
diff --git a/include/media/AVSyncSettings.h b/include/media/AVSyncSettings.h
deleted file mode 120000
index bbe211f..0000000
--- a/include/media/AVSyncSettings.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/AVSyncSettings.h
\ No newline at end of file
diff --git a/include/media/AudioAttributes.h b/include/media/AudioAttributes.h
deleted file mode 120000
index 27ba471..0000000
--- a/include/media/AudioAttributes.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioAttributes.h
\ No newline at end of file
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
deleted file mode 120000
index c4d6e79..0000000
--- a/include/media/AudioBufferProvider.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/AudioClient.h b/include/media/AudioClient.h
deleted file mode 120000
index a0530e4..0000000
--- a/include/media/AudioClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioClient.h
\ No newline at end of file
diff --git a/include/media/AudioCommonTypes.h b/include/media/AudioCommonTypes.h
deleted file mode 120000
index ae7c99a..0000000
--- a/include/media/AudioCommonTypes.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioCommonTypes.h
\ No newline at end of file
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
deleted file mode 120000
index bf52955..0000000
--- a/include/media/AudioEffect.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioEffect.h
\ No newline at end of file
diff --git a/include/media/AudioIoDescriptor.h b/include/media/AudioIoDescriptor.h
deleted file mode 120000
index 68f54c9..0000000
--- a/include/media/AudioIoDescriptor.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioIoDescriptor.h
\ No newline at end of file
diff --git a/include/media/AudioMixer.h b/include/media/AudioMixer.h
deleted file mode 120000
index 85ee950..0000000
--- a/include/media/AudioMixer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioprocessing/include/media/AudioMixer.h
\ No newline at end of file
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
deleted file mode 120000
index a5889e5..0000000
--- a/include/media/AudioParameter.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioParameter.h
\ No newline at end of file
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
deleted file mode 120000
index dd4cd53..0000000
--- a/include/media/AudioPolicy.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioPolicy.h
\ No newline at end of file
diff --git a/include/media/AudioProductStrategy.h b/include/media/AudioProductStrategy.h
deleted file mode 120000
index 6bfaf11..0000000
--- a/include/media/AudioProductStrategy.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioProductStrategy.h
\ No newline at end of file
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
deleted file mode 120000
index 7939dd3..0000000
--- a/include/media/AudioRecord.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioRecord.h
\ No newline at end of file
diff --git a/include/media/AudioResampler.h b/include/media/AudioResampler.h
deleted file mode 120000
index 771f1b8..0000000
--- a/include/media/AudioResampler.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioprocessing/include/media/AudioResampler.h
\ No newline at end of file
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
deleted file mode 120000
index 9fad2b7..0000000
--- a/include/media/AudioSystem.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioSystem.h
\ No newline at end of file
diff --git a/include/media/AudioTimestamp.h b/include/media/AudioTimestamp.h
deleted file mode 120000
index b6b9278..0000000
--- a/include/media/AudioTimestamp.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioTimestamp.h
\ No newline at end of file
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
deleted file mode 120000
index 303bfcd..0000000
--- a/include/media/AudioTrack.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioTrack.h
\ No newline at end of file
diff --git a/include/media/AudioVolumeGroup.h b/include/media/AudioVolumeGroup.h
deleted file mode 120000
index d6f1c99..0000000
--- a/include/media/AudioVolumeGroup.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/AudioVolumeGroup.h
\ No newline at end of file
diff --git a/include/media/BufferProviders.h b/include/media/BufferProviders.h
deleted file mode 120000
index 778e1d8..0000000
--- a/include/media/BufferProviders.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioprocessing/include/media/BufferProviders.h
\ No newline at end of file
diff --git a/include/media/BufferingSettings.h b/include/media/BufferingSettings.h
deleted file mode 120000
index 409203f..0000000
--- a/include/media/BufferingSettings.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/BufferingSettings.h
\ No newline at end of file
diff --git a/include/media/CharacterEncodingDetector.h b/include/media/CharacterEncodingDetector.h
deleted file mode 120000
index 2b28387..0000000
--- a/include/media/CharacterEncodingDetector.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/CharacterEncodingDetector.h
\ No newline at end of file
diff --git a/include/media/CounterMetric.h b/include/media/CounterMetric.h
deleted file mode 120000
index baba043..0000000
--- a/include/media/CounterMetric.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/CounterMetric.h
\ No newline at end of file
diff --git a/include/media/EventLog.h b/include/media/EventLog.h
deleted file mode 120000
index 9b2c4bf..0000000
--- a/include/media/EventLog.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/utils/include/mediautils/EventLog.h
\ No newline at end of file
diff --git a/include/media/EventMetric.h b/include/media/EventMetric.h
deleted file mode 120000
index 5707d9a..0000000
--- a/include/media/EventMetric.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/EventMetric.h
\ No newline at end of file
diff --git a/include/media/ExtendedAudioBufferProvider.h b/include/media/ExtendedAudioBufferProvider.h
deleted file mode 120000
index 99d3c13..0000000
--- a/include/media/ExtendedAudioBufferProvider.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/ExtendedAudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
deleted file mode 120000
index ef6f5be..0000000
--- a/include/media/IAudioFlinger.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IAudioFlinger.h
\ No newline at end of file
diff --git a/include/media/IAudioFlingerClient.h b/include/media/IAudioFlingerClient.h
deleted file mode 120000
index dc481e8..0000000
--- a/include/media/IAudioFlingerClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IAudioFlingerClient.h
\ No newline at end of file
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
deleted file mode 120000
index 08101fc..0000000
--- a/include/media/IAudioPolicyService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IAudioPolicyService.h
\ No newline at end of file
diff --git a/include/media/IAudioPolicyServiceClient.h b/include/media/IAudioPolicyServiceClient.h
deleted file mode 120000
index 0d4b3e7..0000000
--- a/include/media/IAudioPolicyServiceClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IAudioPolicyServiceClient.h
\ No newline at end of file
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
deleted file mode 120000
index 7bab1fd..0000000
--- a/include/media/IAudioTrack.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IAudioTrack.h
\ No newline at end of file
diff --git a/include/media/IDataSource.h b/include/media/IDataSource.h
deleted file mode 120000
index 41cdd8b..0000000
--- a/include/media/IDataSource.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IDataSource.h
\ No newline at end of file
diff --git a/include/media/IEffect.h b/include/media/IEffect.h
deleted file mode 120000
index 2fb8bfb..0000000
--- a/include/media/IEffect.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IEffect.h
\ No newline at end of file
diff --git a/include/media/IEffectClient.h b/include/media/IEffectClient.h
deleted file mode 120000
index b4e39cf..0000000
--- a/include/media/IEffectClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/IEffectClient.h
\ No newline at end of file
diff --git a/include/media/IMediaCodecList.h b/include/media/IMediaCodecList.h
deleted file mode 120000
index 2186312..0000000
--- a/include/media/IMediaCodecList.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaCodecList.h
\ No newline at end of file
diff --git a/include/media/IMediaDeathNotifier.h b/include/media/IMediaDeathNotifier.h
deleted file mode 120000
index ce3b8f0..0000000
--- a/include/media/IMediaDeathNotifier.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaDeathNotifier.h
\ No newline at end of file
diff --git a/include/media/IMediaExtractor.h b/include/media/IMediaExtractor.h
deleted file mode 120000
index 8708c8c..0000000
--- a/include/media/IMediaExtractor.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaExtractor.h
\ No newline at end of file
diff --git a/include/media/IMediaExtractorService.h b/include/media/IMediaExtractorService.h
deleted file mode 120000
index 3ee9f1e..0000000
--- a/include/media/IMediaExtractorService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaExtractorService.h
\ No newline at end of file
diff --git a/include/media/IMediaHTTPConnection.h b/include/media/IMediaHTTPConnection.h
deleted file mode 120000
index 0970c15..0000000
--- a/include/media/IMediaHTTPConnection.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaHTTPConnection.h
\ No newline at end of file
diff --git a/include/media/IMediaHTTPService.h b/include/media/IMediaHTTPService.h
deleted file mode 120000
index b90c34f..0000000
--- a/include/media/IMediaHTTPService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaHTTPService.h
\ No newline at end of file
diff --git a/include/media/IMediaLogService.h b/include/media/IMediaLogService.h
deleted file mode 120000
index 245a29d..0000000
--- a/include/media/IMediaLogService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaLogService.h
\ No newline at end of file
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
deleted file mode 120000
index 959df1a..0000000
--- a/include/media/IMediaMetadataRetriever.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaMetadataRetriever.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
deleted file mode 120000
index 9414d37..0000000
--- a/include/media/IMediaPlayer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaPlayer.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayerClient.h b/include/media/IMediaPlayerClient.h
deleted file mode 120000
index b6547ce..0000000
--- a/include/media/IMediaPlayerClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaPlayerClient.h
\ No newline at end of file
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
deleted file mode 120000
index 89c96cd..0000000
--- a/include/media/IMediaPlayerService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaPlayerService.h
\ No newline at end of file
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
deleted file mode 120000
index 57d192c..0000000
--- a/include/media/IMediaRecorder.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaRecorder.h
\ No newline at end of file
diff --git a/include/media/IMediaRecorderClient.h b/include/media/IMediaRecorderClient.h
deleted file mode 120000
index 89f4359..0000000
--- a/include/media/IMediaRecorderClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaRecorderClient.h
\ No newline at end of file
diff --git a/include/media/IMediaSource.h b/include/media/IMediaSource.h
deleted file mode 120000
index 1330ad3..0000000
--- a/include/media/IMediaSource.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaSource.h
\ No newline at end of file
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
deleted file mode 120000
index 6d5b375..0000000
--- a/include/media/IOMX.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IOMX.h
\ No newline at end of file
diff --git a/include/media/IRemoteDisplay.h b/include/media/IRemoteDisplay.h
deleted file mode 120000
index 4b0cf10..0000000
--- a/include/media/IRemoteDisplay.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IRemoteDisplay.h
\ No newline at end of file
diff --git a/include/media/IRemoteDisplayClient.h b/include/media/IRemoteDisplayClient.h
deleted file mode 120000
index f29a2ee..0000000
--- a/include/media/IRemoteDisplayClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IRemoteDisplayClient.h
\ No newline at end of file
diff --git a/include/media/IResourceManagerClient.h b/include/media/IResourceManagerClient.h
deleted file mode 120000
index 100af9b..0000000
--- a/include/media/IResourceManagerClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IResourceManagerClient.h
\ No newline at end of file
diff --git a/include/media/IResourceManagerService.h b/include/media/IResourceManagerService.h
deleted file mode 120000
index 9b389c6..0000000
--- a/include/media/IResourceManagerService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IResourceManagerService.h
\ No newline at end of file
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
deleted file mode 120000
index 4943af9..0000000
--- a/include/media/IStreamSource.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IStreamSource.h
\ No newline at end of file
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
deleted file mode 120000
index 5483fda..0000000
--- a/include/media/JetPlayer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/JetPlayer.h
\ No newline at end of file
diff --git a/include/media/LinearMap.h b/include/media/LinearMap.h
deleted file mode 120000
index 30d4ca8..0000000
--- a/include/media/LinearMap.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/LinearMap.h
\ No newline at end of file
diff --git a/include/media/MediaCodecBuffer.h b/include/media/MediaCodecBuffer.h
deleted file mode 120000
index 8c9aa76..0000000
--- a/include/media/MediaCodecBuffer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaCodecBuffer.h
\ No newline at end of file
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
deleted file mode 120000
index ff44ce4..0000000
--- a/include/media/MediaCodecInfo.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaCodecInfo.h
\ No newline at end of file
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
deleted file mode 120000
index 1c53511..0000000
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaMetadataRetrieverInterface.h
\ No newline at end of file
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
deleted file mode 120000
index 651c6e6..0000000
--- a/include/media/MediaProfiles.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaProfiles.h
\ No newline at end of file
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
deleted file mode 120000
index e40f992..0000000
--- a/include/media/MediaRecorderBase.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaRecorderBase.h
\ No newline at end of file
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
deleted file mode 120000
index 91346aa..0000000
--- a/include/media/MediaResource.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaResource.h
\ No newline at end of file
diff --git a/include/media/MediaResourcePolicy.h b/include/media/MediaResourcePolicy.h
deleted file mode 120000
index 5d165ee..0000000
--- a/include/media/MediaResourcePolicy.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MediaResourcePolicy.h
\ No newline at end of file
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
deleted file mode 120000
index 504173e..0000000
--- a/include/media/MemoryLeakTrackUtil.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MemoryLeakTrackUtil.h
\ No newline at end of file
diff --git a/include/media/Metadata.h b/include/media/Metadata.h
deleted file mode 120000
index e421168..0000000
--- a/include/media/Metadata.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/Metadata.h
\ No newline at end of file
diff --git a/include/media/MidiDeviceInfo.h b/include/media/MidiDeviceInfo.h
deleted file mode 120000
index 95da7cf..0000000
--- a/include/media/MidiDeviceInfo.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MidiDeviceInfo.h
\ No newline at end of file
diff --git a/include/media/MidiIoWrapper.h b/include/media/MidiIoWrapper.h
deleted file mode 120000
index 786ec3d..0000000
--- a/include/media/MidiIoWrapper.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/MidiIoWrapper.h
\ No newline at end of file
diff --git a/include/media/Modulo.h b/include/media/Modulo.h
deleted file mode 120000
index 989c4cb..0000000
--- a/include/media/Modulo.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/Modulo.h
\ No newline at end of file
diff --git a/include/media/OMXBuffer.h b/include/media/OMXBuffer.h
deleted file mode 120000
index 00db207..0000000
--- a/include/media/OMXBuffer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/OMXBuffer.h
\ No newline at end of file
diff --git a/include/media/OMXFenceParcelable.h b/include/media/OMXFenceParcelable.h
deleted file mode 120000
index c4c1b0a..0000000
--- a/include/media/OMXFenceParcelable.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/OMXFenceParcelable.h
\ No newline at end of file
diff --git a/include/media/PluginLoader.h b/include/media/PluginLoader.h
deleted file mode 120000
index 9101735..0000000
--- a/include/media/PluginLoader.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/PluginLoader.h
\ No newline at end of file
diff --git a/include/media/PluginMetricsReporting.h b/include/media/PluginMetricsReporting.h
deleted file mode 120000
index 7d9a7a0..0000000
--- a/include/media/PluginMetricsReporting.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/PluginMetricsReporting.h
\ No newline at end of file
diff --git a/include/media/RecordBufferConverter.h b/include/media/RecordBufferConverter.h
deleted file mode 120000
index 2d7bc0c..0000000
--- a/include/media/RecordBufferConverter.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/RecordBufferConverter.h
\ No newline at end of file
diff --git a/include/media/RingBuffer.h b/include/media/RingBuffer.h
deleted file mode 120000
index 9af28d5..0000000
--- a/include/media/RingBuffer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/RingBuffer.h
\ No newline at end of file
diff --git a/include/media/StringArray.h b/include/media/StringArray.h
deleted file mode 120000
index 616ce6c..0000000
--- a/include/media/StringArray.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/StringArray.h
\ No newline at end of file
diff --git a/include/media/TimeCheck.h b/include/media/TimeCheck.h
deleted file mode 120000
index 85e17f9..0000000
--- a/include/media/TimeCheck.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/utils/include/mediautils/TimeCheck.h
\ No newline at end of file
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
deleted file mode 120000
index 33df0e3..0000000
--- a/include/media/ToneGenerator.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libaudioclient/include/media/ToneGenerator.h
\ No newline at end of file
diff --git a/include/media/TypeConverter.h b/include/media/TypeConverter.h
deleted file mode 120000
index 837af44..0000000
--- a/include/media/TypeConverter.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/TypeConverter.h
\ No newline at end of file
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
deleted file mode 120000
index ed2ec15..0000000
--- a/include/media/Visualizer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/Visualizer.h
\ No newline at end of file
diff --git a/include/media/convert.h b/include/media/convert.h
deleted file mode 120000
index cb0d00d..0000000
--- a/include/media/convert.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/convert.h
\ No newline at end of file
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
deleted file mode 120000
index b401bab..0000000
--- a/include/media/mediametadataretriever.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/mediametadataretriever.h
\ No newline at end of file
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
deleted file mode 120000
index 06d537b..0000000
--- a/include/media/mediaplayer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/mediaplayer.h
\ No newline at end of file
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
deleted file mode 120000
index a24deb3..0000000
--- a/include/media/mediarecorder.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/mediarecorder.h
\ No newline at end of file
diff --git a/include/media/mediascanner.h b/include/media/mediascanner.h
deleted file mode 120000
index 91479e0..0000000
--- a/include/media/mediascanner.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/mediascanner.h
\ No newline at end of file
diff --git a/include/media/nbaio/AudioBufferProviderSource.h b/include/media/nbaio/AudioBufferProviderSource.h
deleted file mode 120000
index 55841e7..0000000
--- a/include/media/nbaio/AudioBufferProviderSource.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/AudioBufferProviderSource.h
\ No newline at end of file
diff --git a/include/media/nbaio/AudioStreamInSource.h b/include/media/nbaio/AudioStreamInSource.h
deleted file mode 120000
index f5bcc76..0000000
--- a/include/media/nbaio/AudioStreamInSource.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/AudioStreamInSource.h
\ No newline at end of file
diff --git a/include/media/nbaio/LibsndfileSink.h b/include/media/nbaio/LibsndfileSink.h
deleted file mode 120000
index 8a13b6c..0000000
--- a/include/media/nbaio/LibsndfileSink.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/LibsndfileSink.h
\ No newline at end of file
diff --git a/include/media/nbaio/LibsndfileSource.h b/include/media/nbaio/LibsndfileSource.h
deleted file mode 120000
index 2750fde..0000000
--- a/include/media/nbaio/LibsndfileSource.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/LibsndfileSource.h
\ No newline at end of file
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
deleted file mode 120000
index 4ea43be..0000000
--- a/include/media/nbaio/MonoPipe.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include_mono/media/nbaio/MonoPipe.h
\ No newline at end of file
diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h
deleted file mode 120000
index 30f426c..0000000
--- a/include/media/nbaio/MonoPipeReader.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include_mono/media/nbaio/MonoPipeReader.h
\ No newline at end of file
diff --git a/include/media/nbaio/Pipe.h b/include/media/nbaio/Pipe.h
deleted file mode 120000
index a4bbbc9..0000000
--- a/include/media/nbaio/Pipe.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/Pipe.h
\ No newline at end of file
diff --git a/include/media/nbaio/PipeReader.h b/include/media/nbaio/PipeReader.h
deleted file mode 120000
index 64b21cf..0000000
--- a/include/media/nbaio/PipeReader.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/PipeReader.h
\ No newline at end of file
diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/include/media/nbaio/SourceAudioBufferProvider.h
deleted file mode 120000
index 74a3b06..0000000
--- a/include/media/nbaio/SourceAudioBufferProvider.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnbaio/include/media/nbaio/SourceAudioBufferProvider.h
\ No newline at end of file
diff --git a/include/media/nblog/NBLog.h b/include/media/nblog/NBLog.h
deleted file mode 120000
index 3cc366c..0000000
--- a/include/media/nblog/NBLog.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnblog/include/media/nblog/NBLog.h
\ No newline at end of file
diff --git a/include/media/nblog/PerformanceAnalysis.h b/include/media/nblog/PerformanceAnalysis.h
deleted file mode 120000
index 6ead3bc..0000000
--- a/include/media/nblog/PerformanceAnalysis.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnblog/include/media/nblog/PerformanceAnalysis.h
\ No newline at end of file
diff --git a/include/media/nblog/ReportPerformance.h b/include/media/nblog/ReportPerformance.h
deleted file mode 120000
index e9b8e80..0000000
--- a/include/media/nblog/ReportPerformance.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../media/libnblog/include/media/nblog/ReportPerformance.h
\ No newline at end of file
diff --git a/include/mediadrm/CryptoHal.h b/include/mediadrm/CryptoHal.h
deleted file mode 120000
index 92f3137..0000000
--- a/include/mediadrm/CryptoHal.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/CryptoHal.h
\ No newline at end of file
diff --git a/include/mediadrm/DrmHal.h b/include/mediadrm/DrmHal.h
deleted file mode 120000
index 17bb667..0000000
--- a/include/mediadrm/DrmHal.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/DrmHal.h
\ No newline at end of file
diff --git a/include/mediadrm/DrmMetrics.h b/include/mediadrm/DrmMetrics.h
deleted file mode 120000
index abc966b..0000000
--- a/include/mediadrm/DrmMetrics.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/DrmMetrics.h
\ No newline at end of file
diff --git a/include/mediadrm/DrmPluginPath.h b/include/mediadrm/DrmPluginPath.h
deleted file mode 120000
index 9e05194..0000000
--- a/include/mediadrm/DrmPluginPath.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/DrmPluginPath.h
\ No newline at end of file
diff --git a/include/mediadrm/DrmSessionClientInterface.h b/include/mediadrm/DrmSessionClientInterface.h
deleted file mode 120000
index f4e3211..0000000
--- a/include/mediadrm/DrmSessionClientInterface.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/DrmSessionClientInterface.h
\ No newline at end of file
diff --git a/include/mediadrm/DrmSessionManager.h b/include/mediadrm/DrmSessionManager.h
deleted file mode 120000
index f0a47bf..0000000
--- a/include/mediadrm/DrmSessionManager.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/DrmSessionManager.h
\ No newline at end of file
diff --git a/include/mediadrm/ICrypto.h b/include/mediadrm/ICrypto.h
deleted file mode 120000
index b250e07..0000000
--- a/include/mediadrm/ICrypto.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/ICrypto.h
\ No newline at end of file
diff --git a/include/mediadrm/IDrm.h b/include/mediadrm/IDrm.h
deleted file mode 120000
index 841bb1b..0000000
--- a/include/mediadrm/IDrm.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IDrm.h
\ No newline at end of file
diff --git a/include/mediadrm/IDrmClient.h b/include/mediadrm/IDrmClient.h
deleted file mode 120000
index 10aa5c0..0000000
--- a/include/mediadrm/IDrmClient.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IDrmClient.h
\ No newline at end of file
diff --git a/include/mediadrm/IMediaDrmService.h b/include/mediadrm/IMediaDrmService.h
deleted file mode 120000
index f3c260f..0000000
--- a/include/mediadrm/IMediaDrmService.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/IMediaDrmService.h
\ No newline at end of file
diff --git a/include/mediadrm/SharedLibrary.h b/include/mediadrm/SharedLibrary.h
deleted file mode 120000
index 9f8f5a4..0000000
--- a/include/mediadrm/SharedLibrary.h
+++ /dev/null
@@ -1 +0,0 @@
-../../media/libmedia/include/media/SharedLibrary.h
\ No newline at end of file
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 1b1f149..8aec80d 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -575,7 +575,7 @@
class AudioTrackServerProxy : public ServerProxy {
public:
AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
- size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
+ size_t frameSize, bool clientInServer, uint32_t sampleRate)
: ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer),
mPlaybackRateObserver(&cblk->mPlaybackRateQueue),
mUnderrunCount(0), mUnderrunning(false), mDrained(true) {
@@ -651,7 +651,7 @@
class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
public:
StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
- size_t frameSize);
+ size_t frameSize, uint32_t sampleRate);
protected:
virtual ~StaticAudioTrackServerProxy() { }
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index 712f118..16e794a 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -41,7 +41,7 @@
uint32_t angle, uint32_t bpp, bool hasData, size_t iccSize):
mWidth(width), mHeight(height),
mDisplayWidth(displayWidth), mDisplayHeight(displayHeight),
- mTileWidth(tileWidth), mTileHeight(tileHeight),
+ mTileWidth(tileWidth), mTileHeight(tileHeight), mDurationUs(0),
mRotationAngle(angle), mBytesPerPixel(bpp), mRowBytes(bpp * width),
mSize(hasData ? (bpp * width * height) : 0),
mIccSize(iccSize), mReserved(0) {
@@ -78,6 +78,7 @@
uint32_t mDisplayHeight; // Display height before rotation
uint32_t mTileWidth; // Tile width (0 if image doesn't have grid)
uint32_t mTileHeight; // Tile height (0 if image doesn't have grid)
+ int64_t mDurationUs; // Frame duration in microseconds
int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
uint32_t mBytesPerPixel; // Number of bytes per pixel
uint32_t mRowBytes; // Number of bytes per row before rotation
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index e11af12..6697cb5 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -14,8 +14,6 @@
libcutils \
liblog \
libhidlbase \
- libhidltransport \
- libhwbinder \
libmedia \
libmedialogservice \
libmediautils \
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index 3f3ef69..5484613 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -6,10 +6,10 @@
capabilities BLOCK_SUSPEND
ioprio rt 4
writepid /dev/cpuset/foreground/tasks /dev/stune/foreground/tasks
- onrestart restart vendor.audio-hal-2-0
+ onrestart restart vendor.audio-hal
onrestart restart vendor.audio-hal-4-0-msd
- # Keep the original service name for backward compatibility when upgrading
- # O-MR1 devices with framework-only.
+ # Keep the original service names for backward compatibility
+ onrestart restart vendor.audio-hal-2-0
onrestart restart audio-hal-2-0
on property:vts.native_server.on=1
diff --git a/media/bufferpool/1.0/AccessorImpl.cpp b/media/bufferpool/1.0/AccessorImpl.cpp
index 09006ca..0d7e92f 100644
--- a/media/bufferpool/1.0/AccessorImpl.cpp
+++ b/media/bufferpool/1.0/AccessorImpl.cpp
@@ -14,10 +14,11 @@
* limitations under the License.
*/
-#define LOG_TAG "BufferPoolAccessor"
+#define LOG_TAG "BufferPoolAccessor1.0"
//#define LOG_NDEBUG 0
#include <sys/types.h>
+#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <utils/Log.h>
@@ -127,7 +128,6 @@
return false;
}
-int32_t Accessor::Impl::sPid = getpid();
uint32_t Accessor::Impl::sSeqId = time(nullptr);
Accessor::Impl::Impl(
@@ -145,14 +145,19 @@
{
std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
if (newConnection) {
- ConnectionId id = (int64_t)sPid << 32 | sSeqId;
+ int32_t pid = getpid();
+ ConnectionId id = (int64_t)pid << 32 | sSeqId;
status = mBufferPool.mObserver.open(id, fmqDescPtr);
if (status == ResultStatus::OK) {
newConnection->initialize(accessor, id);
*connection = newConnection;
*pConnectionId = id;
mBufferPool.mConnectionIds.insert(id);
- ++sSeqId;
+ if (sSeqId == UINT32_MAX) {
+ sSeqId = 0;
+ } else {
+ ++sSeqId;
+ }
}
}
mBufferPool.processStatusMessages();
diff --git a/media/bufferpool/1.0/AccessorImpl.h b/media/bufferpool/1.0/AccessorImpl.h
index 84cb685..a09cbe2 100644
--- a/media/bufferpool/1.0/AccessorImpl.h
+++ b/media/bufferpool/1.0/AccessorImpl.h
@@ -61,7 +61,6 @@
// ConnectionId = pid : (timestamp_created + seqId)
// in order to guarantee uniqueness for each connection
static uint32_t sSeqId;
- static int32_t sPid;
const std::shared_ptr<BufferPoolAllocator> mAllocator;
diff --git a/media/bufferpool/1.0/Android.bp b/media/bufferpool/1.0/Android.bp
index c7ea70f..f817c76 100644
--- a/media/bufferpool/1.0/Android.bp
+++ b/media/bufferpool/1.0/Android.bp
@@ -16,8 +16,6 @@
"libcutils",
"libfmq",
"libhidlbase",
- "libhwbinder",
- "libhidltransport",
"liblog",
"libutils",
"android.hardware.media.bufferpool@1.0",
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 929a20e..84ce172 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -14,10 +14,11 @@
* limitations under the License.
*/
-#define LOG_TAG "BufferPoolAccessor"
+#define LOG_TAG "BufferPoolAccessor2.0"
//#define LOG_NDEBUG 0
#include <sys/types.h>
+#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <utils/Log.h>
@@ -134,7 +135,6 @@
return false;
}
-int32_t Accessor::Impl::sPid = getpid();
uint32_t Accessor::Impl::sSeqId = time(nullptr);
Accessor::Impl::Impl(
@@ -156,7 +156,8 @@
{
std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
if (newConnection) {
- ConnectionId id = (int64_t)sPid << 32 | sSeqId;
+ int32_t pid = getpid();
+ ConnectionId id = (int64_t)pid << 32 | sSeqId;
status = mBufferPool.mObserver.open(id, statusDescPtr);
if (status == ResultStatus::OK) {
newConnection->initialize(accessor, id);
@@ -166,7 +167,11 @@
mBufferPool.mConnectionIds.insert(id);
mBufferPool.mInvalidationChannel.getDesc(invDescPtr);
mBufferPool.mInvalidation.onConnect(id, observer);
- ++sSeqId;
+ if (sSeqId == UINT32_MAX) {
+ sSeqId = 0;
+ } else {
+ ++sSeqId;
+ }
}
}
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index 807e0f1..9888be5 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -75,7 +75,6 @@
// ConnectionId = pid : (timestamp_created + seqId)
// in order to guarantee uniqueness for each connection
static uint32_t sSeqId;
- static int32_t sPid;
const std::shared_ptr<BufferPoolAllocator> mAllocator;
diff --git a/media/bufferpool/2.0/Android.bp b/media/bufferpool/2.0/Android.bp
index c71ac17..97f114a 100644
--- a/media/bufferpool/2.0/Android.bp
+++ b/media/bufferpool/2.0/Android.bp
@@ -1,9 +1,5 @@
-cc_library {
- name: "libstagefright_bufferpool@2.0",
- vendor_available: true,
- vndk: {
- enabled: true,
- },
+cc_defaults {
+ name: "libstagefright_bufferpool@2.0-default",
srcs: [
"Accessor.cpp",
"AccessorImpl.cpp",
@@ -20,8 +16,6 @@
"libcutils",
"libfmq",
"libhidlbase",
- "libhwbinder",
- "libhidltransport",
"liblog",
"libutils",
"android.hardware.media.bufferpool@2.0",
@@ -31,3 +25,23 @@
"android.hardware.media.bufferpool@2.0",
],
}
+
+cc_library {
+ name: "libstagefright_bufferpool@2.0.1",
+ defaults: ["libstagefright_bufferpool@2.0-default"],
+ vendor_available: true,
+ cflags: [
+ "-DBUFFERPOOL_CLONE_HANDLES",
+ ],
+}
+
+// Deprecated. Do not use. Use libstagefright_bufferpool@2.0.1 instead.
+cc_library {
+ name: "libstagefright_bufferpool@2.0",
+ defaults: ["libstagefright_bufferpool@2.0-default"],
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
+}
+
diff --git a/media/bufferpool/2.0/ClientManager.cpp b/media/bufferpool/2.0/ClientManager.cpp
index 48c2da4..87ee4e8 100644
--- a/media/bufferpool/2.0/ClientManager.cpp
+++ b/media/bufferpool/2.0/ClientManager.cpp
@@ -351,6 +351,7 @@
}
client = it->second;
}
+#ifdef BUFFERPOOL_CLONE_HANDLES
native_handle_t *origHandle;
ResultStatus res = client->allocate(params, &origHandle, buffer);
if (res != ResultStatus::OK) {
@@ -362,6 +363,9 @@
return ResultStatus::NO_MEMORY;
}
return ResultStatus::OK;
+#else
+ return client->allocate(params, handle, buffer);
+#endif
}
ResultStatus ClientManager::Impl::receive(
@@ -377,6 +381,7 @@
}
client = it->second;
}
+#ifdef BUFFERPOOL_CLONE_HANDLES
native_handle_t *origHandle;
ResultStatus res = client->receive(
transactionId, bufferId, timestampUs, &origHandle, buffer);
@@ -389,6 +394,9 @@
return ResultStatus::NO_MEMORY;
}
return ResultStatus::OK;
+#else
+ return client->receive(transactionId, bufferId, timestampUs, handle, buffer);
+#endif
}
ResultStatus ClientManager::Impl::postSend(
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index a8f39d5..be52a1d 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -521,16 +521,14 @@
mInputSize += outargs.numInSamples * sizeof(int16_t);
}
- if (outargs.numInSamples > 0) {
- inBuffer[0] = (int16_t *)inBuffer[0] + outargs.numInSamples;
- inBufferSize[0] -= outargs.numInSamples * sizeof(int16_t);
- inargs.numInSamples -= outargs.numInSamples;
- }
-
if (inBuffer[0] == mRemainder) {
inBuffer[0] = const_cast<uint8_t *>(data);
inBufferSize[0] = capacity;
inargs.numInSamples = capacity / sizeof(int16_t);
+ } else if (outargs.numInSamples > 0) {
+ inBuffer[0] = (int16_t *)inBuffer[0] + outargs.numInSamples;
+ inBufferSize[0] -= outargs.numInSamples * sizeof(int16_t);
+ inargs.numInSamples -= outargs.numInSamples;
}
}
ALOGV("encoderErr = %d mInputSize = %zu "
diff --git a/media/codec2/components/aom/Android.bp b/media/codec2/components/aom/Android.bp
index 0fabf5c..61dbd4c 100644
--- a/media/codec2/components/aom/Android.bp
+++ b/media/codec2/components/aom/Android.bp
@@ -1,10 +1,16 @@
cc_library_shared {
- name: "libcodec2_soft_av1dec",
+ name: "libcodec2_soft_av1dec_aom",
defaults: [
"libcodec2_soft-defaults",
"libcodec2_soft_sanitize_all-defaults",
],
+ // coordinated with frameworks/av/media/codec2/components/gav1/Android.bp
+ // so only 1 of them has the official c2.android.av1.decoder name
+ cflags: [
+ "-DCODECNAME=\"c2.android.av1-aom.decoder\"",
+ ],
+
srcs: ["C2SoftAomDec.cpp"],
static_libs: ["libaom"],
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 0cf277f..c7046cb 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -29,7 +29,10 @@
namespace android {
-constexpr char COMPONENT_NAME[] = "c2.android.av1.decoder";
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
+
+// codecname set and passed in as a compile flag from Android.bp
+constexpr char COMPONENT_NAME[] = CODECNAME;
class C2SoftAomDec::IntfImpl : public SimpleInterface<void>::BaseParams {
public:
@@ -111,7 +114,7 @@
addParameter(
DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
.withDefault(
- new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 4))
+ new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
.withFields({
C2F(mMaxInputSize, value).any(),
})
@@ -191,8 +194,8 @@
const C2P<C2StreamMaxPictureSizeTuning::output>& maxSize) {
(void)mayBlock;
// assume compression ratio of 2
- me.set().value = (((maxSize.v.width + 63) / 64) *
- ((maxSize.v.height + 63) / 64) * 3072);
+ me.set().value = c2_max((((maxSize.v.width + 63) / 64)
+ * ((maxSize.v.height + 63) / 64) * 3072), kMinInputBufferSize);
return C2R::Ok();
}
static C2R DefaultColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsTuning::output> &me) {
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 3f015b4..2662f0f 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -31,9 +31,10 @@
namespace android {
namespace {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
constexpr char COMPONENT_NAME[] = "c2.android.avc.decoder";
-
+constexpr uint32_t kDefaultOutputDelay = 8;
+constexpr uint32_t kMaxOutputDelay = 16;
} // namespace
class C2SoftAvcDec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -54,7 +55,9 @@
// TODO: Proper support for reorder depth.
addParameter(
DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
- .withConstValue(new C2PortActualDelayTuning::output(8u))
+ .withDefault(new C2PortActualDelayTuning::output(kDefaultOutputDelay))
+ .withFields({C2F(mActualOutputDelay, value).inRange(0, kMaxOutputDelay)})
+ .withSetter(Setter<decltype(*mActualOutputDelay)>::StrictValueWithNoDeps)
.build());
// TODO: output latency and reordering
@@ -111,7 +114,7 @@
addParameter(
DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
- .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 4))
+ .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
.withFields({
C2F(mMaxInputSize, value).any(),
})
@@ -196,7 +199,6 @@
0u, HAL_PIXEL_FORMAT_YCBCR_420_888))
.build());
}
-
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
@@ -225,7 +227,8 @@
const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
(void)mayBlock;
// assume compression ratio of 2
- me.set().value = (((maxSize.v.width + 15) / 16) * ((maxSize.v.height + 15) / 16) * 192);
+ me.set().value = c2_max((((maxSize.v.width + 15) / 16)
+ * ((maxSize.v.height + 15) / 16) * 192), kMinInputBufferSize);
return C2R::Ok();
}
@@ -333,6 +336,7 @@
mDecHandle(nullptr),
mOutBufferFlush(nullptr),
mIvColorFormat(IV_YUV_420P),
+ mOutputDelay(kDefaultOutputDelay),
mWidth(320),
mHeight(240),
mHeaderDecoded(false),
@@ -882,6 +886,26 @@
work->result = C2_CORRUPTED;
return;
}
+ if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
+ mOutputDelay = s_decode_op.i4_reorder_depth;
+ ALOGV("New Output delay %d ", mOutputDelay);
+
+ C2PortActualDelayTuning::output outputDelay(mOutputDelay);
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ c2_status_t err =
+ mIntf->config({&outputDelay}, C2_MAY_BLOCK, &failures);
+ if (err == OK) {
+ work->worklets.front()->output.configUpdate.push_back(
+ C2Param::Copy(outputDelay));
+ } else {
+ ALOGE("Cannot set output delay");
+ mSignalledError = true;
+ work->workletsProcessed = 1u;
+ work->result = C2_CORRUPTED;
+ return;
+ }
+ continue;
+ }
if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
if (mHeaderDecoded == false) {
mHeaderDecoded = true;
diff --git a/media/codec2/components/avc/C2SoftAvcDec.h b/media/codec2/components/avc/C2SoftAvcDec.h
index 72ee583..4414a26 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.h
+++ b/media/codec2/components/avc/C2SoftAvcDec.h
@@ -157,7 +157,7 @@
size_t mNumCores;
IV_COLOR_FORMAT_T mIvColorFormat;
-
+ uint32_t mOutputDelay;
uint32_t mWidth;
uint32_t mHeight;
uint32_t mStride;
diff --git a/media/codec2/components/cmds/Android.bp b/media/codec2/components/cmds/Android.bp
index 35f689e..a081e28 100644
--- a/media/codec2/components/cmds/Android.bp
+++ b/media/codec2/components/cmds/Android.bp
@@ -9,10 +9,15 @@
include_dirs: [
],
+ header_libs: [
+ "libmediadrm_headers",
+ ],
+
shared_libs: [
"libbase",
"libbinder",
"libcutils",
+ "libdatasource",
"libgui",
"liblog",
"libstagefright",
diff --git a/media/codec2/components/cmds/codec2.cpp b/media/codec2/components/cmds/codec2.cpp
index f2cf545..38eaf88 100644
--- a/media/codec2/components/cmds/codec2.cpp
+++ b/media/codec2/components/cmds/codec2.cpp
@@ -30,15 +30,15 @@
#include <binder/IServiceManager.h>
#include <binder/ProcessState.h>
+#include <datasource/DataSourceFactory.h>
#include <media/DataSource.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/IMediaHTTPService.h>
#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaExtractorFactory.h>
@@ -418,7 +418,7 @@
const char *filename = argv[k];
sp<DataSource> dataSource =
- DataSourceFactory::CreateFromURI(nullptr /* httpService */, filename);
+ DataSourceFactory::getInstance()->CreateFromURI(nullptr /* httpService */, filename);
if (strncasecmp(filename, "sine:", 5) && dataSource == nullptr) {
fprintf(stderr, "Unable to create data source.\n");
diff --git a/media/codec2/components/g711/C2SoftG711Dec.cpp b/media/codec2/components/g711/C2SoftG711Dec.cpp
index 43b843a..4ff0793 100644
--- a/media/codec2/components/g711/C2SoftG711Dec.cpp
+++ b/media/codec2/components/g711/C2SoftG711Dec.cpp
@@ -74,7 +74,7 @@
addParameter(
DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
- .withFields({C2F(mChannelCount, value).equalTo(1)})
+ .withFields({C2F(mChannelCount, value).inRange(1, 6)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
diff --git a/media/codec2/components/gav1/Android.bp b/media/codec2/components/gav1/Android.bp
index 0a0545d..5c4abb7 100644
--- a/media/codec2/components/gav1/Android.bp
+++ b/media/codec2/components/gav1/Android.bp
@@ -1,10 +1,16 @@
cc_library_shared {
- name: "libcodec2_soft_gav1dec",
+ name: "libcodec2_soft_av1dec_gav1",
defaults: [
"libcodec2_soft-defaults",
"libcodec2_soft_sanitize_all-defaults",
],
+ // coordinated with frameworks/av/media/codec2/components/aom/Android.bp
+ // so only 1 of them has the official c2.android.av1.decoder name
+ cflags: [
+ "-DCODECNAME=\"c2.android.av1.decoder\"",
+ ],
+
srcs: ["C2SoftGav1Dec.cpp"],
static_libs: ["libgav1"],
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index f5321ba..ec5f549 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -27,7 +27,8 @@
namespace android {
-constexpr char COMPONENT_NAME[] = "c2.android.gav1.decoder";
+// codecname set and passed in as a compile flag from Android.bp
+constexpr char COMPONENT_NAME[] = CODECNAME;
class C2SoftGav1Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
public:
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index 7232572..df677c2 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -33,7 +33,8 @@
namespace {
constexpr char COMPONENT_NAME[] = "c2.android.hevc.decoder";
-
+constexpr uint32_t kDefaultOutputDelay = 8;
+constexpr uint32_t kMaxOutputDelay = 16;
} // namespace
class C2SoftHevcDec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -54,7 +55,9 @@
// TODO: Proper support for reorder depth.
addParameter(
DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
- .withConstValue(new C2PortActualDelayTuning::output(8u))
+ .withDefault(new C2PortActualDelayTuning::output(kDefaultOutputDelay))
+ .withFields({C2F(mActualOutputDelay, value).inRange(0, kMaxOutputDelay)})
+ .withSetter(Setter<decltype(*mActualOutputDelay)>::StrictValueWithNoDeps)
.build());
addParameter(
@@ -327,6 +330,7 @@
mDecHandle(nullptr),
mOutBufferFlush(nullptr),
mIvColorformat(IV_YUV_420P),
+ mOutputDelay(kDefaultOutputDelay),
mWidth(320),
mHeight(240),
mHeaderDecoded(false),
@@ -877,6 +881,26 @@
work->result = C2_CORRUPTED;
return;
}
+ if (s_decode_op.i4_reorder_depth >= 0 && mOutputDelay != s_decode_op.i4_reorder_depth) {
+ mOutputDelay = s_decode_op.i4_reorder_depth;
+ ALOGV("New Output delay %d ", mOutputDelay);
+
+ C2PortActualDelayTuning::output outputDelay(mOutputDelay);
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ c2_status_t err =
+ mIntf->config({&outputDelay}, C2_MAY_BLOCK, &failures);
+ if (err == OK) {
+ work->worklets.front()->output.configUpdate.push_back(
+ C2Param::Copy(outputDelay));
+ } else {
+ ALOGE("Cannot set output delay");
+ mSignalledError = true;
+ work->workletsProcessed = 1u;
+ work->result = C2_CORRUPTED;
+ return;
+ }
+ continue;
+ }
if (0 < s_decode_op.u4_pic_wd && 0 < s_decode_op.u4_pic_ht) {
if (mHeaderDecoded == false) {
mHeaderDecoded = true;
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.h b/media/codec2/components/hevc/C2SoftHevcDec.h
index b7664e6..ce63a6c 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.h
+++ b/media/codec2/components/hevc/C2SoftHevcDec.h
@@ -115,7 +115,7 @@
size_t mNumCores;
IV_COLOR_FORMAT_T mIvColorformat;
-
+ uint32_t mOutputDelay;
uint32_t mWidth;
uint32_t mHeight;
uint32_t mStride;
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index b129b1b..19ccbf9 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -42,6 +42,36 @@
constexpr char COMPONENT_NAME[] = "c2.android.hevc.encoder";
+void ParseGop(
+ const C2StreamGopTuning::output &gop,
+ uint32_t *syncInterval, uint32_t *iInterval, uint32_t *maxBframes) {
+ uint32_t syncInt = 1;
+ uint32_t iInt = 1;
+ for (size_t i = 0; i < gop.flexCount(); ++i) {
+ const C2GopLayerStruct &layer = gop.m.values[i];
+ if (layer.count == UINT32_MAX) {
+ syncInt = 0;
+ } else if (syncInt <= UINT32_MAX / (layer.count + 1)) {
+ syncInt *= (layer.count + 1);
+ }
+ if ((layer.type_ & I_FRAME) == 0) {
+ if (layer.count == UINT32_MAX) {
+ iInt = 0;
+ } else if (iInt <= UINT32_MAX / (layer.count + 1)) {
+ iInt *= (layer.count + 1);
+ }
+ }
+ if (layer.type_ == C2Config::picture_type_t(P_FRAME | B_FRAME) && maxBframes) {
+ *maxBframes = layer.count;
+ }
+ }
+ if (syncInterval) {
+ *syncInterval = syncInt;
+ }
+ if (iInterval) {
+ *iInterval = iInt;
+ }
+}
} // namespace
class C2SoftHevcEnc::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -60,13 +90,21 @@
setDerivedInstance(this);
addParameter(
+ DefineParam(mGop, C2_PARAMKEY_GOP)
+ .withDefault(C2StreamGopTuning::output::AllocShared(
+ 0 /* flexCount */, 0u /* stream */))
+ .withFields({C2F(mGop, m.values[0].type_).any(),
+ C2F(mGop, m.values[0].count).any()})
+ .withSetter(GopSetter)
+ .build());
+
+ addParameter(
DefineParam(mActualInputDelay, C2_PARAMKEY_INPUT_DELAY)
.withDefault(new C2PortActualDelayTuning::input(
DEFAULT_B_FRAMES + DEFAULT_RC_LOOKAHEAD))
.withFields({C2F(mActualInputDelay, value).inRange(
0, MAX_B_FRAMES + MAX_RC_LOOKAHEAD)})
- .withSetter(
- Setter<decltype(*mActualInputDelay)>::StrictValueWithNoDeps)
+ .calculatedAs(InputDelaySetter, mGop)
.build());
addParameter(
@@ -172,6 +210,17 @@
.build());
}
+ static C2R InputDelaySetter(
+ bool mayBlock,
+ C2P<C2PortActualDelayTuning::input> &me,
+ const C2P<C2StreamGopTuning::output> &gop) {
+ (void)mayBlock;
+ uint32_t maxBframes = 0;
+ ParseGop(gop.v, nullptr, nullptr, &maxBframes);
+ me.set().value = maxBframes + DEFAULT_RC_LOOKAHEAD;
+ return C2R::Ok();
+ }
+
static C2R BitrateSetter(bool mayBlock,
C2P<C2StreamBitrateInfo::output>& me) {
(void)mayBlock;
@@ -270,6 +319,18 @@
return C2R::Ok();
}
+ static C2R GopSetter(bool mayBlock, C2P<C2StreamGopTuning::output> &me) {
+ (void)mayBlock;
+ for (size_t i = 0; i < me.v.flexCount(); ++i) {
+ const C2GopLayerStruct &layer = me.v.m.values[0];
+ if (layer.type_ == C2Config::picture_type_t(P_FRAME | B_FRAME)
+ && layer.count > MAX_B_FRAMES) {
+ me.set().m.values[i].count = MAX_B_FRAMES;
+ }
+ }
+ return C2R::Ok();
+ }
+
UWORD32 getProfile_l() const {
switch (mProfileLevel->profile) {
case PROFILE_HEVC_MAIN: [[fallthrough]];
@@ -338,6 +399,9 @@
std::shared_ptr<C2StreamQualityTuning::output> getQuality_l() const {
return mQuality;
}
+ std::shared_ptr<C2StreamGopTuning::output> getGop_l() const {
+ return mGop;
+ }
private:
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
@@ -350,6 +414,7 @@
std::shared_ptr<C2StreamQualityTuning::output> mQuality;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
+ std::shared_ptr<C2StreamGopTuning::output> mGop;
};
static size_t GetCPUCoreCount() {
@@ -449,7 +514,25 @@
ALOGE("HEVC default init failed : 0x%x", err);
return C2_CORRUPTED;
}
-
+ mBframes = 0;
+ if (mGop && mGop->flexCount() > 0) {
+ uint32_t syncInterval = 1;
+ uint32_t iInterval = 1;
+ uint32_t maxBframes = 0;
+ ParseGop(*mGop, &syncInterval, &iInterval, &maxBframes);
+ if (syncInterval > 0) {
+ ALOGD("Updating IDR interval from GOP: old %u new %u", mIDRInterval, syncInterval);
+ mIDRInterval = syncInterval;
+ }
+ if (iInterval > 0) {
+ ALOGD("Updating I interval from GOP: old %u new %u", mIInterval, iInterval);
+ mIInterval = iInterval;
+ }
+ if (mBframes != maxBframes) {
+ ALOGD("Updating max B frames from GOP: old %u new %u", mBframes, maxBframes);
+ mBframes = maxBframes;
+ }
+ }
// update configuration
mEncParams.s_src_prms.i4_width = mSize->width;
mEncParams.s_src_prms.i4_height = mSize->height;
@@ -463,12 +546,20 @@
mBitrate->value << 1;
mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_codec_level = mHevcEncLevel;
mEncParams.s_coding_tools_prms.i4_max_i_open_gop_period = mIDRInterval;
- mEncParams.s_coding_tools_prms.i4_max_cra_open_gop_period = mIDRInterval;
+ mEncParams.s_coding_tools_prms.i4_max_cra_open_gop_period = mIInterval;
mIvVideoColorFormat = IV_YUV_420P;
mEncParams.s_multi_thrd_prms.i4_max_num_cores = mNumCores;
mEncParams.s_out_strm_prms.i4_codec_profile = mHevcEncProfile;
mEncParams.s_lap_prms.i4_rc_look_ahead_pics = DEFAULT_RC_LOOKAHEAD;
- mEncParams.s_coding_tools_prms.i4_max_temporal_layers = DEFAULT_B_FRAMES;
+ if (mBframes == 0) {
+ mEncParams.s_coding_tools_prms.i4_max_temporal_layers = 0;
+ } else if (mBframes <= 2) {
+ mEncParams.s_coding_tools_prms.i4_max_temporal_layers = 1;
+ } else if (mBframes <= 6) {
+ mEncParams.s_coding_tools_prms.i4_max_temporal_layers = 2;
+ } else {
+ mEncParams.s_coding_tools_prms.i4_max_temporal_layers = 3;
+ }
switch (mBitrateMode->value) {
case C2Config::BITRATE_IGNORE:
@@ -523,6 +614,7 @@
c2_status_t C2SoftHevcEnc::initEncoder() {
CHECK(!mCodecCtx);
+
{
IntfImpl::Lock lock = mIntf->lock();
mSize = mIntf->getSize_l();
@@ -532,8 +624,10 @@
mHevcEncProfile = mIntf->getProfile_l();
mHevcEncLevel = mIntf->getLevel_l();
mIDRInterval = mIntf->getSyncFramePeriod_l();
+ mIInterval = mIntf->getSyncFramePeriod_l();
mComplexity = mIntf->getComplexity_l();
mQuality = mIntf->getQuality_l();
+ mGop = mIntf->getGop_l();
}
c2_status_t status = initEncParams();
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
index f2c7642..140b4a9 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.h
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -67,6 +67,8 @@
ihevce_static_cfg_params_t mEncParams;
size_t mNumCores;
UWORD32 mIDRInterval;
+ UWORD32 mIInterval;
+ UWORD32 mBframes;
IV_COLOR_FORMAT_T mIvVideoColorFormat;
UWORD32 mHevcEncProfile;
UWORD32 mHevcEncLevel;
@@ -85,7 +87,7 @@
std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
std::shared_ptr<C2StreamQualityTuning::output> mQuality;
-
+ std::shared_ptr<C2StreamGopTuning::output> mGop;
#ifdef FILE_DUMP_ENABLE
char mInFile[200];
char mOutFile[200];
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index a1f8ff3..e0365fc 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -29,7 +29,7 @@
#include "impeg2d.h"
namespace android {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
constexpr char COMPONENT_NAME[] = "c2.android.mpeg2.decoder";
class C2SoftMpeg2Dec::IntfImpl : public SimpleInterface<void>::BaseParams {
@@ -99,7 +99,7 @@
addParameter(
DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
- .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 2))
+ .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
.withFields({
C2F(mMaxInputSize, value).any(),
})
@@ -213,7 +213,8 @@
const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
(void)mayBlock;
// assume compression ratio of 1
- me.set().value = (((maxSize.v.width + 15) / 16) * ((maxSize.v.height + 15) / 16) * 384);
+ me.set().value = c2_max((((maxSize.v.width + 15) / 16)
+ * ((maxSize.v.height + 15) / 16) * 384), kMinInputBufferSize);
return C2R::Ok();
}
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 7e6685e..61b286c 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -33,7 +33,7 @@
#include "mp4dec_api.h"
namespace android {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
#ifdef MPEG4
constexpr char COMPONENT_NAME[] = "c2.android.mpeg4.decoder";
#else
@@ -149,11 +149,7 @@
addParameter(
DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
-#ifdef MPEG4
- .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 1920 * 1088 * 3 / 2))
-#else
- .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 352 * 288 * 3 / 2))
-#endif
+ .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
.withFields({
C2F(mMaxInputSize, value).any(),
})
@@ -218,7 +214,8 @@
const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
(void)mayBlock;
// assume compression ratio of 1
- me.set().value = (((maxSize.v.width + 15) / 16) * ((maxSize.v.height + 15) / 16) * 384);
+ me.set().value = c2_max((((maxSize.v.width + 15) / 16)
+ * ((maxSize.v.height + 15) / 16) * 384), kMinInputBufferSize);
return C2R::Ok();
}
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
index 36053f6..54c8c47 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
@@ -517,9 +517,11 @@
if (layout.planes[layout.PLANE_Y].colInc == 1
&& layout.planes[layout.PLANE_U].colInc == 1
&& layout.planes[layout.PLANE_V].colInc == 1
+ && yStride == align(width, 16)
&& uStride == vStride
&& yStride == 2 * vStride) {
- // I420 compatible - planes are already set up above
+ // I420 compatible with yStride being equal to aligned width
+ // planes are already set up above
break;
}
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index a52ca15..62076f8 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -30,7 +30,7 @@
#include "C2SoftVpxDec.h"
namespace android {
-
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
#ifdef VP9
constexpr char COMPONENT_NAME[] = "c2.android.vp9.decoder";
#else
@@ -166,7 +166,7 @@
addParameter(
DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
- .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, 320 * 240 * 3 / 4))
+ .withDefault(new C2StreamMaxBufferSizeInfo::input(0u, kMinInputBufferSize))
.withFields({
C2F(mMaxInputSize, value).any(),
})
@@ -244,7 +244,8 @@
const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
(void)mayBlock;
// assume compression ratio of 2
- me.set().value = (((maxSize.v.width + 63) / 64) * ((maxSize.v.height + 63) / 64) * 3072);
+ me.set().value = c2_max((((maxSize.v.width + 63) / 64)
+ * ((maxSize.v.height + 63) / 64) * 3072), kMinInputBufferSize);
return C2R::Ok();
}
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 3820f90..f9eb2fa 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -243,6 +243,9 @@
kParamIndexTimestampGapAdjustment, // input-surface, struct
kParamIndexSurfaceAllocator, // u32
+
+ // low latency mode for decoders
+ kParamIndexLowLatencyMode, // bool
};
}
@@ -521,6 +524,7 @@
PROFILE_DV_HE_07 = _C2_PL_DV_BASE + 7, ///< Dolby Vision dvhe.07 profile
PROFILE_DV_HE_08 = _C2_PL_DV_BASE + 8, ///< Dolby Vision dvhe.08 profile
PROFILE_DV_AV_09 = _C2_PL_DV_BASE + 9, ///< Dolby Vision dvav.09 profile
+ PROFILE_DV_AV1_10 = _C2_PL_DV_BASE + 10, ///< Dolby Vision dav1.10 profile
// AV1 profiles
PROFILE_AV1_0 = _C2_PL_AV1_BASE, ///< AV1 Profile 0 (4:2:0, 8 to 10 bit)
@@ -804,6 +808,15 @@
constexpr char C2_PARAMKEY_PIPELINE_DELAY[] = "algo.delay";
/**
+ * Enable/disable low latency decoding mode.
+ * If true, low latency decoding mode is enabled, and the decoder does not hold input or output
+ * data longer than required by the codec standards.
+ */
+typedef C2GlobalParam<C2Tuning, C2EasyBoolValue, kParamIndexLowLatencyMode>
+ C2GlobalLowLatencyModeTuning;
+constexpr char C2_PARAMKEY_LOW_LATENCY_MODE[] = "algo.low-latency";
+
+/**
* Reference characteristics.
*
* The component may hold onto input and output buffers even after completing the corresponding
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index 63fe36b..4a9dc55 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -6,7 +6,7 @@
defaults: ["hidl_defaults"],
srcs: [
- "ClientBlockHelper.cpp",
+ "OutputBufferQueue.cpp",
"types.cpp",
],
@@ -24,7 +24,7 @@
"libgui",
"libhidlbase",
"liblog",
- "libstagefright_bufferpool@2.0",
+ "libstagefright_bufferpool@2.0.1",
"libui",
"libutils",
],
@@ -37,7 +37,7 @@
"android.hardware.media.c2@1.0",
"libcodec2",
"libgui",
- "libstagefright_bufferpool@2.0",
+ "libstagefright_bufferpool@2.0.1",
"libui",
],
}
@@ -63,6 +63,7 @@
],
header_libs: [
+ "libbinder_headers",
"libsystem_headers",
"libcodec2_internal", // private
],
@@ -80,10 +81,8 @@
"libcodec2_vndk",
"libcutils",
"libhidlbase",
- "libhidltransport",
- "libhwbinder",
"liblog",
- "libstagefright_bufferpool@2.0",
+ "libstagefright_bufferpool@2.0.1",
"libstagefright_bufferqueue_helper",
"libui",
"libutils",
@@ -98,7 +97,7 @@
"libcodec2",
"libcodec2_vndk",
"libhidlbase",
- "libstagefright_bufferpool@2.0",
+ "libstagefright_bufferpool@2.0.1",
"libui",
],
}
diff --git a/media/codec2/hidl/1.0/utils/ClientBlockHelper.cpp b/media/codec2/hidl/1.0/utils/ClientBlockHelper.cpp
deleted file mode 100644
index 50790bc..0000000
--- a/media/codec2/hidl/1.0/utils/ClientBlockHelper.cpp
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Codec2-block_helper"
-#include <android-base/logging.h>
-
-#include <android/hardware/graphics/bufferqueue/2.0/IGraphicBufferProducer.h>
-#include <codec2/hidl/1.0/ClientBlockHelper.h>
-#include <gui/bufferqueue/2.0/B2HGraphicBufferProducer.h>
-
-#include <C2AllocatorGralloc.h>
-#include <C2BlockInternal.h>
-#include <C2Buffer.h>
-#include <C2PlatformSupport.h>
-
-#include <iomanip>
-
-namespace android {
-namespace hardware {
-namespace media {
-namespace c2 {
-namespace V1_0 {
-namespace utils {
-
-using HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
- V2_0::IGraphicBufferProducer;
-using B2HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
- V2_0::utils::B2HGraphicBufferProducer;
-
-namespace /* unnamed */ {
-
-// Create a GraphicBuffer object from a graphic block.
-sp<GraphicBuffer> createGraphicBuffer(const C2ConstGraphicBlock& block) {
- uint32_t width;
- uint32_t height;
- uint32_t format;
- uint64_t usage;
- uint32_t stride;
- uint32_t generation;
- uint64_t bqId;
- int32_t bqSlot;
- _UnwrapNativeCodec2GrallocMetadata(
- block.handle(), &width, &height, &format, &usage,
- &stride, &generation, &bqId, reinterpret_cast<uint32_t*>(&bqSlot));
- native_handle_t *grallocHandle =
- UnwrapNativeCodec2GrallocHandle(block.handle());
- sp<GraphicBuffer> graphicBuffer =
- new GraphicBuffer(grallocHandle,
- GraphicBuffer::CLONE_HANDLE,
- width, height, format,
- 1, usage, stride);
- native_handle_delete(grallocHandle);
- return graphicBuffer;
-}
-
-template <typename BlockProcessor>
-void forEachBlock(C2FrameData& frameData,
- BlockProcessor process) {
- for (const std::shared_ptr<C2Buffer>& buffer : frameData.buffers) {
- if (buffer) {
- for (const C2ConstGraphicBlock& block :
- buffer->data().graphicBlocks()) {
- process(block);
- }
- }
- }
-}
-
-template <typename BlockProcessor>
-void forEachBlock(const std::list<std::unique_ptr<C2Work>>& workList,
- BlockProcessor process) {
- for (const std::unique_ptr<C2Work>& work : workList) {
- if (!work) {
- continue;
- }
- for (const std::unique_ptr<C2Worklet>& worklet : work->worklets) {
- if (worklet) {
- forEachBlock(worklet->output, process);
- }
- }
- }
-}
-
-sp<HGraphicBufferProducer> getHgbp(const sp<IGraphicBufferProducer>& igbp) {
- sp<HGraphicBufferProducer> hgbp =
- igbp->getHalInterface<HGraphicBufferProducer>();
- return hgbp ? hgbp :
- new B2HGraphicBufferProducer(igbp);
-}
-
-status_t attachToBufferQueue(const C2ConstGraphicBlock& block,
- const sp<IGraphicBufferProducer>& igbp,
- uint32_t generation,
- int32_t* bqSlot) {
- if (!igbp) {
- LOG(WARNING) << "attachToBufferQueue -- null producer.";
- return NO_INIT;
- }
-
- sp<GraphicBuffer> graphicBuffer = createGraphicBuffer(block);
- graphicBuffer->setGenerationNumber(generation);
-
- LOG(VERBOSE) << "attachToBufferQueue -- attaching buffer:"
- << " block dimension " << block.width() << "x"
- << block.height()
- << ", graphicBuffer dimension " << graphicBuffer->getWidth() << "x"
- << graphicBuffer->getHeight()
- << std::hex << std::setfill('0')
- << ", format 0x" << std::setw(8) << graphicBuffer->getPixelFormat()
- << ", usage 0x" << std::setw(16) << graphicBuffer->getUsage()
- << std::dec << std::setfill(' ')
- << ", stride " << graphicBuffer->getStride()
- << ", generation " << graphicBuffer->getGenerationNumber();
-
- status_t result = igbp->attachBuffer(bqSlot, graphicBuffer);
- if (result != OK) {
- LOG(WARNING) << "attachToBufferQueue -- attachBuffer failed: "
- "status = " << result << ".";
- return result;
- }
- LOG(VERBOSE) << "attachToBufferQueue -- attachBuffer returned slot #"
- << *bqSlot << ".";
- return OK;
-}
-
-bool getBufferQueueAssignment(const C2ConstGraphicBlock& block,
- uint32_t* generation,
- uint64_t* bqId,
- int32_t* bqSlot) {
- return _C2BlockFactory::GetBufferQueueData(
- _C2BlockFactory::GetGraphicBlockPoolData(block),
- generation, bqId, bqSlot);
-}
-} // unnamed namespace
-
-class OutputBufferQueue::Impl {
- std::mutex mMutex;
- sp<IGraphicBufferProducer> mIgbp;
- uint32_t mGeneration;
- uint64_t mBqId;
- std::shared_ptr<int> mOwner;
- // To migrate existing buffers
- sp<GraphicBuffer> mBuffers[BufferQueueDefs::NUM_BUFFER_SLOTS]; // find a better way
- std::weak_ptr<_C2BlockPoolData>
- mPoolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
-
-public:
- Impl(): mGeneration(0), mBqId(0) {}
-
- bool configure(const sp<IGraphicBufferProducer>& igbp,
- uint32_t generation,
- uint64_t bqId) {
- size_t tryNum = 0;
- size_t success = 0;
- sp<GraphicBuffer> buffers[BufferQueueDefs::NUM_BUFFER_SLOTS];
- std::weak_ptr<_C2BlockPoolData>
- poolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
- {
- std::scoped_lock<std::mutex> l(mMutex);
- if (generation == mGeneration) {
- return false;
- }
- mIgbp = igbp;
- mGeneration = generation;
- mBqId = bqId;
- mOwner = std::make_shared<int>(0);
- for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
- if (mBqId == 0 || !mBuffers[i]) {
- continue;
- }
- std::shared_ptr<_C2BlockPoolData> data = mPoolDatas[i].lock();
- if (!data ||
- !_C2BlockFactory::BeginAttachBlockToBufferQueue(data)) {
- continue;
- }
- ++tryNum;
- int bqSlot;
- mBuffers[i]->setGenerationNumber(generation);
- status_t result = igbp->attachBuffer(&bqSlot, mBuffers[i]);
- if (result != OK) {
- continue;
- }
- bool attach =
- _C2BlockFactory::EndAttachBlockToBufferQueue(
- data, mOwner, getHgbp(mIgbp),
- generation, bqId, bqSlot);
- if (!attach) {
- igbp->cancelBuffer(bqSlot, Fence::NO_FENCE);
- continue;
- }
- buffers[bqSlot] = mBuffers[i];
- poolDatas[bqSlot] = data;
- ++success;
- }
- for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
- mBuffers[i] = buffers[i];
- mPoolDatas[i] = poolDatas[i];
- }
- }
- ALOGD("remote graphic buffer migration %zu/%zu", success, tryNum);
- return true;
- }
-
- bool registerBuffer(const C2ConstGraphicBlock& block) {
- std::shared_ptr<_C2BlockPoolData> data =
- _C2BlockFactory::GetGraphicBlockPoolData(block);
- if (!data) {
- return false;
- }
- std::scoped_lock<std::mutex> l(mMutex);
-
- if (!mIgbp) {
- return false;
- }
-
- uint32_t oldGeneration;
- uint64_t oldId;
- int32_t oldSlot;
- // If the block is not bufferqueue-based, do nothing.
- if (!_C2BlockFactory::GetBufferQueueData(
- data, &oldGeneration, &oldId, &oldSlot) || (oldId == 0)) {
- return false;
- }
- // If the block's bqId is the same as the desired bqId, just hold.
- if ((oldId == mBqId) && (oldGeneration == mGeneration)) {
- LOG(VERBOSE) << "holdBufferQueueBlock -- import without attaching:"
- << " bqId " << oldId
- << ", bqSlot " << oldSlot
- << ", generation " << mGeneration
- << ".";
- _C2BlockFactory::HoldBlockFromBufferQueue(data, mOwner, getHgbp(mIgbp));
- mPoolDatas[oldSlot] = data;
- mBuffers[oldSlot] = createGraphicBuffer(block);
- mBuffers[oldSlot]->setGenerationNumber(mGeneration);
- return true;
- }
- int32_t d = (int32_t) mGeneration - (int32_t) oldGeneration;
- LOG(WARNING) << "receiving stale buffer: generation "
- << mGeneration << " , diff " << d << " : slot "
- << oldSlot;
- return false;
- }
-
- status_t outputBuffer(
- const C2ConstGraphicBlock& block,
- const BnGraphicBufferProducer::QueueBufferInput& input,
- BnGraphicBufferProducer::QueueBufferOutput* output) {
- uint32_t generation;
- uint64_t bqId;
- int32_t bqSlot;
- bool display = displayBufferQueueBlock(block);
- if (!getBufferQueueAssignment(block, &generation, &bqId, &bqSlot) ||
- bqId == 0) {
- // Block not from bufferqueue -- it must be attached before queuing.
-
- mMutex.lock();
- sp<IGraphicBufferProducer> outputIgbp = mIgbp;
- uint32_t outputGeneration = mGeneration;
- mMutex.unlock();
-
- status_t status = attachToBufferQueue(
- block, outputIgbp, outputGeneration, &bqSlot);
- if (status != OK) {
- LOG(WARNING) << "outputBuffer -- attaching failed.";
- return INVALID_OPERATION;
- }
-
- status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
- input, output);
- if (status != OK) {
- LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
- "on non-bufferqueue-based block. "
- "Error = " << status << ".";
- return status;
- }
- return OK;
- }
-
- mMutex.lock();
- sp<IGraphicBufferProducer> outputIgbp = mIgbp;
- uint32_t outputGeneration = mGeneration;
- uint64_t outputBqId = mBqId;
- mMutex.unlock();
-
- if (!outputIgbp) {
- LOG(VERBOSE) << "outputBuffer -- output surface is null.";
- return NO_INIT;
- }
-
- if (!display) {
- LOG(WARNING) << "outputBuffer -- cannot display "
- "bufferqueue-based block to the bufferqueue.";
- return UNKNOWN_ERROR;
- }
- if (bqId != outputBqId || generation != outputGeneration) {
- int32_t diff = (int32_t) outputGeneration - (int32_t) generation;
- LOG(WARNING) << "outputBuffer -- buffers from old generation to "
- << outputGeneration << " , diff: " << diff
- << " , slot: " << bqSlot;
- return DEAD_OBJECT;
- }
-
- status_t status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
- input, output);
- if (status != OK) {
- LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
- "on bufferqueue-based block. "
- "Error = " << status << ".";
- return status;
- }
- return OK;
- }
-
- Impl *getPtr() {
- return this;
- }
-
- ~Impl() {}
-};
-
-OutputBufferQueue::OutputBufferQueue(): mImpl(new Impl()) {}
-
-OutputBufferQueue::~OutputBufferQueue() {}
-
-bool OutputBufferQueue::configure(const sp<IGraphicBufferProducer>& igbp,
- uint32_t generation,
- uint64_t bqId) {
- return mImpl && mImpl->configure(igbp, generation, bqId);
-}
-
-status_t OutputBufferQueue::outputBuffer(
- const C2ConstGraphicBlock& block,
- const BnGraphicBufferProducer::QueueBufferInput& input,
- BnGraphicBufferProducer::QueueBufferOutput* output) {
- if (mImpl) {
- return mImpl->outputBuffer(block, input, output);
- }
- return DEAD_OBJECT;
-}
-
-void OutputBufferQueue::holdBufferQueueBlocks(
- const std::list<std::unique_ptr<C2Work>>& workList) {
- if (!mImpl) {
- return;
- }
- forEachBlock(workList,
- std::bind(&OutputBufferQueue::Impl::registerBuffer,
- mImpl->getPtr(), std::placeholders::_1));
-}
-
-} // namespace utils
-} // namespace V1_0
-} // namespace c2
-} // namespace media
-} // namespace hardware
-} // namespace android
-
diff --git a/media/codec2/hidl/1.0/utils/OutputBufferQueue.cpp b/media/codec2/hidl/1.0/utils/OutputBufferQueue.cpp
new file mode 100644
index 0000000..c4a72ef
--- /dev/null
+++ b/media/codec2/hidl/1.0/utils/OutputBufferQueue.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Codec2-OutputBufferQueue"
+#include <android-base/logging.h>
+
+#include <android/hardware/graphics/bufferqueue/2.0/IGraphicBufferProducer.h>
+#include <codec2/hidl/1.0/OutputBufferQueue.h>
+#include <gui/bufferqueue/2.0/B2HGraphicBufferProducer.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2BlockInternal.h>
+#include <C2Buffer.h>
+#include <C2PlatformSupport.h>
+
+#include <iomanip>
+
+namespace android {
+namespace hardware {
+namespace media {
+namespace c2 {
+namespace V1_0 {
+namespace utils {
+
+using HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
+ V2_0::IGraphicBufferProducer;
+using B2HGraphicBufferProducer = ::android::hardware::graphics::bufferqueue::
+ V2_0::utils::B2HGraphicBufferProducer;
+
+namespace /* unnamed */ {
+
+// Create a GraphicBuffer object from a graphic block.
+sp<GraphicBuffer> createGraphicBuffer(const C2ConstGraphicBlock& block) {
+ uint32_t width;
+ uint32_t height;
+ uint32_t format;
+ uint64_t usage;
+ uint32_t stride;
+ uint32_t generation;
+ uint64_t bqId;
+ int32_t bqSlot;
+ _UnwrapNativeCodec2GrallocMetadata(
+ block.handle(), &width, &height, &format, &usage,
+ &stride, &generation, &bqId, reinterpret_cast<uint32_t*>(&bqSlot));
+ native_handle_t *grallocHandle =
+ UnwrapNativeCodec2GrallocHandle(block.handle());
+ sp<GraphicBuffer> graphicBuffer =
+ new GraphicBuffer(grallocHandle,
+ GraphicBuffer::CLONE_HANDLE,
+ width, height, format,
+ 1, usage, stride);
+ native_handle_delete(grallocHandle);
+ return graphicBuffer;
+}
+
+template <typename BlockProcessor>
+void forEachBlock(C2FrameData& frameData,
+ BlockProcessor process) {
+ for (const std::shared_ptr<C2Buffer>& buffer : frameData.buffers) {
+ if (buffer) {
+ for (const C2ConstGraphicBlock& block :
+ buffer->data().graphicBlocks()) {
+ process(block);
+ }
+ }
+ }
+}
+
+template <typename BlockProcessor>
+void forEachBlock(const std::list<std::unique_ptr<C2Work>>& workList,
+ BlockProcessor process) {
+ for (const std::unique_ptr<C2Work>& work : workList) {
+ if (!work) {
+ continue;
+ }
+ for (const std::unique_ptr<C2Worklet>& worklet : work->worklets) {
+ if (worklet) {
+ forEachBlock(worklet->output, process);
+ }
+ }
+ }
+}
+
+sp<HGraphicBufferProducer> getHgbp(const sp<IGraphicBufferProducer>& igbp) {
+ sp<HGraphicBufferProducer> hgbp =
+ igbp->getHalInterface<HGraphicBufferProducer>();
+ return hgbp ? hgbp :
+ new B2HGraphicBufferProducer(igbp);
+}
+
+status_t attachToBufferQueue(const C2ConstGraphicBlock& block,
+ const sp<IGraphicBufferProducer>& igbp,
+ uint32_t generation,
+ int32_t* bqSlot) {
+ if (!igbp) {
+ LOG(WARNING) << "attachToBufferQueue -- null producer.";
+ return NO_INIT;
+ }
+
+ sp<GraphicBuffer> graphicBuffer = createGraphicBuffer(block);
+ graphicBuffer->setGenerationNumber(generation);
+
+ LOG(VERBOSE) << "attachToBufferQueue -- attaching buffer:"
+ << " block dimension " << block.width() << "x"
+ << block.height()
+ << ", graphicBuffer dimension " << graphicBuffer->getWidth() << "x"
+ << graphicBuffer->getHeight()
+ << std::hex << std::setfill('0')
+ << ", format 0x" << std::setw(8) << graphicBuffer->getPixelFormat()
+ << ", usage 0x" << std::setw(16) << graphicBuffer->getUsage()
+ << std::dec << std::setfill(' ')
+ << ", stride " << graphicBuffer->getStride()
+ << ", generation " << graphicBuffer->getGenerationNumber();
+
+ status_t result = igbp->attachBuffer(bqSlot, graphicBuffer);
+ if (result != OK) {
+ LOG(WARNING) << "attachToBufferQueue -- attachBuffer failed: "
+ "status = " << result << ".";
+ return result;
+ }
+ LOG(VERBOSE) << "attachToBufferQueue -- attachBuffer returned slot #"
+ << *bqSlot << ".";
+ return OK;
+}
+
+bool getBufferQueueAssignment(const C2ConstGraphicBlock& block,
+ uint32_t* generation,
+ uint64_t* bqId,
+ int32_t* bqSlot) {
+ return _C2BlockFactory::GetBufferQueueData(
+ _C2BlockFactory::GetGraphicBlockPoolData(block),
+ generation, bqId, bqSlot);
+}
+
+} // unnamed namespace
+
+OutputBufferQueue::OutputBufferQueue()
+ : mGeneration{0}, mBqId{0} {
+}
+
+OutputBufferQueue::~OutputBufferQueue() {
+}
+
+bool OutputBufferQueue::configure(const sp<IGraphicBufferProducer>& igbp,
+ uint32_t generation,
+ uint64_t bqId) {
+ size_t tryNum = 0;
+ size_t success = 0;
+ sp<GraphicBuffer> buffers[BufferQueueDefs::NUM_BUFFER_SLOTS];
+ std::weak_ptr<_C2BlockPoolData>
+ poolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
+ {
+ std::scoped_lock<std::mutex> l(mMutex);
+ if (generation == mGeneration) {
+ return false;
+ }
+ mIgbp = igbp;
+ mGeneration = generation;
+ mBqId = bqId;
+ mOwner = std::make_shared<int>(0);
+ for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
+ if (mBqId == 0 || !mBuffers[i]) {
+ continue;
+ }
+ std::shared_ptr<_C2BlockPoolData> data = mPoolDatas[i].lock();
+ if (!data ||
+ !_C2BlockFactory::BeginAttachBlockToBufferQueue(data)) {
+ continue;
+ }
+ ++tryNum;
+ int bqSlot;
+ mBuffers[i]->setGenerationNumber(generation);
+ status_t result = igbp->attachBuffer(&bqSlot, mBuffers[i]);
+ if (result != OK) {
+ continue;
+ }
+ bool attach =
+ _C2BlockFactory::EndAttachBlockToBufferQueue(
+ data, mOwner, getHgbp(mIgbp),
+ generation, bqId, bqSlot);
+ if (!attach) {
+ igbp->cancelBuffer(bqSlot, Fence::NO_FENCE);
+ continue;
+ }
+ buffers[bqSlot] = mBuffers[i];
+ poolDatas[bqSlot] = data;
+ ++success;
+ }
+ for (int i = 0; i < BufferQueueDefs::NUM_BUFFER_SLOTS; ++i) {
+ mBuffers[i] = buffers[i];
+ mPoolDatas[i] = poolDatas[i];
+ }
+ }
+ ALOGD("remote graphic buffer migration %zu/%zu", success, tryNum);
+ return true;
+}
+
+bool OutputBufferQueue::registerBuffer(const C2ConstGraphicBlock& block) {
+ std::shared_ptr<_C2BlockPoolData> data =
+ _C2BlockFactory::GetGraphicBlockPoolData(block);
+ if (!data) {
+ return false;
+ }
+ std::scoped_lock<std::mutex> l(mMutex);
+
+ if (!mIgbp) {
+ return false;
+ }
+
+ uint32_t oldGeneration;
+ uint64_t oldId;
+ int32_t oldSlot;
+ // If the block is not bufferqueue-based, do nothing.
+ if (!_C2BlockFactory::GetBufferQueueData(
+ data, &oldGeneration, &oldId, &oldSlot) || (oldId == 0)) {
+ return false;
+ }
+ // If the block's bqId is the same as the desired bqId, just hold.
+ if ((oldId == mBqId) && (oldGeneration == mGeneration)) {
+ LOG(VERBOSE) << "holdBufferQueueBlock -- import without attaching:"
+ << " bqId " << oldId
+ << ", bqSlot " << oldSlot
+ << ", generation " << mGeneration
+ << ".";
+ _C2BlockFactory::HoldBlockFromBufferQueue(data, mOwner, getHgbp(mIgbp));
+ mPoolDatas[oldSlot] = data;
+ mBuffers[oldSlot] = createGraphicBuffer(block);
+ mBuffers[oldSlot]->setGenerationNumber(mGeneration);
+ return true;
+ }
+ int32_t d = (int32_t) mGeneration - (int32_t) oldGeneration;
+ LOG(WARNING) << "receiving stale buffer: generation "
+ << mGeneration << " , diff " << d << " : slot "
+ << oldSlot;
+ return false;
+}
+
+status_t OutputBufferQueue::outputBuffer(
+ const C2ConstGraphicBlock& block,
+ const BnGraphicBufferProducer::QueueBufferInput& input,
+ BnGraphicBufferProducer::QueueBufferOutput* output) {
+ uint32_t generation;
+ uint64_t bqId;
+ int32_t bqSlot;
+ bool display = displayBufferQueueBlock(block);
+ if (!getBufferQueueAssignment(block, &generation, &bqId, &bqSlot) ||
+ bqId == 0) {
+ // Block not from bufferqueue -- it must be attached before queuing.
+
+ mMutex.lock();
+ sp<IGraphicBufferProducer> outputIgbp = mIgbp;
+ uint32_t outputGeneration = mGeneration;
+ mMutex.unlock();
+
+ status_t status = attachToBufferQueue(
+ block, outputIgbp, outputGeneration, &bqSlot);
+ if (status != OK) {
+ LOG(WARNING) << "outputBuffer -- attaching failed.";
+ return INVALID_OPERATION;
+ }
+
+ status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
+ input, output);
+ if (status != OK) {
+ LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
+ "on non-bufferqueue-based block. "
+ "Error = " << status << ".";
+ return status;
+ }
+ return OK;
+ }
+
+ mMutex.lock();
+ sp<IGraphicBufferProducer> outputIgbp = mIgbp;
+ uint32_t outputGeneration = mGeneration;
+ uint64_t outputBqId = mBqId;
+ mMutex.unlock();
+
+ if (!outputIgbp) {
+ LOG(VERBOSE) << "outputBuffer -- output surface is null.";
+ return NO_INIT;
+ }
+
+ if (!display) {
+ LOG(WARNING) << "outputBuffer -- cannot display "
+ "bufferqueue-based block to the bufferqueue.";
+ return UNKNOWN_ERROR;
+ }
+ if (bqId != outputBqId || generation != outputGeneration) {
+ int32_t diff = (int32_t) outputGeneration - (int32_t) generation;
+ LOG(WARNING) << "outputBuffer -- buffers from old generation to "
+ << outputGeneration << " , diff: " << diff
+ << " , slot: " << bqSlot;
+ return DEAD_OBJECT;
+ }
+
+ status_t status = outputIgbp->queueBuffer(static_cast<int>(bqSlot),
+ input, output);
+ if (status != OK) {
+ LOG(ERROR) << "outputBuffer -- queueBuffer() failed "
+ "on bufferqueue-based block. "
+ "Error = " << status << ".";
+ return status;
+ }
+ return OK;
+}
+
+void OutputBufferQueue::holdBufferQueueBlocks(
+ const std::list<std::unique_ptr<C2Work>>& workList) {
+ forEachBlock(workList,
+ std::bind(&OutputBufferQueue::registerBuffer,
+ this, std::placeholders::_1));
+}
+
+} // namespace utils
+} // namespace V1_0
+} // namespace c2
+} // namespace media
+} // namespace hardware
+} // namespace android
+
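Reading aid (not part of the change): the OutputBufferQueue implementation added above is driven by the codec client in three steps — configure() when the output surface or its generation changes, holdBufferQueueBlocks() when work completes so bufferqueue-backed blocks stay registered, and outputBuffer() when a frame is rendered. The sketch below only restates that call order; the ClientSketch wrapper and the pre-built queueInput/queueOutput values are hypothetical.

    // Hypothetical caller-side sketch of the OutputBufferQueue API shown above.
    #include <codec2/hidl/1.0/OutputBufferQueue.h>
    #include <gui/IGraphicBufferProducer.h>
    #include <C2Work.h>

    using ::android::hardware::media::c2::V1_0::utils::OutputBufferQueue;

    struct ClientSketch {
        OutputBufferQueue mQueue;

        // Called when the app sets a new output surface.
        void onSurfaceChanged(const sp<IGraphicBufferProducer>& igbp,
                              uint32_t generation, uint64_t bqId) {
            // Migrates already-registered buffers to the new producer.
            (void)mQueue.configure(igbp, generation, bqId);
        }

        // Called from the codec listener when work items complete.
        void onWorkDone(const std::list<std::unique_ptr<C2Work>>& workList) {
            // Registers every bufferqueue-backed graphic block in the work list.
            mQueue.holdBufferQueueBlocks(workList);
        }

        // Called when a decoded frame should be rendered; queueInput and
        // queueOutput are assumed to be prepared by the caller.
        status_t render(const C2ConstGraphicBlock& block,
                        const BnGraphicBufferProducer::QueueBufferInput& queueInput,
                        BnGraphicBufferProducer::QueueBufferOutput* queueOutput) {
            return mQueue.outputBuffer(block, queueInput, queueOutput);
        }
    };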
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ClientBlockHelper.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/OutputBufferQueue.h
similarity index 78%
rename from media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ClientBlockHelper.h
rename to media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/OutputBufferQueue.h
index 0a2298c..80368f7 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/ClientBlockHelper.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/OutputBufferQueue.h
@@ -14,13 +14,15 @@
* limitations under the License.
*/
-#ifndef CLIENT_BLOCK_HELPER_H
-#define CLIENT_BLOCK_HELPER_H
+#ifndef CODEC2_HIDL_V1_0_UTILS_OUTPUT_BUFFER_QUEUE
+#define CODEC2_HIDL_V1_0_UTILS_OUTPUT_BUFFER_QUEUE
#include <gui/IGraphicBufferProducer.h>
#include <codec2/hidl/1.0/types.h>
#include <C2Work.h>
+struct C2_HIDE _C2BlockPoolData;
+
namespace android {
namespace hardware {
namespace media {
@@ -61,8 +63,16 @@
private:
- class Impl;
- std::unique_ptr<Impl> mImpl;
+ std::mutex mMutex;
+ sp<IGraphicBufferProducer> mIgbp;
+ uint32_t mGeneration;
+ uint64_t mBqId;
+ std::shared_ptr<int> mOwner;
+ // To migrate existing buffers
+ sp<GraphicBuffer> mBuffers[BufferQueueDefs::NUM_BUFFER_SLOTS]; // find a better way
+ std::weak_ptr<_C2BlockPoolData> mPoolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
+
+ bool registerBuffer(const C2ConstGraphicBlock& block);
};
} // namespace utils
@@ -72,4 +82,4 @@
} // namespace hardware
} // namespace android
-#endif // CLIENT_BLOCK_HELPER_H
+#endif // CODEC2_HIDL_V1_0_UTILS_OUTPUT_BUFFER_QUEUE
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
index 6469735..a8a552c 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHalMediaC2V1_0TargetAudioDecTest.cpp
@@ -547,6 +547,10 @@
if (mCompName == raw) {
bitStreamInfo[0] = 8000;
bitStreamInfo[1] = 1;
+ } else if (mCompName == g711alaw || mCompName == g711mlaw) {
+ // g711 test data is all 1-channel and has no embedded config info.
+ bitStreamInfo[0] = 8000;
+ bitStreamInfo[1] = 1;
} else {
ASSERT_NO_FATAL_FAILURE(
getInputChannelInfo(mComponent, mCompName, bitStreamInfo));
diff --git a/media/codec2/hidl/1.0/vts/functional/common/README.md b/media/codec2/hidl/1.0/vts/functional/common/README.md
index 50e8356..f2f579c 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/README.md
+++ b/media/codec2/hidl/1.0/vts/functional/common/README.md
@@ -1,31 +1,36 @@
-## Codec2 VTS Hal @ 1.0 tests ##
----
-#### master :
+# Codec2 VTS Hal @ 1.0 tests #
+
+## master :
Functionality of master is to enumerate all the Codec2 components available in C2 media service.
-usage: VtsHalMediaC2V1\_0TargetMasterTest -I default
+usage: `VtsHalMediaC2V1_0TargetMasterTest -I default`
-#### component :
+## component :
Functionality of component test is to validate common functionality across all the Codec2 components available in C2 media service. For a standard C2 component, these tests are expected to pass.
-usage: VtsHalMediaC2V1\_0TargetComponentTest -I software -C <comp name>
-example: VtsHalMediaC2V1\_0TargetComponentTest -I software -C c2.android.vorbis.decoder
+usage: `VtsHalMediaC2V1_0TargetComponentTest -I software -C <comp name>`
-#### audio :
-Functionality of audio test is to validate audio specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
+example: `VtsHalMediaC2V1_0TargetComponentTest -I software -C c2.android.vorbis.decoder`
-usage: VtsHalMediaC2V1\_0TargetAudioDecTest -I default -C <comp name> -P /sdcard/media/
-usage: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C <comp name> -P /sdcard/media/
+## audio :
+Functionality of audio test is to validate audio-specific functionality of Codec2 components. The resource files for this test are taken from `frameworks/av/media/codec2/hidl/1.0/vts/functional/res`. The path to these files on the device can be specified with `-P`. (If the device path is omitted, `/data/local/tmp/media/` is the default value.)
-example: VtsHalMediaC2V1\_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /sdcard/media/
-example: VtsHalMediaC2V1\_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /sdcard/media/
+usage: `VtsHalMediaC2V1_0TargetAudioDecTest -I default -C <comp name> -P <path to resource files>`
-#### video :
-Functionality of video test is to validate video specific functionality Codec2 components. The resource files for this test are taken from media/codec2/hidl/1.0/vts/functional/res. The path to these files on the device is required to be given for bitstream tests.
+usage: `VtsHalMediaC2V1_0TargetAudioEncTest -I software -C <comp name> -P <path to resource files>`
-usage: VtsHalMediaC2V1\_0TargetVideoDecTest -I default -C <comp name> -P /sdcard/media/
-usage: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C <comp name> -P /sdcard/media/
+example: `VtsHalMediaC2V1_0TargetAudioDecTest -I software -C c2.android.flac.decoder -P /data/local/tmp/media/`
-example: VtsHalMediaC2V1\_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /sdcard/media/
-example: VtsHalMediaC2V1\_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /sdcard/media/
+example: `VtsHalMediaC2V1_0TargetAudioEncTest -I software -C c2.android.opus.encoder -P /data/local/tmp/media/`
+
+## video :
+Functionality of video test is to validate video-specific functionality of Codec2 components. The resource files for this test are taken from `frameworks/av/media/codec2/hidl/1.0/vts/functional/res`. The path to these files on the device can be specified with `-P`. (If the device path is omitted, `/data/local/tmp/media/` is the default value.)
+
+usage: `VtsHalMediaC2V1_0TargetVideoDecTest -I default -C <comp name> -P <path to resource files>`
+
+usage: `VtsHalMediaC2V1_0TargetVideoEncTest -I software -C <comp name> -P <path to resource files>`
+
+example: `VtsHalMediaC2V1_0TargetVideoDecTest -I software -C c2.android.avc.decoder -P /data/local/tmp/media/`
+
+example: `VtsHalMediaC2V1_0TargetVideoEncTest -I software -C c2.android.vp9.encoder -P /data/local/tmp/media/`
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
index c577dac..db59e54 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.h
@@ -118,7 +118,7 @@
registerTestService<IComponentStore>();
}
- ComponentTestEnvironment() : res("/sdcard/media/") {}
+ ComponentTestEnvironment() : res("/data/local/tmp/media/") {}
void setComponent(const char* _component) { component = _component; }
diff --git a/media/codec2/hidl/client/Android.bp b/media/codec2/hidl/client/Android.bp
index 6038a40..89c1c4a 100644
--- a/media/codec2/hidl/client/Android.bp
+++ b/media/codec2/hidl/client/Android.bp
@@ -17,9 +17,8 @@
"libcutils",
"libgui",
"libhidlbase",
- "libhidltransport",
"liblog",
- "libstagefright_bufferpool@2.0",
+ "libstagefright_bufferpool@2.0.1",
"libui",
"libutils",
],
diff --git a/media/codec2/hidl/client/client.cpp b/media/codec2/hidl/client/client.cpp
index 5ed54f1..c747190 100644
--- a/media/codec2/hidl/client/client.cpp
+++ b/media/codec2/hidl/client/client.cpp
@@ -125,6 +125,9 @@
if (!mClient) {
mClient = Codec2Client::_CreateFromIndex(mIndex);
}
+ CHECK(mClient) << "Failed to create Codec2Client to service \""
+ << GetServiceNames()[mIndex] << "\". (Index = "
+ << mIndex << ").";
return mClient;
}
@@ -832,6 +835,7 @@
c2_status_t Codec2Client::ForAllServices(
const std::string &key,
+ size_t numberOfAttempts,
std::function<c2_status_t(const std::shared_ptr<Codec2Client>&)>
predicate) {
c2_status_t status = C2_NO_INIT; // no IComponentStores present
@@ -860,33 +864,45 @@
for (size_t index : indices) {
Cache& cache = Cache::List()[index];
- std::shared_ptr<Codec2Client> client{cache.getClient()};
- if (client) {
+ for (size_t tries = numberOfAttempts; tries > 0; --tries) {
+ std::shared_ptr<Codec2Client> client{cache.getClient()};
status = predicate(client);
if (status == C2_OK) {
std::scoped_lock lock{key2IndexMutex};
key2Index[key] = index; // update last known client index
return C2_OK;
+ } else if (status == C2_TRANSACTION_FAILED) {
+ LOG(WARNING) << "\"" << key << "\" failed for service \""
+ << client->getName()
+ << "\" due to transaction failure. "
+ << "(Service may have crashed.)"
+ << (tries > 1 ? " Retrying..." : "");
+ cache.invalidate();
+ continue;
}
- }
- if (wasMapped) {
- LOG(INFO) << "Could not find \"" << key << "\""
- " in the last instance. Retrying...";
- wasMapped = false;
- cache.invalidate();
+ if (wasMapped) {
+ LOG(INFO) << "\"" << key << "\" became invalid in service \""
+ << client->getName() << "\". Retrying...";
+ wasMapped = false;
+ }
+ break;
}
}
- return status; // return the last status from a valid client
+ return status; // return the last status from a valid client
}
std::shared_ptr<Codec2Client::Component>
Codec2Client::CreateComponentByName(
const char* componentName,
const std::shared_ptr<Listener>& listener,
- std::shared_ptr<Codec2Client>* owner) {
+ std::shared_ptr<Codec2Client>* owner,
+ size_t numberOfAttempts) {
+ std::string key{"create:"};
+ key.append(componentName);
std::shared_ptr<Component> component;
c2_status_t status = ForAllServices(
- componentName,
+ key,
+ numberOfAttempts,
[owner, &component, componentName, &listener](
const std::shared_ptr<Codec2Client> &client)
-> c2_status_t {
@@ -907,8 +923,9 @@
return status;
});
if (status != C2_OK) {
- LOG(DEBUG) << "Could not create component \"" << componentName << "\". "
- "Status = " << status << ".";
+ LOG(DEBUG) << "Failed to create component \"" << componentName
+ << "\" from all known services. "
+ "Last returned status = " << status << ".";
}
return component;
}
@@ -916,10 +933,14 @@
std::shared_ptr<Codec2Client::Interface>
Codec2Client::CreateInterfaceByName(
const char* interfaceName,
- std::shared_ptr<Codec2Client>* owner) {
+ std::shared_ptr<Codec2Client>* owner,
+ size_t numberOfAttempts) {
+ std::string key{"create:"};
+ key.append(interfaceName);
std::shared_ptr<Interface> interface;
c2_status_t status = ForAllServices(
- interfaceName,
+ key,
+ numberOfAttempts,
[owner, &interface, interfaceName](
const std::shared_ptr<Codec2Client> &client)
-> c2_status_t {
@@ -939,8 +960,9 @@
return status;
});
if (status != C2_OK) {
- LOG(DEBUG) << "Could not create interface \"" << interfaceName << "\". "
- "Status = " << status << ".";
+ LOG(DEBUG) << "Failed to create interface \"" << interfaceName
+ << "\" from all known services. "
+ "Last returned status = " << status << ".";
}
return interface;
}
diff --git a/media/codec2/hidl/client/include/codec2/hidl/client.h b/media/codec2/hidl/client/include/codec2/hidl/client.h
index b8a7fb5..dca28f7 100644
--- a/media/codec2/hidl/client/include/codec2/hidl/client.h
+++ b/media/codec2/hidl/client/include/codec2/hidl/client.h
@@ -18,7 +18,7 @@
#define CODEC2_HIDL_CLIENT_H
#include <gui/IGraphicBufferProducer.h>
-#include <codec2/hidl/1.0/ClientBlockHelper.h>
+#include <codec2/hidl/1.0/OutputBufferQueue.h>
#include <C2PlatformSupport.h>
#include <C2Component.h>
#include <C2Buffer.h>
@@ -179,17 +179,21 @@
static std::vector<std::shared_ptr<Codec2Client>> CreateFromAllServices();
// Try to create a component with a given name from all known
- // IComponentStore services.
+ // IComponentStore services. numberOfAttempts determines the number of times
+ // to retry the HIDL call if the transaction fails.
static std::shared_ptr<Component> CreateComponentByName(
char const* componentName,
std::shared_ptr<Listener> const& listener,
- std::shared_ptr<Codec2Client>* owner = nullptr);
+ std::shared_ptr<Codec2Client>* owner = nullptr,
+ size_t numberOfAttempts = 10);
// Try to create a component interface with a given name from all known
- // IComponentStore services.
+ // IComponentStore services. numberOfAttempts determines the number of times
+ // to retry the HIDL call if the transaction fails.
static std::shared_ptr<Interface> CreateInterfaceByName(
char const* interfaceName,
- std::shared_ptr<Codec2Client>* owner = nullptr);
+ std::shared_ptr<Codec2Client>* owner = nullptr,
+ size_t numberOfAttempts = 10);
// List traits from all known IComponentStore services.
static std::vector<C2Component::Traits> const& ListComponents();
@@ -204,11 +208,25 @@
protected:
sp<Base> mBase;
- // Finds the first store where the predicate returns OK, and returns the last
- // predicate result. Uses key to remember the last store found, and if cached,
- // it tries that store before trying all stores (one retry).
+ // Finds the first store where the predicate returns C2_OK and returns the
+ // last predicate result. The predicate will be tried on all stores. The
+ // function will return C2_OK the first time the predicate returns C2_OK,
+ // or it will return the value from the last time that predicate is tried.
+ // (The latter case corresponds to a failure on every store.) The order of
+ // the stores to try is the same as the return value of GetServiceNames().
+ //
+ // key is used to remember the last store with which the predicate last
+ // succeeded. If the last successful store is cached, it will be tried
+ // first before all the stores are tried. Note that the last successful
+ // store will be tried twice---first before all the stores, and another time
+ // with all the stores.
+ //
+ // If an attempt to evaluate the predicate results in a transaction failure,
+ // repeated attempts will be made until the predicate returns without a
+ // transaction failure or numberOfAttempts attempts have been made.
static c2_status_t ForAllServices(
const std::string& key,
+ size_t numberOfAttempts,
std::function<c2_status_t(std::shared_ptr<Codec2Client> const&)>
predicate);
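Reading aid (not part of the change): the retry contract documented in the comment above boils down to the loop sketched below. The service list and predicate are hypothetical stand-ins, and the key -> index cache consulted by the real code is omitted; only transaction failures consume the new numberOfAttempts budget.

    // Simplified sketch of the per-store retry behavior described above.
    #include <functional>
    #include <memory>
    #include <vector>
    #include <codec2/hidl/client.h>

    c2_status_t forAllServicesSketch(
            const std::vector<std::shared_ptr<Codec2Client>>& stores,
            size_t numberOfAttempts,
            const std::function<c2_status_t(const std::shared_ptr<Codec2Client>&)>& predicate) {
        c2_status_t status = C2_NO_INIT;  // no IComponentStores present
        for (const std::shared_ptr<Codec2Client>& store : stores) {
            for (size_t tries = numberOfAttempts; tries > 0; --tries) {
                status = predicate(store);
                if (status == C2_OK) {
                    return C2_OK;                  // first success wins
                }
                if (status != C2_TRANSACTION_FAILED) {
                    break;                         // only transaction failures are retried
                }
                // The real implementation invalidates the cached client here so
                // the next attempt reconnects to the (possibly restarted) service.
            }
        }
        return status;                             // last status from the last store tried
    }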
diff --git a/media/codec2/hidl/services/Android.bp b/media/codec2/hidl/services/Android.bp
index 216525e..0403a1f 100644
--- a/media/codec2/hidl/services/Android.bp
+++ b/media/codec2/hidl/services/Android.bp
@@ -17,8 +17,6 @@
"libcodec2_hidl@1.0",
"libcodec2_vndk",
"libhidlbase",
- "libhidltransport",
- "libhwbinder",
"liblog",
"libstagefright_omx",
"libstagefright_xmlparser",
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 9c84c71..e174751 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -9,7 +9,6 @@
"CCodecConfig.cpp",
"Codec2Buffer.cpp",
"Codec2InfoBuilder.cpp",
- "Omx2IGraphicBufferSource.cpp",
"PipelineWatcher.cpp",
"ReflectedParamUpdater.cpp",
"SkipCutBuffer.cpp",
@@ -22,16 +21,16 @@
header_libs: [
"libcodec2_internal",
+ "libmediadrm_headers",
+ "media_ndk_headers",
],
shared_libs: [
"android.hardware.cas.native@1.0",
- "android.hardware.graphics.bufferqueue@1.0",
"android.hardware.media.c2@1.0",
"android.hardware.media.omx@1.0",
"libbase",
"libbinder",
- "libcodec2",
"libcodec2_client",
"libcodec2_vndk",
"libcutils",
@@ -39,14 +38,13 @@
"libhidlallocatorutils",
"libhidlbase",
"liblog",
- "libmedia",
+ "libmedia_codeclist",
"libmedia_omx",
"libsfplugin_ccodec_utils",
"libstagefright_bufferqueue_helper",
"libstagefright_codecbase",
"libstagefright_foundation",
"libstagefright_omx",
- "libstagefright_omx_utils",
"libstagefright_xmlparser",
"libui",
"libutils",
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 8223273..1cbbbb8 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -26,7 +26,6 @@
#include <C2ParamInternal.h>
#include <C2PlatformSupport.h>
-#include <android/IGraphicBufferSource.h>
#include <android/IOMXBufferSource.h>
#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmx.h>
@@ -35,8 +34,11 @@
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <gui/bufferqueue/1.0/H2BGraphicBufferProducer.h>
-#include <media/omx/1.0/WGraphicBufferSource.h>
+#include <media/omx/1.0/WOmxNode.h>
+#include <media/openmax/OMX_Core.h>
#include <media/openmax/OMX_IndexExt.h>
+#include <media/stagefright/omx/1.0/WGraphicBufferSource.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
#include <media/stagefright/BufferProducerWrapper.h>
#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/PersistentSurface.h>
@@ -45,7 +47,6 @@
#include "CCodec.h"
#include "CCodecBufferChannel.h"
#include "InputSurfaceWrapper.h"
-#include "Omx2IGraphicBufferSource.h"
extern "C" android::PersistentSurface *CreateInputSurface();
@@ -54,9 +55,10 @@
using namespace std::chrono_literals;
using ::android::hardware::graphics::bufferqueue::V1_0::utils::H2BGraphicBufferProducer;
using android::base::StringPrintf;
-using BGraphicBufferSource = ::android::IGraphicBufferSource;
using ::android::hardware::media::c2::V1_0::IInputSurface;
+typedef hardware::media::omx::V1_0::IGraphicBufferSource HGraphicBufferSource;
+
namespace {
class CCodecWatchdog : public AHandler {
@@ -180,9 +182,10 @@
class GraphicBufferSourceWrapper : public InputSurfaceWrapper {
public:
-// explicit GraphicBufferSourceWrapper(const sp<BGraphicBufferSource> &source) : mSource(source) {}
+ typedef hardware::media::omx::V1_0::Status OmxStatus;
+
GraphicBufferSourceWrapper(
- const sp<BGraphicBufferSource> &source,
+ const sp<HGraphicBufferSource> &source,
uint32_t width,
uint32_t height,
uint64_t usage)
@@ -194,6 +197,7 @@
status_t connect(const std::shared_ptr<Codec2Client::Component> &comp) override {
mNode = new C2OMXNode(comp);
+ mOmxNode = new hardware::media::omx::V1_0::utils::TWOmxNode(mNode);
mNode->setFrameSize(mWidth, mHeight);
// Usage is queried during configure(), so setting it beforehand.
@@ -204,7 +208,8 @@
// NOTE: we do not use/pass through color aspects from GraphicBufferSource as we
// communicate that directly to the component.
- mSource->configure(mNode, mDataSpace);
+ mSource->configure(
+ mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace));
return OK;
}
@@ -220,21 +225,16 @@
source->onOmxIdle();
source->onOmxLoaded();
mNode.clear();
+ mOmxNode.clear();
}
- status_t GetStatus(const binder::Status &status) {
- status_t err = OK;
- if (!status.isOk()) {
- err = status.serviceSpecificErrorCode();
- if (err == OK) {
- err = status.transactionError();
- if (err == OK) {
- // binder status failed, but there is no servie or transaction error
- err = UNKNOWN_ERROR;
- }
- }
+ status_t GetStatus(hardware::Return<OmxStatus> &&status) {
+ if (status.isOk()) {
+ return static_cast<status_t>(status.withDefault(OmxStatus::UNKNOWN_ERROR));
+ } else if (status.isDeadObject()) {
+ return DEAD_OBJECT;
}
- return err;
+ return UNKNOWN_ERROR;
}
status_t start() override {
@@ -359,7 +359,15 @@
err = res;
} else {
status << " delayUs";
- res = GetStatus(mSource->getStopTimeOffsetUs(&config.mInputDelayUs));
+ hardware::Return<void> trans = mSource->getStopTimeOffsetUs(
+ [&res, &delayUs = config.mInputDelayUs](
+ auto status, auto stopTimeOffsetUs) {
+ res = static_cast<status_t>(status);
+ delayUs = stopTimeOffsetUs;
+ });
+ if (!trans.isOk()) {
+ res = trans.isDeadObject() ? DEAD_OBJECT : UNKNOWN_ERROR;
+ }
if (res != OK) {
status << " (=> " << asString(res) << ")";
} else {
@@ -388,8 +396,9 @@
}
private:
- sp<BGraphicBufferSource> mSource;
+ sp<HGraphicBufferSource> mSource;
sp<C2OMXNode> mNode;
+ sp<hardware::media::omx::V1_0::IOmxNode> mOmxNode;
uint32_t mWidth;
uint32_t mHeight;
Config mConfig;
@@ -814,9 +823,17 @@
}
{
- double value;
- if (msg->findDouble("time-lapse-fps", &value)) {
- config->mISConfig->mCaptureFps = value;
+ bool captureFpsFound = false;
+ double timeLapseFps;
+ float captureRate;
+ if (msg->findDouble("time-lapse-fps", &timeLapseFps)) {
+ config->mISConfig->mCaptureFps = timeLapseFps;
+ captureFpsFound = true;
+ } else if (msg->findAsFloat(KEY_CAPTURE_RATE, &captureRate)) {
+ config->mISConfig->mCaptureFps = captureRate;
+ captureFpsFound = true;
+ }
+ if (captureFpsFound) {
(void)msg->findAsFloat(KEY_FRAME_RATE, &config->mISConfig->mCodedFps);
}
}
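Illustration only (not part of the change): the hunk above lets the input-surface capture rate come from either "time-lapse-fps" (a double) or KEY_CAPTURE_RATE, with KEY_FRAME_RATE supplying the coded rate. A surface-input encoder could be set up for 1 fps capture encoded as a 30 fps stream roughly as below; the literal NDK key strings are assumptions based on the SDK constants named in the diff.

    // Hypothetical NDK-side time-lapse configuration sketch.
    #include <media/NdkMediaFormat.h>

    AMediaFormat* makeTimeLapseFormat() {
        AMediaFormat* format = AMediaFormat_new();
        AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/avc");
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_WIDTH, 1280);
        AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_HEIGHT, 720);
        AMediaFormat_setFloat(format, "frame-rate", 30.0f);   // KEY_FRAME_RATE -> mCodedFps
        AMediaFormat_setFloat(format, "capture-rate", 1.0f);  // KEY_CAPTURE_RATE -> mCaptureFps
        return format;
    }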
@@ -1084,9 +1101,7 @@
gbs = source;
});
if (transStatus.isOk() && s == OmxStatus::OK) {
- return new PersistentSurface(
- new H2BGraphicBufferProducer(gbp),
- sp<::android::IGraphicBufferSource>(new LWGraphicBufferSource(gbs)));
+ return new PersistentSurface(new H2BGraphicBufferProducer(gbp), gbs);
}
return nullptr;
@@ -1117,28 +1132,28 @@
}
sp<PersistentSurface> persistentSurface = CreateCompatibleInputSurface();
+ sp<hidl::base::V1_0::IBase> hidlTarget = persistentSurface->getHidlTarget();
+ sp<IInputSurface> hidlInputSurface = IInputSurface::castFrom(hidlTarget);
+ sp<HGraphicBufferSource> gbs = HGraphicBufferSource::castFrom(hidlTarget);
- if (persistentSurface->getHidlTarget()) {
- sp<IInputSurface> hidlInputSurface = IInputSurface::castFrom(
- persistentSurface->getHidlTarget());
- if (!hidlInputSurface) {
- ALOGE("Corrupted input surface");
- mCallback->onInputSurfaceCreationFailed(UNKNOWN_ERROR);
- return;
- }
+ if (hidlInputSurface) {
std::shared_ptr<Codec2Client::InputSurface> inputSurface =
std::make_shared<Codec2Client::InputSurface>(hidlInputSurface);
err = setupInputSurface(std::make_shared<C2InputSurfaceWrapper>(
inputSurface));
bufferProducer = inputSurface->getGraphicBufferProducer();
- } else {
+ } else if (gbs) {
int32_t width = 0;
(void)outputFormat->findInt32("width", &width);
int32_t height = 0;
(void)outputFormat->findInt32("height", &height);
err = setupInputSurface(std::make_shared<GraphicBufferSourceWrapper>(
- persistentSurface->getBufferSource(), width, height, usage));
+ gbs, width, height, usage));
bufferProducer = persistentSurface->getBufferProducer();
+ } else {
+ ALOGE("Corrupted input surface");
+ mCallback->onInputSurfaceCreationFailed(UNKNOWN_ERROR);
+ return;
}
if (err != OK) {
@@ -1204,15 +1219,10 @@
outputFormat = config->mOutputFormat;
usage = config->mISConfig ? config->mISConfig->mUsage : 0;
}
- auto hidlTarget = surface->getHidlTarget();
- if (hidlTarget) {
- sp<IInputSurface> inputSurface =
- IInputSurface::castFrom(hidlTarget);
- if (!inputSurface) {
- ALOGE("Failed to set input surface: Corrupted surface.");
- mCallback->onInputSurfaceDeclined(UNKNOWN_ERROR);
- return;
- }
+ sp<hidl::base::V1_0::IBase> hidlTarget = surface->getHidlTarget();
+ sp<IInputSurface> inputSurface = IInputSurface::castFrom(hidlTarget);
+ sp<HGraphicBufferSource> gbs = HGraphicBufferSource::castFrom(hidlTarget);
+ if (inputSurface) {
status_t err = setupInputSurface(std::make_shared<C2InputSurfaceWrapper>(
std::make_shared<Codec2Client::InputSurface>(inputSurface)));
if (err != OK) {
@@ -1220,18 +1230,22 @@
mCallback->onInputSurfaceDeclined(err);
return;
}
- } else {
+ } else if (gbs) {
int32_t width = 0;
(void)outputFormat->findInt32("width", &width);
int32_t height = 0;
(void)outputFormat->findInt32("height", &height);
status_t err = setupInputSurface(std::make_shared<GraphicBufferSourceWrapper>(
- surface->getBufferSource(), width, height, usage));
+ gbs, width, height, usage));
if (err != OK) {
ALOGE("Failed to set up input surface: %d", err);
mCallback->onInputSurfaceDeclined(err);
return;
}
+ } else {
+ ALOGE("Failed to set input surface: Corrupted surface.");
+ mCallback->onInputSurfaceDeclined(UNKNOWN_ERROR);
+ return;
}
mCallback->onInputSurfaceAccepted(inputFormat, outputFormat);
}
@@ -1859,6 +1873,7 @@
// Create Codec 2.0 input surface
extern "C" android::PersistentSurface *CreateInputSurface() {
using namespace android;
+ using ::android::hardware::media::omx::V1_0::implementation::TWGraphicBufferSource;
// Attempt to create a Codec2's input surface.
std::shared_ptr<Codec2Client::InputSurface> inputSurface =
Codec2Client::CreateInputSurface();
@@ -1872,9 +1887,7 @@
return nullptr;
}
return new PersistentSurface(
- gbs->getIGraphicBufferProducer(),
- sp<IGraphicBufferSource>(
- new Omx2IGraphicBufferSource(gbs)));
+ gbs->getIGraphicBufferProducer(), new TWGraphicBufferSource(gbs));
} else {
return nullptr;
}
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 0e1bb0a..d61b751 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -29,6 +29,7 @@
#include <android/hardware/cas/native/1.0/IDescrambler.h>
#include <android-base/stringprintf.h>
#include <binder/MemoryDealer.h>
+#include <cutils/properties.h>
#include <gui/Surface.h>
#include <media/openmax/OMX_Core.h>
#include <media/stagefright/foundation/ABuffer.h>
@@ -224,7 +225,7 @@
mFirstValidFrameIndex(0u),
mMetaMode(MODE_NONE),
mInputMetEos(false) {
- mOutputSurface.lock()->maxDequeueBuffers = 2 * kSmoothnessFactor + kRenderingDepth;
+ mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
{
Mutexed<Input>::Locked input(mInput);
input->buffers.reset(new DummyInputBuffers(""));
@@ -948,8 +949,11 @@
uint32_t outputGeneration;
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
- output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+ output->maxDequeueBuffers = numOutputSlots +
reorderDepth.value + kRenderingDepth;
+ if (!secure) {
+ output->maxDequeueBuffers += numInputSlots;
+ }
outputSurface = output->surface ?
output->surface->getIGraphicBufferProducer() : nullptr;
if (outputSurface) {
@@ -1329,14 +1333,18 @@
case C2PortReorderBufferDepthTuning::CORE_INDEX: {
C2PortReorderBufferDepthTuning::output reorderDepth;
if (reorderDepth.updateFrom(*param)) {
+ bool secure = mComponent->getName().find(".secure") != std::string::npos;
mReorderStash.lock()->setDepth(reorderDepth.value);
ALOGV("[%s] onWorkDone: updated reorder depth to %u",
mName, reorderDepth.value);
size_t numOutputSlots = mOutput.lock()->numSlots;
size_t numInputSlots = mInput.lock()->numSlots;
Mutexed<OutputSurface>::Locked output(mOutputSurface);
- output->maxDequeueBuffers = numOutputSlots + numInputSlots +
+ output->maxDequeueBuffers = numOutputSlots +
reorderDepth.value + kRenderingDepth;
+ if (!secure) {
+ output->maxDequeueBuffers += numInputSlots;
+ }
if (output->surface) {
output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
}
@@ -1380,6 +1388,7 @@
if (outputDelay.updateFrom(*param)) {
ALOGV("[%s] onWorkDone: updating output delay %u",
mName, outputDelay.value);
+ bool secure = mComponent->getName().find(".secure") != std::string::npos;
(void)mPipelineWatcher.lock()->outputDelay(outputDelay.value);
bool outputBuffersChanged = false;
@@ -1409,8 +1418,10 @@
uint32_t depth = mReorderStash.lock()->depth();
Mutexed<OutputSurface>::Locked output(mOutputSurface);
- output->maxDequeueBuffers = numOutputSlots + numInputSlots +
- depth + kRenderingDepth;
+ output->maxDequeueBuffers = numOutputSlots + depth + kRenderingDepth;
+ if (!secure) {
+ output->maxDequeueBuffers += numInputSlots;
+ }
if (output->surface) {
output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
}
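For orientation (not part of the change): the three hunks above all compute the same budget, maxDequeueBuffers = numOutputSlots + reorderDepth + kRenderingDepth, adding numInputSlots only for non-secure codecs. A small worked example with purely hypothetical slot counts:

    // Worked example of the dequeue-buffer budget above. All numbers are
    // hypothetical and do not reflect the actual CCodecBufferChannel constants.
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kRenderingDepthExample = 3;

    constexpr size_t maxDequeueBuffers(size_t numOutputSlots, size_t numInputSlots,
                                       uint32_t reorderDepth, bool secure) {
        size_t count = numOutputSlots + reorderDepth + kRenderingDepthExample;
        if (!secure) {
            count += numInputSlots;  // secure decoders skip the input-slot share
        }
        return count;
    }

    static_assert(maxDequeueBuffers(/*out*/ 8, /*in*/ 4, /*reorder*/ 2, /*secure*/ false) == 17);
    static_assert(maxDequeueBuffers(/*out*/ 8, /*in*/ 4, /*reorder*/ 2, /*secure*/ true) == 13);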
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index ee3455d..c0fa138 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -29,7 +29,6 @@
#include <codec2/hidl/client.h>
#include <media/stagefright/foundation/Mutexed.h>
#include <media/stagefright/CodecBase.h>
-#include <media/ICrypto.h>
#include "CCodecBuffers.h"
#include "InputSurfaceWrapper.h"
diff --git a/media/codec2/sfplugin/CCodecBuffers.cpp b/media/codec2/sfplugin/CCodecBuffers.cpp
index 26c702d..ed8b832 100644
--- a/media/codec2/sfplugin/CCodecBuffers.cpp
+++ b/media/codec2/sfplugin/CCodecBuffers.cpp
@@ -878,9 +878,10 @@
switch (c2buffer->data().type()) {
case C2BufferData::LINEAR: {
uint32_t size = kLinearBufferSize;
- const C2ConstLinearBlock &block = c2buffer->data().linearBlocks().front();
- if (block.size() < kMaxLinearBufferSize / 2) {
- size = block.size() * 2;
+ const std::vector<C2ConstLinearBlock> &linear_blocks = c2buffer->data().linearBlocks();
+ const uint32_t block_size = linear_blocks.front().size();
+ if (block_size < kMaxLinearBufferSize / 2) {
+ size = block_size * 2;
} else {
size = kMaxLinearBufferSize;
}
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 5adcd94..ee3cdf6 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -823,6 +823,14 @@
add(ConfigMapper(C2_PARAMKEY_INPUT_TIME_STRETCH, C2_PARAMKEY_INPUT_TIME_STRETCH, "value"));
+ add(ConfigMapper(KEY_LOW_LATENCY, C2_PARAMKEY_LOW_LATENCY_MODE, "value")
+ .limitTo(D::DECODER & (D::CONFIG | D::PARAM))
+ .withMapper([](C2Value v) -> C2Value {
+ int32_t value = 0;
+ (void)v.get(&value);
+ return value == 0 ? C2_FALSE : C2_TRUE;
+ }));
+
/* still to do
constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
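Illustration only (not part of the change): the new mapper above turns any non-zero KEY_LOW_LATENCY value into C2_TRUE for C2_PARAMKEY_LOW_LATENCY_MODE on decoders. A client would opt in at configure time roughly as below; the "low-latency" key string is an assumption based on the SDK constant named in the diff.

    // Hypothetical decoder configuration sketch for the KEY_LOW_LATENCY mapping.
    #include <media/NdkMediaCodec.h>
    #include <media/NdkMediaFormat.h>

    void configureLowLatencyDecoder(AMediaCodec* codec, AMediaFormat* format,
                                    ANativeWindow* surface) {
        AMediaFormat_setInt32(format, "low-latency", 1);  // 0 would map to C2_FALSE
        AMediaCodec_configure(codec, format, surface, /*crypto=*/nullptr, /*flags=*/0);
    }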
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 5c8ad56..b339a92 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -764,7 +764,11 @@
const std::shared_ptr<C2LinearBlock> &block,
const sp<IMemory> &memory,
int32_t heapSeqNum)
- : Codec2Buffer(format, new ABuffer(memory->pointer(), memory->size())),
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ : Codec2Buffer(format, new ABuffer(memory->unsecurePointer(), memory->size())),
mBlock(block),
mMemory(memory),
mHeapSeqNum(heapSeqNum) {
@@ -800,7 +804,7 @@
if (view.size() < length) {
return false;
}
- memcpy(view.data(), decrypted->pointer(), length);
+ memcpy(view.data(), decrypted->unsecurePointer(), length);
return true;
}
diff --git a/media/codec2/sfplugin/Codec2Buffer.h b/media/codec2/sfplugin/Codec2Buffer.h
index 36dcab9..6f87101 100644
--- a/media/codec2/sfplugin/Codec2Buffer.h
+++ b/media/codec2/sfplugin/Codec2Buffer.h
@@ -25,7 +25,7 @@
#include <media/hardware/VideoAPI.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/MediaCodecBuffer.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
namespace android {
diff --git a/media/codec2/sfplugin/tests/Android.bp b/media/codec2/sfplugin/tests/Android.bp
index be7f55c..b6eb2b4 100644
--- a/media/codec2/sfplugin/tests/Android.bp
+++ b/media/codec2/sfplugin/tests/Android.bp
@@ -33,6 +33,10 @@
"frameworks/av/media/codec2/sfplugin",
],
+ header_libs: [
+ "libmediadrm_headers",
+ ],
+
shared_libs: [
"libbinder",
"libcodec2",
diff --git a/media/codec2/sfplugin/tests/MediaCodec_sanity_test.cpp b/media/codec2/sfplugin/tests/MediaCodec_sanity_test.cpp
index ba3687b..6deede0 100644
--- a/media/codec2/sfplugin/tests/MediaCodec_sanity_test.cpp
+++ b/media/codec2/sfplugin/tests/MediaCodec_sanity_test.cpp
@@ -21,7 +21,7 @@
#include <binder/ProcessState.h>
#include <gtest/gtest.h>
#include <gui/Surface.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaCodecBuffer.h>
#include <media/hardware/VideoAPI.h>
#include <media/stagefright/MediaCodec.h>
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index ef6af48..2f3d688 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -190,6 +190,7 @@
{ C2Config::PROFILE_DV_HE_07, DolbyVisionProfileDvheDtb },
{ C2Config::PROFILE_DV_HE_08, DolbyVisionProfileDvheSt },
{ C2Config::PROFILE_DV_AV_09, DolbyVisionProfileDvavSe },
+ { C2Config::PROFILE_DV_AV1_10, DolbyVisionProfileDvav110 },
};
ALookup<C2Config::level_t, int32_t> sH263Levels = {
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index b6ddfab..4c529a6 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -51,12 +51,13 @@
shared_libs: [
"android.hardware.graphics.allocator@2.0",
"android.hardware.graphics.allocator@3.0",
+ "android.hardware.graphics.allocator@4.0",
"android.hardware.graphics.bufferqueue@2.0",
"android.hardware.graphics.mapper@2.0",
"android.hardware.graphics.mapper@3.0",
+ "android.hardware.graphics.mapper@4.0",
"android.hardware.media.bufferpool@2.0",
"libbase",
- "libbinder",
"libcutils",
"libdl",
"libhardware",
@@ -66,7 +67,7 @@
"liblog",
"libnativewindow",
"libstagefright_foundation",
- "libstagefright_bufferpool@2.0",
+ "libstagefright_bufferpool@2.0.1",
"libui",
"libutils",
],
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index af97e61..8fffa5e 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -22,6 +22,8 @@
#include <android/hardware/graphics/mapper/2.0/IMapper.h>
#include <android/hardware/graphics/allocator/3.0/IAllocator.h>
#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+#include <android/hardware/graphics/allocator/4.0/IAllocator.h>
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
#include <cutils/native_handle.h>
#include <hardware/gralloc.h>
@@ -66,6 +68,7 @@
using ::android::hardware::graphics::common::V1_0::BufferUsage;
using PixelFormat2 = ::android::hardware::graphics::common::V1_0::PixelFormat;
using PixelFormat3 = ::android::hardware::graphics::common::V1_2::PixelFormat;
+using PixelFormat4 = ::android::hardware::graphics::common::V1_2::PixelFormat;
using IAllocator2 = ::android::hardware::graphics::allocator::V2_0::IAllocator;
using BufferDescriptor2 = ::android::hardware::graphics::mapper::V2_0::BufferDescriptor;
@@ -77,6 +80,11 @@
using Error3 = ::android::hardware::graphics::mapper::V3_0::Error;
using IMapper3 = ::android::hardware::graphics::mapper::V3_0::IMapper;
+using IAllocator4 = ::android::hardware::graphics::allocator::V4_0::IAllocator;
+using BufferDescriptor4 = ::android::hardware::graphics::mapper::V4_0::BufferDescriptor;
+using Error4 = ::android::hardware::graphics::mapper::V4_0::Error;
+using IMapper4 = ::android::hardware::graphics::mapper::V4_0::IMapper;
+
namespace /* unnamed */ {
struct BufferDescriptorInfo2 {
@@ -89,6 +97,11 @@
uint32_t stride;
};
+struct BufferDescriptorInfo4 {
+ IMapper4::BufferDescriptorInfo mapperInfo;
+ uint32_t stride;
+};
+
/* ===================================== GRALLOC ALLOCATION ==================================== */
c2_status_t maperr2error(Error2 maperr) {
switch (maperr) {
@@ -114,6 +127,18 @@
return C2_CORRUPTED;
}
+c2_status_t maperr2error(Error4 maperr) {
+ switch (maperr) {
+ case Error4::NONE: return C2_OK;
+ case Error4::BAD_DESCRIPTOR: return C2_BAD_VALUE;
+ case Error4::BAD_BUFFER: return C2_BAD_VALUE;
+ case Error4::BAD_VALUE: return C2_BAD_VALUE;
+ case Error4::NO_RESOURCES: return C2_NO_MEMORY;
+ case Error4::UNSUPPORTED: return C2_CANNOT_DO;
+ }
+ return C2_CORRUPTED;
+}
+
bool native_handle_is_invalid(const native_handle_t *const handle) {
// perform basic validation of a native handle
if (handle == nullptr) {
@@ -321,6 +346,12 @@
hidl_handle &hidlHandle,
const C2HandleGralloc *const handle,
C2Allocator::id_t allocatorId);
+ C2AllocationGralloc(
+ const BufferDescriptorInfo4 &info,
+ const sp<IMapper4> &mapper,
+ hidl_handle &hidlHandle,
+ const C2HandleGralloc *const handle,
+ C2Allocator::id_t allocatorId);
int dup() const;
c2_status_t status() const;
@@ -329,6 +360,8 @@
const sp<IMapper2> mMapper2{nullptr};
const BufferDescriptorInfo3 mInfo3{};
const sp<IMapper3> mMapper3{nullptr};
+ const BufferDescriptorInfo4 mInfo4{};
+ const sp<IMapper4> mMapper4{nullptr};
const hidl_handle mHidlHandle;
const C2HandleGralloc *mHandle;
buffer_handle_t mBuffer;
@@ -372,6 +405,23 @@
mAllocatorId(allocatorId) {
}
+C2AllocationGralloc::C2AllocationGralloc(
+ const BufferDescriptorInfo4 &info,
+ const sp<IMapper4> &mapper,
+ hidl_handle &hidlHandle,
+ const C2HandleGralloc *const handle,
+ C2Allocator::id_t allocatorId)
+ : C2GraphicAllocation(info.mapperInfo.width, info.mapperInfo.height),
+ mInfo4(info),
+ mMapper4(mapper),
+ mHidlHandle(std::move(hidlHandle)),
+ mHandle(handle),
+ mBuffer(nullptr),
+ mLockedHandle(nullptr),
+ mLocked(false),
+ mAllocatorId(allocatorId) {
+}
+
C2AllocationGralloc::~C2AllocationGralloc() {
if (mBuffer && mLocked) {
// implementation ignores addresss and rect
@@ -384,12 +434,18 @@
mBuffer)).isOk()) {
ALOGE("failed transaction: freeBuffer");
}
- } else {
+ } else if (mMapper3) {
if (!mMapper3->freeBuffer(const_cast<native_handle_t *>(
mBuffer)).isOk()) {
ALOGE("failed transaction: freeBuffer");
}
+ } else {
+ if (!mMapper4->freeBuffer(const_cast<native_handle_t *>(
+ mBuffer)).isOk()) {
+ ALOGE("failed transaction: freeBuffer");
+ }
}
+
}
if (mHandle) {
native_handle_delete(
@@ -435,7 +491,7 @@
ALOGE("failed transaction: importBuffer");
return C2_CORRUPTED;
}
- } else {
+ } else if (mMapper3) {
if (!mMapper3->importBuffer(
mHidlHandle, [&err, this](const auto &maperr, const auto &buffer) {
err = maperr2error(maperr);
@@ -446,6 +502,17 @@
ALOGE("failed transaction: importBuffer (@3.0)");
return C2_CORRUPTED;
}
+ } else {
+ if (!mMapper4->importBuffer(
+ mHidlHandle, [&err, this](const auto &maperr, const auto &buffer) {
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ mBuffer = static_cast<buffer_handle_t>(buffer);
+ }
+ }).isOk()) {
+ ALOGE("failed transaction: importBuffer (@4.0)");
+ return C2_CORRUPTED;
+ }
}
if (err != C2_OK) {
ALOGD("importBuffer failed: %d", err);
@@ -466,19 +533,29 @@
mBuffer, mInfo2.mapperInfo.width, mInfo2.mapperInfo.height,
(uint32_t)mInfo2.mapperInfo.format, mInfo2.mapperInfo.usage,
mInfo2.stride, generation, igbp_id, igbp_slot);
- } else {
+ } else if (mMapper3) {
mLockedHandle = C2HandleGralloc::WrapAndMoveNativeHandle(
mBuffer, mInfo3.mapperInfo.width, mInfo3.mapperInfo.height,
(uint32_t)mInfo3.mapperInfo.format, mInfo3.mapperInfo.usage,
mInfo3.stride, generation, igbp_id, igbp_slot);
+ } else {
+ mLockedHandle = C2HandleGralloc::WrapAndMoveNativeHandle(
+ mBuffer, mInfo4.mapperInfo.width, mInfo4.mapperInfo.height,
+ (uint32_t)mInfo4.mapperInfo.format, mInfo4.mapperInfo.usage,
+ mInfo4.stride, generation, igbp_id, igbp_slot);
}
}
- PixelFormat3 format = mMapper2 ?
- PixelFormat3(mInfo2.mapperInfo.format) :
- PixelFormat3(mInfo3.mapperInfo.format);
+ PixelFormat4 format;
+ if (mMapper2) {
+ format = PixelFormat4(mInfo2.mapperInfo.format);
+ } else if (mMapper3) {
+ format = PixelFormat4(mInfo3.mapperInfo.format);
+ } else {
+ format = PixelFormat4(mInfo4.mapperInfo.format);
+ }
switch (format) {
- case PixelFormat3::RGBA_1010102: {
+ case PixelFormat4::RGBA_1010102: {
// TRICKY: this is used for media as YUV444 in the case when it is queued directly to a
// Surface. In all other cases it is RGBA. We don't know which case it is here, so
// default to YUV for now.
@@ -500,7 +577,7 @@
ALOGE("failed transaction: lock(RGBA_1010102)");
return C2_CORRUPTED;
}
- } else {
+ } else if (mMapper3) {
if (!mMapper3->lock(
const_cast<native_handle_t *>(mBuffer),
grallocUsage,
@@ -520,6 +597,26 @@
ALOGE("failed transaction: lock(RGBA_1010102) (@3.0)");
return C2_CORRUPTED;
}
+ } else {
+ if (!mMapper4->lock(
+ const_cast<native_handle_t *>(mBuffer),
+ grallocUsage,
+ { (int32_t)rect.left, (int32_t)rect.top,
+ (int32_t)rect.width, (int32_t)rect.height },
+ // TODO: fence
+ hidl_handle(),
+ [&err, &pointer](const auto &maperr, const auto &mapPointer,
+ int32_t bytesPerPixel, int32_t bytesPerStride) {
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ pointer = mapPointer;
+ }
+ (void)bytesPerPixel;
+ (void)bytesPerStride;
+ }).isOk()) {
+ ALOGE("failed transaction: lock(RGBA_1010102) (@4.0)");
+ return C2_CORRUPTED;
+ }
}
if (err != C2_OK) {
ALOGD("lock failed: %d", err);
@@ -533,9 +630,14 @@
layout->type = C2PlanarLayout::TYPE_YUVA;
layout->numPlanes = 4;
layout->rootPlanes = 1;
- int32_t stride = mMapper2 ?
- int32_t(mInfo2.stride) :
- int32_t(mInfo3.stride);
+ int32_t stride;
+ if (mMapper2) {
+ stride = int32_t(mInfo2.stride);
+ } else if (mMapper3) {
+ stride = int32_t(mInfo3.stride);
+ } else {
+ stride = int32_t(mInfo4.stride);
+ }
layout->planes[C2PlanarLayout::PLANE_Y] = {
C2PlaneInfo::CHANNEL_Y, // channel
4, // colInc
@@ -591,10 +693,10 @@
break;
}
- case PixelFormat3::RGBA_8888:
+ case PixelFormat4::RGBA_8888:
// TODO: alpha channel
// fall-through
- case PixelFormat3::RGBX_8888: {
+ case PixelFormat4::RGBX_8888: {
void *pointer = nullptr;
if (mMapper2) {
if (!mMapper2->lock(
@@ -613,7 +715,7 @@
ALOGE("failed transaction: lock(RGBA_8888)");
return C2_CORRUPTED;
}
- } else {
+ } else if (mMapper3) {
if (!mMapper3->lock(
const_cast<native_handle_t *>(mBuffer),
grallocUsage,
@@ -633,6 +735,26 @@
ALOGE("failed transaction: lock(RGBA_8888) (@3.0)");
return C2_CORRUPTED;
}
+ } else {
+ if (!mMapper4->lock(
+ const_cast<native_handle_t *>(mBuffer),
+ grallocUsage,
+ { (int32_t)rect.left, (int32_t)rect.top,
+ (int32_t)rect.width, (int32_t)rect.height },
+ // TODO: fence
+ hidl_handle(),
+ [&err, &pointer](const auto &maperr, const auto &mapPointer,
+ int32_t bytesPerPixel, int32_t bytesPerStride) {
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ pointer = mapPointer;
+ }
+ (void)bytesPerPixel;
+ (void)bytesPerStride;
+ }).isOk()) {
+ ALOGE("failed transaction: lock(RGBA_8888) (@4.0)");
+ return C2_CORRUPTED;
+ }
}
if (err != C2_OK) {
ALOGD("lock failed: %d", err);
@@ -644,9 +766,14 @@
layout->type = C2PlanarLayout::TYPE_RGB;
layout->numPlanes = 3;
layout->rootPlanes = 1;
- int32_t stride = mMapper2 ?
- int32_t(mInfo2.stride) :
- int32_t(mInfo3.stride);
+ int32_t stride;
+ if (mMapper2) {
+ stride = int32_t(mInfo2.stride);
+ } else if (mMapper3) {
+ stride = int32_t(mInfo3.stride);
+ } else {
+ stride = int32_t(mInfo4.stride);
+ }
layout->planes[C2PlanarLayout::PLANE_R] = {
C2PlaneInfo::CHANNEL_R, // channel
4, // colInc
@@ -689,9 +816,9 @@
break;
}
- case PixelFormat3::YCBCR_420_888:
+ case PixelFormat4::YCBCR_420_888:
// fall-through
- case PixelFormat3::YV12:
+ case PixelFormat4::YV12:
// fall-through
default: {
struct YCbCrLayout {
@@ -725,7 +852,7 @@
ALOGE("failed transaction: lockYCbCr");
return C2_CORRUPTED;
}
- } else {
+ } else if (mMapper3) {
if (!mMapper3->lockYCbCr(
const_cast<native_handle_t *>(mBuffer), grallocUsage,
{ (int32_t)rect.left, (int32_t)rect.top,
@@ -747,6 +874,28 @@
ALOGE("failed transaction: lockYCbCr (@3.0)");
return C2_CORRUPTED;
}
+ } else {
+ if (!mMapper4->lockYCbCr(
+ const_cast<native_handle_t *>(mBuffer), grallocUsage,
+ { (int32_t)rect.left, (int32_t)rect.top,
+ (int32_t)rect.width, (int32_t)rect.height },
+ // TODO: fence
+ hidl_handle(),
+ [&err, &ycbcrLayout](const auto &maperr, const auto &mapLayout) {
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ ycbcrLayout = YCbCrLayout{
+ mapLayout.y,
+ mapLayout.cb,
+ mapLayout.cr,
+ mapLayout.yStride,
+ mapLayout.cStride,
+ mapLayout.chromaStep};
+ }
+ }).isOk()) {
+ ALOGE("failed transaction: lockYCbCr (@4.0)");
+ return C2_CORRUPTED;
+ }
}
if (err != C2_OK) {
ALOGD("lockYCbCr failed: %d", err);
@@ -839,7 +988,7 @@
ALOGE("failed transaction: unlock");
return C2_CORRUPTED;
}
- } else {
+ } else if (mMapper3) {
if (!mMapper3->unlock(
const_cast<native_handle_t *>(mBuffer),
[&err, &fence](const auto &maperr, const auto &releaseFence) {
@@ -854,6 +1003,21 @@
ALOGE("failed transaction: unlock (@3.0)");
return C2_CORRUPTED;
}
+ } else {
+ if (!mMapper4->unlock(
+ const_cast<native_handle_t *>(mBuffer),
+ [&err, &fence](const auto &maperr, const auto &releaseFence) {
+ // TODO
+ (void) fence;
+ (void) releaseFence;
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ // TODO: fence
+ }
+ }).isOk()) {
+ ALOGE("failed transaction: unlock (@4.0)");
+ return C2_CORRUPTED;
+ }
}
if (err == C2_OK) {
mLocked = false;
@@ -899,6 +1063,8 @@
sp<IMapper2> mMapper2;
sp<IAllocator3> mAllocator3;
sp<IMapper3> mMapper3;
+ sp<IAllocator4> mAllocator4;
+ sp<IMapper4> mMapper4;
const bool mBufferQueue;
};
@@ -918,17 +1084,23 @@
mTraits = std::make_shared<C2Allocator::Traits>(traits);
// gralloc allocator is a singleton, so all objects share a global service
- mAllocator3 = IAllocator3::getService();
- mMapper3 = IMapper3::getService();
- if (!mAllocator3 || !mMapper3) {
- mAllocator3 = nullptr;
- mMapper3 = nullptr;
- mAllocator2 = IAllocator2::getService();
- mMapper2 = IMapper2::getService();
- if (!mAllocator2 || !mMapper2) {
- mAllocator2 = nullptr;
- mMapper2 = nullptr;
- mInit = C2_CORRUPTED;
+ mAllocator4 = IAllocator4::getService();
+ mMapper4 = IMapper4::getService();
+ if (!mAllocator4 || !mMapper4) {
+ mAllocator4 = nullptr;
+ mMapper4 = nullptr;
+ mAllocator3 = IAllocator3::getService();
+ mMapper3 = IMapper3::getService();
+ if (!mAllocator3 || !mMapper3) {
+ mAllocator3 = nullptr;
+ mMapper3 = nullptr;
+ mAllocator2 = IAllocator2::getService();
+ mMapper2 = IMapper2::getService();
+ if (!mAllocator2 || !mMapper2) {
+ mAllocator2 = nullptr;
+ mMapper2 = nullptr;
+ mInit = C2_CORRUPTED;
+ }
}
}
}
@@ -1000,13 +1172,13 @@
0, 0, mBufferQueue ? ~0 : 0),
mTraits->id));
return C2_OK;
- } else {
+ } else if (mMapper3) {
BufferDescriptorInfo3 info = {
{
width,
height,
1u, // layerCount
- PixelFormat3(format),
+ PixelFormat4(format),
grallocUsage,
},
0u, // stride placeholder
@@ -1057,6 +1229,63 @@
0, 0, mBufferQueue ? ~0 : 0),
mTraits->id));
return C2_OK;
+ } else {
+ BufferDescriptorInfo4 info = {
+ {
+ width,
+ height,
+ 1u, // layerCount
+ PixelFormat4(format),
+ grallocUsage,
+ },
+ 0u, // stride placeholder
+ };
+ BufferDescriptor4 desc;
+ if (!mMapper4->createDescriptor(
+ info.mapperInfo, [&err, &desc](const auto &maperr, const auto &descriptor) {
+ err = maperr2error(maperr);
+ if (err == C2_OK) {
+ desc = descriptor;
+ }
+ }).isOk()) {
+ ALOGE("failed transaction: createDescriptor");
+ return C2_CORRUPTED;
+ }
+ if (err != C2_OK) {
+ return err;
+ }
+
+ // IAllocator shares IMapper error codes.
+ if (!mAllocator4->allocate(
+ desc,
+ 1u,
+ [&err, &buffer, &info](const auto &maperr, const auto &stride, auto &buffers) {
+ err = maperr2error(maperr);
+ if (err != C2_OK) {
+ return;
+ }
+ if (buffers.size() != 1u) {
+ err = C2_CORRUPTED;
+ return;
+ }
+ info.stride = stride;
+ buffer = buffers[0];
+ }).isOk()) {
+ ALOGE("failed transaction: allocate");
+ return C2_CORRUPTED;
+ }
+ if (err != C2_OK) {
+ return err;
+ }
+ allocation->reset(new C2AllocationGralloc(
+ info, mMapper4, buffer,
+ C2HandleGralloc::WrapAndMoveNativeHandle(
+ buffer.getNativeHandle(),
+ width, height,
+ format, grallocUsage, info.stride,
+ 0, 0, mBufferQueue ? ~0 : 0),
+ mTraits->id));
+ return C2_OK;
}
}
@@ -1086,7 +1315,7 @@
allocation->reset(new C2AllocationGralloc(
info, mMapper2, hidlHandle, grallocHandle, mTraits->id));
return C2_OK;
- } else {
+ } else if (mMapper3) {
BufferDescriptorInfo3 info;
info.mapperInfo.layerCount = 1u;
uint32_t generation;
@@ -1109,6 +1338,29 @@
allocation->reset(new C2AllocationGralloc(
info, mMapper3, hidlHandle, grallocHandle, mTraits->id));
return C2_OK;
+ } else {
+ BufferDescriptorInfo4 info;
+ info.mapperInfo.layerCount = 1u;
+ uint32_t generation;
+ uint64_t igbp_id;
+ uint32_t igbp_slot;
+ const C2HandleGralloc *grallocHandle = C2HandleGralloc::Import(
+ handle,
+ &info.mapperInfo.width, &info.mapperInfo.height,
+ (uint32_t *)&info.mapperInfo.format,
+ (uint64_t *)&info.mapperInfo.usage,
+ &info.stride,
+ &generation, &igbp_id, &igbp_slot);
+ if (grallocHandle == nullptr) {
+ return C2_BAD_VALUE;
+ }
+
+ hidl_handle hidlHandle;
+ hidlHandle.setTo(C2HandleGralloc::UnwrapNativeHandle(grallocHandle), true);
+
+ allocation->reset(new C2AllocationGralloc(
+ info, mMapper4, hidlHandle, grallocHandle, mTraits->id));
+ return C2_OK;
}
}
diff --git a/media/codec2/vndk/C2AllocatorIon.cpp b/media/codec2/vndk/C2AllocatorIon.cpp
index 752bc46..0470a31 100644
--- a/media/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/codec2/vndk/C2AllocatorIon.cpp
@@ -600,7 +600,7 @@
}
std::shared_ptr<C2AllocationIon> alloc
- = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, mTraits->id);
+ = std::make_shared<C2AllocationIon>(dup(mIonFd), capacity, align, heapMask, flags, getId());
ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;
@@ -622,7 +622,7 @@
// TODO: get capacity and validate it
const C2HandleIon *h = static_cast<const C2HandleIon*>(handle);
std::shared_ptr<C2AllocationIon> alloc
- = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), mTraits->id);
+ = std::make_shared<C2AllocationIon>(dup(mIonFd), h->size(), h->bufferFd(), getId());
c2_status_t ret = alloc->status();
if (ret == C2_OK) {
*allocation = alloc;
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index 6b4ed35..5b2bd7b 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -848,8 +848,8 @@
emplace("libcodec2_soft_amrnbenc.so");
emplace("libcodec2_soft_amrwbdec.so");
emplace("libcodec2_soft_amrwbenc.so");
- emplace("libcodec2_soft_av1dec.so");
- emplace("libcodec2_soft_gav1dec.so");
+ //emplace("libcodec2_soft_av1dec_aom.so"); // deprecated for the gav1 implementation
+ emplace("libcodec2_soft_av1dec_gav1.so");
emplace("libcodec2_soft_avcdec.so");
emplace("libcodec2_soft_avcenc.so");
emplace("libcodec2_soft_flacdec.so");
diff --git a/media/extractors/amr/AMRExtractor.cpp b/media/extractors/amr/AMRExtractor.cpp
index ffeff42..26431a4 100644
--- a/media/extractors/amr/AMRExtractor.cpp
+++ b/media/extractors/amr/AMRExtractor.cpp
@@ -144,6 +144,7 @@
AMRExtractor::AMRExtractor(DataSourceHelper *source)
: mDataSource(source),
+ mMeta(NULL),
mInitCheck(NO_INIT),
mOffsetTableLength(0) {
float confidence;
@@ -191,7 +192,9 @@
AMRExtractor::~AMRExtractor() {
delete mDataSource;
- AMediaFormat_delete(mMeta);
+ if (mMeta) {
+ AMediaFormat_delete(mMeta);
+ }
}
media_status_t AMRExtractor::getMetaData(AMediaFormat *meta) {
diff --git a/media/extractors/flac/FLACExtractor.h b/media/extractors/flac/FLACExtractor.h
index 5a73d20..223d359 100644
--- a/media/extractors/flac/FLACExtractor.h
+++ b/media/extractors/flac/FLACExtractor.h
@@ -17,7 +17,6 @@
#ifndef FLAC_EXTRACTOR_H_
#define FLAC_EXTRACTOR_H_
-#include <media/DataSourceBase.h>
#include <media/MediaExtractorPluginApi.h>
#include <media/MediaExtractorPluginHelper.h>
#include <media/NdkMediaFormat.h>
diff --git a/media/extractors/midi/Android.bp b/media/extractors/midi/Android.bp
index 7d42e70..d36cb49 100644
--- a/media/extractors/midi/Android.bp
+++ b/media/extractors/midi/Android.bp
@@ -6,6 +6,10 @@
"frameworks/av/media/libstagefright/include",
],
+ header_libs: [
+ "libmedia_headers",
+ ],
+
shared_libs: [
"liblog",
"libmediandk",
diff --git a/media/extractors/midi/MidiExtractor.h b/media/extractors/midi/MidiExtractor.h
index 2e78086..b486fc6 100644
--- a/media/extractors/midi/MidiExtractor.h
+++ b/media/extractors/midi/MidiExtractor.h
@@ -17,7 +17,6 @@
#ifndef MIDI_EXTRACTOR_H_
#define MIDI_EXTRACTOR_H_
-#include <media/DataSourceBase.h>
#include <media/MediaExtractorPluginApi.h>
#include <media/MediaExtractorPluginHelper.h>
#include <media/stagefright/MediaBufferBase.h>
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 36cab1d..81e1b8c 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -133,6 +133,7 @@
bool mIsAVC;
bool mIsHEVC;
+ bool mIsDolbyVision;
bool mIsAC4;
bool mIsPcm;
size_t mNALLengthSize;
@@ -337,6 +338,14 @@
case FOURCC("hvc1"):
case FOURCC("hev1"):
return MEDIA_MIMETYPE_VIDEO_HEVC;
+
+ case FOURCC("dvav"):
+ case FOURCC("dva1"):
+ case FOURCC("dvhe"):
+ case FOURCC("dvh1"):
+ case FOURCC("dav1"):
+ return MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
+
case FOURCC("ac-4"):
return MEDIA_MIMETYPE_AUDIO_AC4;
case FOURCC("Opus"):
@@ -1062,6 +1071,62 @@
mLastTrack->mTx3gBuffer = NULL;
}
+ const char *mime;
+ AMediaFormat_getString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME, &mime);
+
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+ void *data;
+ size_t size;
+
+ if (AMediaFormat_getBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)) {
+ const uint8_t *ptr = (const uint8_t *)data;
+ const uint8_t profile = ptr[2] >> 1;
+ const uint8_t bl_compatibility_id = (ptr[4]) >> 4;
+
+ if (4 == profile || 7 == profile ||
+ (profile >= 8 && profile < 11 && bl_compatibility_id)) {
+ // we need a backward compatible track
+ ALOGV("Adding new backward compatible track");
+ Track *track_b = new Track;
+
+ track_b->timescale = mLastTrack->timescale;
+ track_b->sampleTable = mLastTrack->sampleTable;
+ track_b->includes_expensive_metadata = mLastTrack->includes_expensive_metadata;
+ track_b->skipTrack = mLastTrack->skipTrack;
+ track_b->has_elst = mLastTrack->has_elst;
+ track_b->elst_media_time = mLastTrack->elst_media_time;
+ track_b->elst_segment_duration = mLastTrack->elst_segment_duration;
+ track_b->elstShiftStartTicks = mLastTrack->elstShiftStartTicks;
+ track_b->subsample_encryption = mLastTrack->subsample_encryption;
+
+ track_b->mTx3gBuffer = mLastTrack->mTx3gBuffer;
+ track_b->mTx3gSize = mLastTrack->mTx3gSize;
+ track_b->mTx3gFilled = mLastTrack->mTx3gFilled;
+
+ track_b->meta = AMediaFormat_new();
+ AMediaFormat_copy(track_b->meta, mLastTrack->meta);
+
+ mLastTrack->next = track_b;
+ track_b->next = NULL;
+
+ auto id = track_b->meta->mFormat->findEntryByName(AMEDIAFORMAT_KEY_CSD_2);
+ track_b->meta->mFormat->removeEntryAt(id);
+
+ if (4 == profile || 7 == profile || 8 == profile ) {
+ AMediaFormat_setString(track_b->meta,
+ AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_HEVC);
+ } else if (9 == profile) {
+ AMediaFormat_setString(track_b->meta,
+ AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_AVC);
+ } else if (10 == profile) {
+ AMediaFormat_setString(track_b->meta,
+ AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_AV1);
+ } // Should never get to else part
+
+ mLastTrack = track_b;
+ }
+ }
+ }
} else if (chunk_type == FOURCC("moov")) {
mInitCheck = OK;
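For readability, the profile-to-base-layer mapping applied by this new block can be restated as a small standalone sketch. The helper name is made up for illustration and is not part of the patch; the MIME strings are the values behind the MEDIA_MIMETYPE_VIDEO_* constants used above.

    #include <cstdint>

    // Dolby Vision profiles 4, 7 and 8 expose an HEVC base layer, profile 9 an AVC
    // base layer and profile 10 an AV1 base layer; profiles 8-10 additionally need a
    // non-zero dv_bl_signal_compatibility_id before a compatible track is added.
    static const char *baseLayerMimeForDolbyVision(uint8_t profile, uint8_t blCompatibilityId) {
        const bool compatible = (profile == 4) || (profile == 7) ||
                (profile >= 8 && profile < 11 && blCompatibilityId != 0);
        if (!compatible)    return nullptr;          // no backward compatible track
        if (profile == 9)   return "video/avc";      // MEDIA_MIMETYPE_VIDEO_AVC
        if (profile == 10)  return "video/av01";     // MEDIA_MIMETYPE_VIDEO_AV1
        return "video/hevc";                         // MEDIA_MIMETYPE_VIDEO_HEVC
    }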
@@ -1830,6 +1895,11 @@
case FOURCC("avc1"):
case FOURCC("hvc1"):
case FOURCC("hev1"):
+ case FOURCC("dvav"):
+ case FOURCC("dva1"):
+ case FOURCC("dvhe"):
+ case FOURCC("dvh1"):
+ case FOURCC("dav1"):
case FOURCC("av01"):
{
uint8_t buffer[78];
@@ -1984,7 +2054,8 @@
// for audio, use 128KB
max_size = 1024 * 128;
} else if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
- || !strcmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ || !strcmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)
+ || !strcmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
// AVC & HEVC require a compression ratio of at least 2, and use
// macroblocks
max_size = ((width + 15) / 16) * ((height + 15) / 16) * 192;
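As a worked example of the macroblock-based bound that now also covers Dolby Vision: for a 1920x1080 track, max_size = ((1920 + 15) / 16) * ((1080 + 15) / 16) * 192 = 120 * 68 * 192 = 1,566,720 bytes, i.e. roughly 1.5 MiB per sample.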
@@ -2315,6 +2386,30 @@
*offset += chunk_size;
break;
}
+ case FOURCC("dvcC"):
+ case FOURCC("dvvC"): {
+ auto buffer = heapbuffer<uint8_t>(chunk_data_size);
+
+ if (buffer.get() == NULL) {
+ ALOGE("b/28471206");
+ return NO_MEMORY;
+ }
+
+ if (mDataSource->readAt(data_offset, buffer.get(), chunk_data_size) < chunk_data_size) {
+ return ERROR_IO;
+ }
+
+ if (mLastTrack == NULL)
+ return ERROR_MALFORMED;
+
+ AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_2,
+ buffer.get(), chunk_data_size);
+ AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME,
+ MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
+
+ *offset += chunk_size;
+ break;
+ }
case FOURCC("d263"):
{
*offset += chunk_size;
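The bit positions consumed from this 24-byte payload elsewhere in the file (ptr[2] >> 1 and ptr[4] >> 4) correspond to the leading fields of the Dolby Vision configuration record. A minimal sketch of that extraction, assuming the standard dvcC/dvvC layout; the struct and function names are illustrative only:

    #include <cstddef>
    #include <cstdint>

    struct DolbyVisionConfig {
        uint8_t versionMajor;       // byte 0, expected to be 1 or 2
        uint8_t versionMinor;       // byte 1, expected to be 0 or 1
        uint8_t profile;            // top 7 bits of byte 2
        uint8_t blCompatibilityId;  // top 4 bits of byte 4
    };

    // Parses only the fields MPEG4Extractor looks at; reserved fields are ignored.
    static bool parseDolbyVisionConfig(const uint8_t *ptr, size_t size, DolbyVisionConfig *out) {
        if (ptr == nullptr || out == nullptr || size != 24) return false;
        out->versionMajor = ptr[0];
        out->versionMinor = ptr[1];
        out->profile = ptr[2] >> 1;
        out->blCompatibilityId = ptr[4] >> 4;
        return true;
    }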
@@ -4127,7 +4222,20 @@
if (!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
itemTable = mItemTable;
}
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+ void *data;
+ size_t size;
+ if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)) {
+ return NULL;
+ }
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ // dv_major.dv_minor Should be 1.0 or 2.1
+ if (size != 24 || ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1))) {
+ return NULL;
+ }
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
void *data;
size_t size;
if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
@@ -4172,6 +4280,10 @@
if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_HEVC, &data, &size)) {
return ERROR_MALFORMED;
}
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+ if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)) {
+ return ERROR_MALFORMED;
+ }
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)) {
if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_0, &data, &size)) {
return ERROR_MALFORMED;
@@ -4659,6 +4771,7 @@
mCurrentSampleInfoOffsets(NULL),
mIsAVC(false),
mIsHEVC(false),
+ mIsDolbyVision(false),
mIsAC4(false),
mIsPcm(false),
mNALLengthSize(0),
@@ -4698,6 +4811,7 @@
mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
mIsAC4 = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4);
+ mIsDolbyVision = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
if (mIsAVC) {
void *data;
@@ -4722,6 +4836,42 @@
CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
mNALLengthSize = 1 + (ptr[14 + 7] & 3);
+ } else if (mIsDolbyVision) {
+ ALOGV("%s DolbyVision stream detected", __FUNCTION__);
+ void *data;
+ size_t size;
+ CHECK(AMediaFormat_getBuffer(format, AMEDIAFORMAT_KEY_CSD_2, &data, &size));
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ CHECK(size == 24);
+
+ // dv_major.dv_minor Should be 1.0 or 2.1
+ CHECK(!((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1)));
+
+ const uint8_t profile = ptr[2] >> 1;
+ // profile == (unknown,1,9) --> AVC; profile == (2,3,4,5,6,7,8) --> HEVC;
+ // profile == (10) --> AV1
+ if (profile > 1 && profile < 9) {
+ CHECK(AMediaFormat_getBuffer(format, AMEDIAFORMAT_KEY_CSD_HEVC, &data, &size));
+
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ CHECK(size >= 22);
+ CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+
+ mNALLengthSize = 1 + (ptr[14 + 7] & 3);
+ } else if (10 == profile) {
+ /* AV1 profile nothing to do */
+ } else {
+ CHECK(AMediaFormat_getBuffer(format, AMEDIAFORMAT_KEY_CSD_AVC, &data, &size));
+ const uint8_t *ptr = (const uint8_t *)data;
+
+ CHECK(size >= 7);
+ CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1
+ // The number of bytes used to encode the length of a NAL unit.
+ mNALLengthSize = 1 + (ptr[4] & 3);
+ }
}
mIsPcm = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW);
@@ -5789,7 +5939,7 @@
}
}
- if (!mIsAVC && !mIsHEVC && !mIsAC4) {
+ if (!mIsAVC && !mIsHEVC && !(mIsDolbyVision && mNALLengthSize) && !mIsAC4) {
if (newBuffer) {
if (mIsPcm) {
// The twos' PCM block reader assumes that all samples have the same size.
@@ -5820,11 +5970,11 @@
meta, AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
- int32_t byteOrder;
- AMediaFormat_getInt32(mFormat,
+ int32_t byteOrder = 0;
+ bool isGetBigEndian = AMediaFormat_getInt32(mFormat,
AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN, &byteOrder);
- if (byteOrder == 1) {
+ if (isGetBigEndian && byteOrder == 1) {
// Big-endian -> little-endian
uint16_t *dstData = (uint16_t *)buf;
uint16_t *srcData = (uint16_t *)buf;
@@ -6179,7 +6329,7 @@
AMediaFormat_setBuffer(bufmeta, AMEDIAFORMAT_KEY_CRYPTO_IV, iv, ivlength);
}
- if (!mIsAVC && !mIsHEVC) {
+ if (!mIsAVC && !mIsHEVC && !(mIsDolbyVision && mNALLengthSize)) {
if (newBuffer) {
if (!isInRange((size_t)0u, mBuffer->size(), size)) {
mBuffer->release();
diff --git a/media/extractors/mp4/SampleIterator.cpp b/media/extractors/mp4/SampleIterator.cpp
index 2890b26..85fbf97 100644
--- a/media/extractors/mp4/SampleIterator.cpp
+++ b/media/extractors/mp4/SampleIterator.cpp
@@ -22,7 +22,6 @@
#include <arpa/inet.h>
-#include <media/DataSourceBase.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ByteUtils.h>
@@ -355,7 +354,7 @@
if (offset > 0) {
*time += offset;
} else {
- *time -= (offset == INT64_MIN ? INT64_MAX : (-offset));
+ *time -= (offset == INT32_MIN ? INT64_MAX : (-offset));
}
*duration = mTTSDuration;
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index e7e8901..59c8200 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -652,6 +652,7 @@
}
mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+ if (mSampleTimeEntries != nullptr) memset(mSampleTimeEntries, 0, sizeof(SampleTimeEntry) * mNumSampleSizes);
if (!mSampleTimeEntries) {
ALOGE("Cannot allocate sample entry table with %llu entries.",
(unsigned long long)mNumSampleSizes);
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 0f0c72c..1d9e1e6 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -16,6 +16,7 @@
"android.hardware.cas.native@1.0",
"android.hidl.token@1.0-utils",
"android.hidl.allocator@1.0",
+ "libcrypto",
"libhidlmemory",
"libhidlbase",
"liblog",
@@ -23,13 +24,13 @@
],
header_libs: [
+ "libaudioclient_headers",
"libbase_headers",
"libstagefright_headers",
"libmedia_headers",
],
static_libs: [
- "libcrypto",
"libstagefright_foundation_without_imemory",
"libstagefright_mpeg2support",
"libutils",
diff --git a/media/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
index 731584d..002a855 100644
--- a/media/extractors/mpeg2/MPEG2PSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -23,7 +23,6 @@
#include "mpeg2ts/AnotherPacketSource.h"
#include "mpeg2ts/ESQueue.h"
-#include <media/DataSourceBase.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -111,8 +110,10 @@
AMediaFormat *meta = AMediaFormat_new();
for (size_t i = mTracks.size(); i > 0;) {
i--;
- if (mTracks.valueAt(i)->getFormat(meta) != AMEDIA_OK) {
+ Track *track = mTracks.valueAt(i);
+ if (track->getFormat(meta) != AMEDIA_OK) {
mTracks.removeItemsAt(i);
+ delete track;
}
}
AMediaFormat_delete(meta);
@@ -122,6 +123,10 @@
MPEG2PSExtractor::~MPEG2PSExtractor() {
delete mDataSource;
+ for (size_t i = mTracks.size(); i > 0;) {
+ i--;
+ delete mTracks.valueAt(i);
+ }
}
size_t MPEG2PSExtractor::countTracks() {
@@ -793,7 +798,9 @@
}
media_status_t MPEG2PSExtractor::WrappedTrack::start() {
+ delete mTrack->mBufferGroup;
mTrack->mBufferGroup = mBufferGroup;
+ mBufferGroup = nullptr;
return mTrack->start();
}
diff --git a/media/extractors/ogg/OggExtractor.cpp b/media/extractors/ogg/OggExtractor.cpp
index 298dab1..4012ece 100644
--- a/media/extractors/ogg/OggExtractor.cpp
+++ b/media/extractors/ogg/OggExtractor.cpp
@@ -1391,7 +1391,7 @@
return NULL;
}
- *confidence = 0.2f;
+ *confidence = 0.5f;
return CreateExtractor;
}
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index ee5d089..a4322a1 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -472,6 +472,8 @@
* This is intended for developers to use when debugging.
* It is not for display to users.
*
+ * Available since API level 26.
+ *
* @return pointer to a text representation of an AAudio result code.
*/
AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) __INTRODUCED_IN(26);
@@ -482,6 +484,8 @@
* This is intended for developers to use when debugging.
* It is not for display to users.
*
+ * Available since API level 26.
+ *
* @return pointer to a text representation of an AAudio state.
*/
AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state)
@@ -502,6 +506,8 @@
* chosen by the device when it is opened.
*
* AAudioStreamBuilder_delete() must be called when you are done using the builder.
+ *
+ * Available since API level 26.
*/
AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder)
__INTRODUCED_IN(26);
@@ -513,6 +519,8 @@
* The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED},
* in which case the primary device will be used.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param deviceId device identifier or {@link #AAUDIO_UNSPECIFIED}
*/
@@ -530,6 +538,8 @@
* If an exact value is specified then an opened stream will use that value.
* If a stream cannot be opened with the specified value then the open will fail.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
*/
@@ -547,6 +557,8 @@
* If an exact value is specified then an opened stream will use that value.
* If a stream cannot be opened with the specified value then the open will fail.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param channelCount Number of channels desired.
*/
@@ -556,6 +568,8 @@
/**
* Identical to AAudioStreamBuilder_setChannelCount().
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param samplesPerFrame Number of samples in a frame.
*/
@@ -573,6 +587,8 @@
* If an exact value is specified then an opened stream will use that value.
* If a stream cannot be opened with the specified value then the open will fail.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param format common formats are {@link #AAUDIO_FORMAT_PCM_FLOAT} and
* {@link #AAUDIO_FORMAT_PCM_I16}.
@@ -588,6 +604,8 @@
* The requested sharing mode may not be available.
* The application can query for the actual mode after the stream is opened.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sharingMode {@link #AAUDIO_SHARING_MODE_SHARED} or {@link #AAUDIO_SHARING_MODE_EXCLUSIVE}
*/
@@ -599,6 +617,8 @@
*
* The default, if you do not call this function, is {@link #AAUDIO_DIRECTION_OUTPUT}.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param direction {@link #AAUDIO_DIRECTION_OUTPUT} or {@link #AAUDIO_DIRECTION_INPUT}
*/
@@ -611,6 +631,8 @@
*
* The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param numFrames the desired buffer capacity in frames or {@link #AAUDIO_UNSPECIFIED}
*/
@@ -629,6 +651,8 @@
* You can call AAudioStream_getPerformanceMode()
* to find out the final mode for the stream.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param mode the desired performance mode, eg. {@link #AAUDIO_PERFORMANCE_MODE_LOW_LATENCY}
*/
@@ -644,7 +668,7 @@
*
* The default, if you do not call this function, is {@link #AAUDIO_USAGE_MEDIA}.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param usage the desired usage, eg. {@link #AAUDIO_USAGE_GAME}
@@ -661,7 +685,7 @@
*
* The default, if you do not call this function, is {@link #AAUDIO_CONTENT_TYPE_MUSIC}.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param contentType the type of audio data, eg. {@link #AAUDIO_CONTENT_TYPE_SPEECH}
@@ -681,7 +705,7 @@
* That is because VOICE_RECOGNITION is the preset with the lowest latency
* on many platforms.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param inputPreset the desired configuration for recording
@@ -697,10 +721,10 @@
* Note that an application can also set its global policy, in which case the most restrictive
* policy is always applied. See {@link android.media.AudioAttributes#setAllowedCapturePolicy(int)}
*
- * Added in API level 29.
+ * Available since API level 29.
*
* @param builder reference provided by AAudio_createStreamBuilder()
- * @param inputPreset the desired level of opt-out from being captured.
+ * @param capturePolicy the desired level of opt-out from being captured.
*/
AAUDIO_API void AAudioStreamBuilder_setAllowedCapturePolicy(AAudioStreamBuilder* builder,
aaudio_allowed_capture_policy_t capturePolicy) __INTRODUCED_IN(29);
@@ -727,7 +751,7 @@
*
* Allocated session IDs will always be positive and nonzero.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sessionId an allocated sessionID or {@link #AAUDIO_SESSION_ID_ALLOCATE}
@@ -826,6 +850,8 @@
*
* Note that the AAudio callbacks will never be called simultaneously from multiple threads.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param callback pointer to a function that will process audio data.
* @param userData pointer to an application data structure that will be passed
@@ -854,6 +880,8 @@
* If you do call this function then the requested size should be less than
* half the buffer capacity, to allow double buffering.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param numFrames the desired buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
*/
@@ -905,6 +933,8 @@
*
* Note that the AAudio callbacks will never be called simultaneously from multiple threads.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param callback pointer to a function that will be called if an error occurs.
* @param userData pointer to an application data structure that will be passed
@@ -919,6 +949,8 @@
* AAudioStream_close() must be called when finished with the stream to recover
* the memory and to free the associated resources.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @param stream pointer to a variable to receive the new stream reference
* @return {@link #AAUDIO_OK} or a negative error.
@@ -929,6 +961,8 @@
/**
* Delete the resources associated with the StreamBuilder.
*
+ * Available since API level 26.
+ *
* @param builder reference provided by AAudio_createStreamBuilder()
* @return {@link #AAUDIO_OK} or a negative error.
*/
@@ -942,6 +976,8 @@
/**
* Free the resources associated with a stream created by AAudioStreamBuilder_openStream()
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
@@ -954,6 +990,8 @@
* After this call the state will be in {@link #AAUDIO_STREAM_STATE_STARTING} or
* {@link #AAUDIO_STREAM_STATE_STARTED}.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
@@ -969,6 +1007,8 @@
* This will return {@link #AAUDIO_ERROR_UNIMPLEMENTED} for input streams.
* For input streams use AAudioStream_requestStop().
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
@@ -984,6 +1024,8 @@
*
* This will return {@link #AAUDIO_ERROR_UNIMPLEMENTED} for input streams.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
@@ -995,6 +1037,8 @@
* After this call the state will be in {@link #AAUDIO_STREAM_STATE_STOPPING} or
* {@link #AAUDIO_STREAM_STATE_STOPPED}.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
@@ -1008,6 +1052,8 @@
* call AAudioStream_waitForStateChange() with currentState
* set to {@link #AAUDIO_STREAM_STATE_UNKNOWN} and a zero timeout.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
*/
AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream) __INTRODUCED_IN(26);
@@ -1028,6 +1074,8 @@
* }
* </code></pre>
*
+ * Available since API level 26.
+ *
* @param stream A reference provided by AAudioStreamBuilder_openStream()
* @param inputState The state we want to avoid.
* @param nextState Pointer to a variable that will be set to the new state.
@@ -1056,6 +1104,8 @@
*
* If the call times out then zero or a partial frame count will be returned.
*
+ * Available since API level 26.
+ *
* @param stream A stream created using AAudioStreamBuilder_openStream().
* @param buffer The address of the first sample.
* @param numFrames Number of frames to read. Only complete frames will be written.
@@ -1079,6 +1129,8 @@
*
* If the call times out then zero or a partial frame count will be returned.
*
+ * Available since API level 26.
+ *
* @param stream A stream created using AAudioStreamBuilder_openStream().
* @param buffer The address of the first sample.
* @param numFrames Number of frames to write. Only complete frames will be written.
@@ -1104,6 +1156,8 @@
* You can check the return value or call AAudioStream_getBufferSizeInFrames()
* to see what the actual final size is.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @param numFrames requested number of frames that can be filled without blocking
* @return actual buffer size in frames or a negative error
@@ -1114,6 +1168,8 @@
/**
* Query the maximum number of frames that can be filled without blocking.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return buffer size in frames.
*/
@@ -1129,6 +1185,8 @@
* For some endpoints, the burst size can vary dynamically.
* But these tend to be devices with high latency.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return burst size
*/
@@ -1137,6 +1195,8 @@
/**
* Query maximum buffer capacity in frames.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return buffer capacity in frames
*/
@@ -1158,6 +1218,8 @@
* {@link #AAUDIO_UNSPECIFIED} indicates that the callback buffer size for this stream
* may vary from one dataProc callback to the next.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return callback buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
*/
@@ -1175,12 +1237,16 @@
* Note that some INPUT devices may not support this function.
* In that case a 0 will always be returned.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return the underrun or overrun count
*/
AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* stream) __INTRODUCED_IN(26);
/**
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual sample rate
*/
@@ -1190,6 +1256,8 @@
* A stream has one or more channels of data.
* A frame will contain one sample for each channel.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual number of channels
*/
@@ -1198,18 +1266,24 @@
/**
* Identical to AAudioStream_getChannelCount().
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual number of samples per frame
*/
AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* stream) __INTRODUCED_IN(26);
/**
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual device ID
*/
AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* stream) __INTRODUCED_IN(26);
/**
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual data format
*/
@@ -1217,6 +1291,9 @@
/**
* Provide actual sharing mode.
+ *
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual sharing mode
*/
@@ -1226,12 +1303,16 @@
/**
* Get the performance mode used by the stream.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
*/
AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* stream)
__INTRODUCED_IN(26);
/**
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return direction
*/
@@ -1245,6 +1326,8 @@
*
* The frame position is monotonically increasing.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return frames written
*/
@@ -1258,6 +1341,8 @@
*
* The frame position is monotonically increasing.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return frames read
*/
@@ -1281,7 +1366,7 @@
*
* The sessionID for a stream should not change once the stream has been opened.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return session ID or {@link #AAUDIO_SESSION_ID_NONE}
@@ -1304,6 +1389,8 @@
*
* The position and time passed back are monotonically increasing.
*
+ * Available since API level 26.
+ *
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @param clockid CLOCK_MONOTONIC or CLOCK_BOOTTIME
* @param framePosition pointer to a variable to receive the position
@@ -1316,7 +1403,7 @@
/**
* Return the use case for the stream.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return usage, for example {@link #AAUDIO_USAGE_MEDIA}
@@ -1326,7 +1413,7 @@
/**
* Return the content type for the stream.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return content type, for example {@link #AAUDIO_CONTENT_TYPE_MUSIC}
@@ -1337,7 +1424,7 @@
/**
* Return the input preset for the stream.
*
- * Added in API level 28.
+ * Available since API level 28.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return input preset, for example {@link #AAUDIO_INPUT_PRESET_CAMCORDER}
@@ -1349,7 +1436,7 @@
* Return the policy that determines whether the audio may or may not be captured
* by other apps or the system.
*
- * Added in API level 29.
+ * Available since API level 29.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return the allowed capture policy, for example {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}
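Since these hunks only adjust the per-function availability notes, the overall builder flow they document is easy to lose track of; the following minimal sketch shows the documented call order for an output stream. It is an illustrative client snippet, not code from this patch, and the frame count and timeout are arbitrary.

    #include <aaudio/AAudio.h>
    #include <cstdint>

    // Open a low-latency stereo output stream, write one block of silence, then tear down.
    static aaudio_result_t playSilenceOnce() {
        AAudioStreamBuilder *builder = nullptr;
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;

        AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
        AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_I16);
        AAudioStreamBuilder_setChannelCount(builder, 2);
        AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);

        AAudioStream *stream = nullptr;
        result = AAudioStreamBuilder_openStream(builder, &stream);
        AAudioStreamBuilder_delete(builder);          // builder is no longer needed
        if (result != AAUDIO_OK) return result;

        int16_t silence[2 * 256] = {};                // 256 stereo frames of silence
        AAudioStream_requestStart(stream);
        AAudioStream_write(stream, silence, 256, 100 * 1000 * 1000 /* 100 ms timeout */);
        AAudioStream_requestStop(stream);
        AAudioStream_close(stream);
        return AAUDIO_OK;
    }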
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
index 56c0170..850b1d0 100644
--- a/media/libaaudio/src/Android.bp
+++ b/media/libaaudio/src/Android.bp
@@ -10,7 +10,9 @@
"legacy",
"utility",
],
- header_libs: ["libaaudio_headers"],
+ header_libs: [
+ "libaaudio_headers",
+ ],
export_header_lib_headers: ["libaaudio_headers"],
version_script: "libaaudio.map.txt",
@@ -53,7 +55,10 @@
],
export_include_dirs: ["."],
- header_libs: ["libaaudio_headers"],
+ header_libs: [
+ "libaaudio_headers",
+ "libmedia_headers"
+ ],
export_header_lib_headers: ["libaaudio_headers"],
shared_libs: [
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 52eadd4..7481daa 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -232,6 +232,26 @@
mCallbackBuffer = new uint8_t[callbackBufferSize];
}
+ // For debugging and analyzing the distribution of MMAP timestamps.
+ // For OUTPUT, use a NEGATIVE offset to move the CPU writes further BEFORE the HW reads.
+ // For INPUT, use a POSITIVE offset to move the CPU reads further AFTER the HW writes.
+ // You can use this offset to reduce glitching.
+ // You can also use this offset to force glitching. By iterating over multiple
+ // values you can reveal the distribution of the hardware timing jitter.
+ if (mAudioEndpoint.isFreeRunning()) { // MMAP?
+ int32_t offsetMicros = (getDirection() == AAUDIO_DIRECTION_OUTPUT)
+ ? AAudioProperty_getOutputMMapOffsetMicros()
+ : AAudioProperty_getInputMMapOffsetMicros();
+ // This log is used to debug some tricky glitch issues. Please leave.
+ ALOGD_IF(offsetMicros, "%s() - %s mmap offset = %d micros",
+ __func__,
+ (getDirection() == AAUDIO_DIRECTION_OUTPUT) ? "output" : "input",
+ offsetMicros);
+ mTimeOffsetNanos = offsetMicros * AAUDIO_NANOS_PER_MICROSECOND;
+ }
+
+ setBufferSize(capacity / 2); // Default buffer size to match Q
+
setState(AAUDIO_STREAM_STATE_OPEN);
return result;
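The two offsets come from system properties (named later in this change, in AAudioUtilities.h), so the timestamp window can be shifted from a shell without rebuilding; the values below are arbitrary examples:

    adb root
    adb shell setprop aaudio.out_mmap_offset_usec -500   # move CPU writes 500 us earlier
    adb shell setprop aaudio.in_mmap_offset_usec 500     # move CPU reads 500 us later

Each incoming MMAP timestamp is then shifted by offsetMicros * AAUDIO_NANOS_PER_MICROSECOND, as applied in the hunk below.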
@@ -478,7 +498,8 @@
#if LOG_TIMESTAMPS
logTimestamp(*message);
#endif
- processTimestamp(message->timestamp.position, message->timestamp.timestamp);
+ processTimestamp(message->timestamp.position,
+ message->timestamp.timestamp + mTimeOffsetNanos);
return AAUDIO_OK;
}
@@ -634,7 +655,7 @@
// Should we block?
if (timeoutNanoseconds == 0) {
break; // don't block
- } else if (framesLeft > 0) {
+ } else if (wakeTimeNanos != 0) {
if (!mAudioEndpoint.isFreeRunning()) {
// If there is software on the other end of the FIFO then it may get delayed.
// So wake up just a little after we expect it to be ready.
@@ -693,37 +714,39 @@
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
int32_t adjustedFrames = requestedFrames;
- int32_t actualFrames = 0;
- int32_t maximumSize = getBufferCapacity();
+ const int32_t maximumSize = getBufferCapacity() - mFramesPerBurst;
+ // The buffer size can be set to zero.
+ // This means that the callback may be called when the internal buffer becomes empty.
+ // This will be fine on some devices in ideal circumstances and will result in the
+ // lowest possible latency.
+ // If there are glitches then they should be detected as XRuns and the size can be increased.
+ static const int32_t minimumSize = 0;
// Clip to minimum size so that rounding up will work better.
- if (adjustedFrames < 1) {
- adjustedFrames = 1;
- }
+ adjustedFrames = std::max(minimumSize, adjustedFrames);
- if (adjustedFrames > maximumSize) {
- // Clip to maximum size.
+ // Prevent arithmetic overflow by clipping before we round.
+ if (adjustedFrames >= maximumSize) {
adjustedFrames = maximumSize;
} else {
// Round to the next highest burst size.
int32_t numBursts = (adjustedFrames + mFramesPerBurst - 1) / mFramesPerBurst;
adjustedFrames = numBursts * mFramesPerBurst;
- // Rounding may have gone above maximum.
- if (adjustedFrames > maximumSize) {
- adjustedFrames = maximumSize;
- }
}
- aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(adjustedFrames, &actualFrames);
- if (result < 0) {
- return result;
- } else {
- return (aaudio_result_t) actualFrames;
- }
+ // Clip against the actual size from the endpoint.
+ int32_t actualFrames = 0;
+ mAudioEndpoint.setBufferSizeInFrames(maximumSize, &actualFrames);
+ // actualFrames should be <= maximumSize
+ adjustedFrames = std::min(actualFrames, adjustedFrames);
+
+ mBufferSizeInFrames = adjustedFrames;
+ ALOGV("%s(%d) returns %d", __func__, requestedFrames, adjustedFrames);
+ return (aaudio_result_t) adjustedFrames;
}
int32_t AudioStreamInternal::getBufferSize() const {
- return mAudioEndpoint.getBufferSizeInFrames();
+ return mBufferSizeInFrames;
}
int32_t AudioStreamInternal::getBufferCapacity() const {
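The clipping and rounding above can be restated as a standalone helper; the name and example numbers are made up, and the real path additionally clamps against whatever size the endpoint actually accepts:

    #include <algorithm>
    #include <cstdint>

    // Round the request up to a whole number of bursts, keep at least one burst of
    // headroom below the capacity, and allow zero for the lowest possible latency.
    static int32_t clipAndRoundBufferSize(int32_t requested, int32_t framesPerBurst,
                                          int32_t capacity) {
        const int32_t maximum = capacity - framesPerBurst;
        int32_t adjusted = std::max(0, requested);
        if (adjusted >= maximum) {
            adjusted = maximum;
        } else {
            const int32_t numBursts = (adjusted + framesPerBurst - 1) / framesPerBurst;
            adjusted = numBursts * framesPerBurst;
        }
        return adjusted;
    }

    // Example: requested = 200, framesPerBurst = 96, capacity = 1920
    //   maximum = 1824, numBursts = (200 + 95) / 96 = 3, result = 288 frames.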
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 86c4698..596d37f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -194,6 +194,7 @@
// By delaying slightly we can avoid waking up before other side is ready.
const int32_t mWakeupDelayNanos; // delay past typical wakeup jitter
const int32_t mMinimumSleepNanos; // minimum sleep while polling
+ int32_t mTimeOffsetNanos = 0; // add to time part of an MMAP timestamp
AudioEndpointParcelable mEndPointParcelable; // description of the buffers filled by service
EndpointDescriptor mEndpointDescriptor; // buffer description with resolved addresses
@@ -203,6 +204,9 @@
// Sometimes the hardware is operating with a different channel count from the app.
// Then we require conversion in AAudio.
int32_t mDeviceChannelCount = 0;
+
+ int32_t mBufferSizeInFrames = 0; // local threshold to control latency
+
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
index 366cc87..9684ee4 100644
--- a/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalCapture.cpp
@@ -106,9 +106,10 @@
mNeedCatchUp.acknowledge();
}
- // If the write index passed the read index then consider it an overrun.
+ // If the capture buffer is full beyond capacity then consider it an overrun.
// For shared streams, the xRunCount is passed up from the service.
- if (mAudioEndpoint.isFreeRunning() && mAudioEndpoint.getEmptyFramesAvailable() < 0) {
+ if (mAudioEndpoint.isFreeRunning()
+ && mAudioEndpoint.getFullFramesAvailable() > mAudioEndpoint.getBufferCapacityInFrames()) {
mXRunCount++;
if (ATRACE_ENABLED()) {
ATRACE_INT("aaOverRuns", mXRunCount);
diff --git a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
index b8ef247..dc9f48c 100644
--- a/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternalPlay.cpp
@@ -167,8 +167,10 @@
ATRACE_INT("aaWrote", framesWritten);
}
+ // Sleep if there is too much data in the buffer.
// Calculate an ideal time to wake up.
- if (wakeTimePtr != nullptr && framesWritten >= 0) {
+ if (wakeTimePtr != nullptr
+ && (mAudioEndpoint.getFullFramesAvailable() >= getBufferSize())) {
// By default wake up a few milliseconds from now. // TODO review
int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
aaudio_stream_state_t state = getState();
@@ -184,14 +186,10 @@
break;
case AAUDIO_STREAM_STATE_STARTED:
{
- // When do we expect the next read burst to occur?
-
- // Calculate frame position based off of the writeCounter because
- // the readCounter might have just advanced in the background,
- // causing us to sleep until a later burst.
- int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
- - mAudioEndpoint.getBufferSizeInFrames();
- wakeTime = mClockModel.convertPositionToTime(nextPosition);
+ // Sleep until the readCounter catches up and we only have
+ // the getBufferSize() frames of data sitting in the buffer.
+ int64_t nextReadPosition = mAudioEndpoint.getDataWriteCounter() - getBufferSize();
+ wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
}
break;
default:
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 9abdf53..bd46d05 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -18,22 +18,43 @@
//#define LOG_NDEBUG 0
#include <log/log.h>
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
#include <stdint.h>
#include <algorithm>
#include "utility/AudioClock.h"
+#include "utility/AAudioUtilities.h"
#include "IsochronousClockModel.h"
using namespace aaudio;
+using namespace android::audio_utils;
+
+#ifndef ICM_LOG_DRIFT
+#define ICM_LOG_DRIFT 0
+#endif // ICM_LOG_DRIFT
+
+// To enable the timestamp histogram, enter this before opening the stream:
+// adb root
+// adb shell setprop aaudio.log_mask 1
+// A histogram of the lateness of the timestamps will be cleared when the stream is started.
+// It will be updated when the model is stable and receives a timestamp,
+// and dumped to the log when the stream is stopped.
+
IsochronousClockModel::IsochronousClockModel()
: mMarkerFramePosition(0)
, mMarkerNanoTime(0)
, mSampleRate(48000)
- , mFramesPerBurst(64)
+ , mFramesPerBurst(48)
, mMaxMeasuredLatenessNanos(0)
+ , mLatenessForDriftNanos(kInitialLatenessForDriftNanos)
, mState(STATE_STOPPED)
{
+ if ((AAudioProperty_getLogMask() & AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM) != 0) {
+ mHistogramMicros = std::make_unique<Histogram>(kHistogramBinCount,
+ kHistogramBinWidthMicros);
+ }
}
IsochronousClockModel::~IsochronousClockModel() {
@@ -49,6 +70,9 @@
ALOGV("start(nanos = %lld)\n", (long long) nanoTime);
mMarkerNanoTime = nanoTime;
mState = STATE_STARTING;
+ if (mHistogramMicros) {
+ mHistogramMicros->clear();
+ }
}
void IsochronousClockModel::stop(int64_t nanoTime) {
@@ -58,6 +82,9 @@
setPositionAndTime(convertTimeToPosition(nanoTime), nanoTime);
// TODO should we set position?
mState = STATE_STOPPED;
+ if (mHistogramMicros) {
+ dumpHistogram();
+ }
}
bool IsochronousClockModel::isStarting() const {
@@ -90,6 +117,7 @@
// ALOGD("processTimestamp() - mSampleRate = %d", mSampleRate);
// ALOGD("processTimestamp() - mState = %d", mState);
+ int64_t latenessNanos = nanosDelta - expectedNanosDelta;
switch (mState) {
case STATE_STOPPED:
break;
@@ -99,7 +127,7 @@
break;
case STATE_SYNCING:
// This will handle a burst of rapid transfer at the beginning.
- if (nanosDelta < expectedNanosDelta) {
+ if (latenessNanos < 0) {
setPositionAndTime(framePosition, nanoTime);
} else {
// ALOGD("processTimestamp() - advance to STATE_RUNNING");
@@ -107,65 +135,67 @@
}
break;
case STATE_RUNNING:
- if (nanosDelta < expectedNanosDelta) {
+ if (mHistogramMicros) {
+ mHistogramMicros->add(latenessNanos / AAUDIO_NANOS_PER_MICROSECOND);
+ }
+ // Modify estimated position based on lateness.
+ // This affects the "early" side of the window, which controls output glitches.
+ if (latenessNanos < 0) {
// Earlier than expected timestamp.
// This data is probably more accurate, so use it.
// Or we may be drifting due to a fast HW clock.
- //int microsDelta = (int) (nanosDelta / 1000);
- //int expectedMicrosDelta = (int) (expectedNanosDelta / 1000);
- //ALOGD("%s() - STATE_RUNNING - #%d, %4d micros EARLY",
- //__func__, mTimestampCount, expectedMicrosDelta - microsDelta);
-
setPositionAndTime(framePosition, nanoTime);
- } else if (nanosDelta > (expectedNanosDelta + (2 * mBurstPeriodNanos))) {
- // In this case we do not update mMaxMeasuredLatenessNanos because it
- // would force it too high.
- // mMaxMeasuredLatenessNanos should range from 1 to 2 * mBurstPeriodNanos
- //int32_t measuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
- //ALOGD("%s() - STATE_RUNNING - #%d, lateness %d - max %d = %4d micros VERY LATE",
- //__func__,
- //mTimestampCount,
- //measuredLatenessNanos / 1000,
- //mMaxMeasuredLatenessNanos / 1000,
- //(measuredLatenessNanos - mMaxMeasuredLatenessNanos) / 1000
- //);
-
- // This typically happens when we are modelling a service instead of a DSP.
- setPositionAndTime(framePosition, nanoTime - (2 * mBurstPeriodNanos));
- } else if (nanosDelta > (expectedNanosDelta + mMaxMeasuredLatenessNanos)) {
- //int32_t previousLatenessNanos = mMaxMeasuredLatenessNanos;
- mMaxMeasuredLatenessNanos = (int32_t)(nanosDelta - expectedNanosDelta);
-
- //ALOGD("%s() - STATE_RUNNING - #%d, newmax %d - oldmax %d = %4d micros LATE",
- //__func__,
- //mTimestampCount,
- //mMaxMeasuredLatenessNanos / 1000,
- //previousLatenessNanos / 1000,
- //(mMaxMeasuredLatenessNanos - previousLatenessNanos) / 1000
- //);
-
- // When we are late, it may be because of preemption in the kernel,
+#if ICM_LOG_DRIFT
+ int earlyDeltaMicros = (int) ((expectedNanosDelta - nanosDelta)/ 1000);
+ ALOGD("%s() - STATE_RUNNING - #%d, %4d micros EARLY",
+ __func__, mTimestampCount, earlyDeltaMicros);
+#endif
+ } else if (latenessNanos > mLatenessForDriftNanos) {
+ // When we are on the late side, it may be because of preemption in the kernel,
// or timing jitter caused by resampling in the DSP,
// or we may be drifting due to a slow HW clock.
// We add slight drift value just in case there is actual long term drift
// forward caused by a slower clock.
// If the clock is faster than the model will get pushed earlier
- // by the code in the preceding branch.
+ // by the code in the earlier branch.
// The two opposing forces should allow the model to track the real clock
// over a long time.
int64_t driftingTime = mMarkerNanoTime + expectedNanosDelta + kDriftNanos;
setPositionAndTime(framePosition, driftingTime);
- //ALOGD("%s() - #%d, max lateness = %d micros",
- //__func__,
- //mTimestampCount,
- //(int) (mMaxMeasuredLatenessNanos / 1000));
+#if ICM_LOG_DRIFT
+ ALOGD("%s() - STATE_RUNNING - #%d, DRIFT, lateness = %d micros",
+ __func__,
+ mTimestampCount,
+ (int) (latenessNanos / 1000));
+#endif
+ }
+
+ // Modify mMaxMeasuredLatenessNanos.
+ // This affects the "late" side of the window, which controls input glitches.
+ if (latenessNanos > mMaxMeasuredLatenessNanos) { // increase
+#if ICM_LOG_DRIFT
+ ALOGD("%s() - STATE_RUNNING - #%d, newmax %d - oldmax %d = %4d micros LATE",
+ __func__,
+ mTimestampCount,
+ (int) (latenessNanos / 1000),
+ mMaxMeasuredLatenessNanos / 1000,
+ (int) ((latenessNanos - mMaxMeasuredLatenessNanos) / 1000)
+ );
+#endif
+ mMaxMeasuredLatenessNanos = (int32_t) latenessNanos;
+ // Calculate upper region that will trigger a drift forwards.
+ mLatenessForDriftNanos = mMaxMeasuredLatenessNanos - (mMaxMeasuredLatenessNanos >> 4);
+ } else { // decrease
+ // If there is an outlier in lateness then mMaxMeasuredLatenessNanos can go high
+ // and stay there. So we slowly reduce mMaxMeasuredLatenessNanos for better
+ // long term stability. The two opposing forces will keep mMaxMeasuredLatenessNanos
+ // within a reasonable range.
+ mMaxMeasuredLatenessNanos -= kDriftNanos;
}
break;
default:
break;
}
-
-// ALOGD("processTimestamp() - mState = %d", mState);
}
void IsochronousClockModel::setSampleRate(int32_t sampleRate) {
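The branches above implement a small asymmetric window around the model; restated as an illustrative sketch (member names shortened, constants taken from IsochronousClockModel.h later in this change):

    #include <cstdint>

    // 'lateness' = measured time delta minus the delta the model predicts for the
    // reported frame position. Early timestamps snap the model; very late ones only
    // nudge it forward by kDriftNanos so a slightly slow HW clock can be tracked.
    struct LatenessWindow {
        static constexpr int64_t kDriftNanos = 1000;
        int64_t maxMeasuredLatenessNanos = 0;     // late edge, controls input timing
        int64_t latenessForDriftNanos = 10000;    // threshold for drifting forward

        // Returns true when the caller should snap the model to the measurement,
        // and updates the window edges for the next timestamp.
        bool onTimestamp(int64_t latenessNanos, int64_t *driftNanos) {
            *driftNanos = 0;
            const bool snapEarly = (latenessNanos < 0);
            if (!snapEarly && latenessNanos > latenessForDriftNanos) {
                *driftNanos = kDriftNanos;                         // drift slowly forward
            }
            if (latenessNanos > maxMeasuredLatenessNanos) {
                maxMeasuredLatenessNanos = latenessNanos;          // grow quickly
                latenessForDriftNanos =
                        maxMeasuredLatenessNanos - (maxMeasuredLatenessNanos >> 4);
            } else {
                maxMeasuredLatenessNanos -= kDriftNanos;           // decay slowly
            }
            return snapEarly;
        }
    };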
@@ -181,9 +211,6 @@
// Update expected lateness based on sampleRate and framesPerBurst
void IsochronousClockModel::update() {
mBurstPeriodNanos = convertDeltaPositionToTime(mFramesPerBurst); // uses mSampleRate
- // Timestamps may be late by up to a burst because we are randomly sampling the time period
- // after the DSP position is actually updated.
- mMaxMeasuredLatenessNanos = mBurstPeriodNanos;
}
int64_t IsochronousClockModel::convertDeltaPositionToTime(int64_t framesDelta) const {
@@ -227,9 +254,7 @@
}
int32_t IsochronousClockModel::getLateTimeOffsetNanos() const {
- // This will never be < 0 because mMaxLatenessNanos starts at
- // mBurstPeriodNanos and only gets bigger.
- return (mMaxMeasuredLatenessNanos - mBurstPeriodNanos) + kExtraLatenessNanos;
+ return mMaxMeasuredLatenessNanos + kExtraLatenessNanos;
}
int64_t IsochronousClockModel::convertPositionToLatestTime(int64_t framePosition) const {
@@ -241,10 +266,19 @@
}
void IsochronousClockModel::dump() const {
- ALOGD("mMarkerFramePosition = %lld", (long long) mMarkerFramePosition);
- ALOGD("mMarkerNanoTime = %lld", (long long) mMarkerNanoTime);
+ ALOGD("mMarkerFramePosition = %" PRIu64, mMarkerFramePosition);
+ ALOGD("mMarkerNanoTime = %" PRIu64, mMarkerNanoTime);
ALOGD("mSampleRate = %6d", mSampleRate);
ALOGD("mFramesPerBurst = %6d", mFramesPerBurst);
ALOGD("mMaxMeasuredLatenessNanos = %6d", mMaxMeasuredLatenessNanos);
ALOGD("mState = %6d", mState);
}
+
+void IsochronousClockModel::dumpHistogram() const {
+ if (!mHistogramMicros) return;
+ std::istringstream istr(mHistogramMicros->dump());
+ std::string line;
+ while (std::getline(istr, line)) {
+ ALOGD("lateness, %s", line.c_str());
+ }
+}
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 582bf4e..40f066b 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -18,6 +18,9 @@
#define ANDROID_AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
#include <stdint.h>
+
+#include <audio_utils/Histogram.h>
+
#include "utility/AudioClock.h"
namespace aaudio {
@@ -122,6 +125,8 @@
void dump() const;
+ void dumpHistogram() const;
+
private:
int32_t getLateTimeOffsetNanos() const;
@@ -134,21 +139,30 @@
};
// Amount of time to drift forward when we get a late timestamp.
- // This value was calculated to allow tracking of a clock with 50 ppm error.
- static constexpr int32_t kDriftNanos = 10 * 1000;
- // TODO review value of kExtraLatenessNanos
+ static constexpr int32_t kDriftNanos = 1 * 1000;
+ // Safety margin to add to the late edge of the timestamp window.
static constexpr int32_t kExtraLatenessNanos = 100 * 1000;
+ // Initial small threshold for causing a drift later in time.
+ static constexpr int32_t kInitialLatenessForDriftNanos = 10 * 1000;
- int64_t mMarkerFramePosition;
- int64_t mMarkerNanoTime;
+ static constexpr int32_t kHistogramBinWidthMicros = 50;
+ static constexpr int32_t kHistogramBinCount = 128;
+
+ int64_t mMarkerFramePosition; // Estimated HW position.
+ int64_t mMarkerNanoTime; // Estimated HW time.
int32_t mSampleRate;
- int32_t mFramesPerBurst;
- int32_t mBurstPeriodNanos;
+ int32_t mFramesPerBurst; // number of frames transferred at one time.
+ int32_t mBurstPeriodNanos; // Time between HW bursts.
// Includes mBurstPeriodNanos because we sample randomly over time.
int32_t mMaxMeasuredLatenessNanos;
- clock_model_state_t mState;
+ // Threshold for lateness that triggers a drift later in time.
+ int32_t mLatenessForDriftNanos;
+ clock_model_state_t mState; // State machine handles startup sequence.
- int32_t mTimestampCount = 0;
+ int32_t mTimestampCount = 0; // For logging.
+
+ // distribution of timestamps relative to earliest
+ std::unique_ptr<android::audio_utils::Histogram> mHistogramMicros;
void update();
};
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index cdd02c0..c2f7fd0 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -335,6 +335,34 @@
return prop;
}
+static int32_t AAudioProperty_getMMapOffsetMicros(const char *functionName,
+ const char *propertyName) {
+ const int32_t minMicros = -20000; // arbitrary
+ const int32_t defaultMicros = 0; // arbitrary
+ const int32_t maxMicros = 20000; // arbitrary
+ int32_t prop = property_get_int32(propertyName, defaultMicros);
+ if (prop < minMicros) {
+ ALOGW("%s: clipped %d to %d", functionName, prop, minMicros);
+ prop = minMicros;
+ } else if (prop > maxMicros) {
+ ALOGW("%s: clipped %d to %d", functionName, prop, minMicros);
+ prop = maxMicros;
+ }
+ return prop;
+}
+
+int32_t AAudioProperty_getInputMMapOffsetMicros() {
+ return AAudioProperty_getMMapOffsetMicros(__func__, AAUDIO_PROP_INPUT_MMAP_OFFSET_USEC);
+}
+
+int32_t AAudioProperty_getOutputMMapOffsetMicros() {
+ return AAudioProperty_getMMapOffsetMicros(__func__, AAUDIO_PROP_OUTPUT_MMAP_OFFSET_USEC);
+}
+
+int32_t AAudioProperty_getLogMask() {
+ return property_get_int32(AAUDIO_PROP_LOG_MASK, 0);
+}
+
aaudio_result_t AAudio_isFlushAllowed(aaudio_stream_state_t state) {
aaudio_result_t result = AAUDIO_OK;
switch (state) {
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 76d0457..5dcddf3 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -94,31 +94,26 @@
// Note that this code may be replaced by Settings or by some other system configuration tool.
+/**
+ * Read system property.
+ * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
+ */
+int32_t AAudioProperty_getMMapPolicy();
#define AAUDIO_PROP_MMAP_POLICY "aaudio.mmap_policy"
/**
* Read system property.
* @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
*/
-int32_t AAudioProperty_getMMapPolicy();
-
-#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
-
-/**
- * Read system property.
- * @return AAUDIO_UNSPECIFIED, AAUDIO_POLICY_NEVER or AAUDIO_POLICY_AUTO or AAUDIO_POLICY_ALWAYS
- */
int32_t AAudioProperty_getMMapExclusivePolicy();
-
-#define AAUDIO_PROP_MIXER_BURSTS "aaudio.mixer_bursts"
+#define AAUDIO_PROP_MMAP_EXCLUSIVE_POLICY "aaudio.mmap_exclusive_policy"
/**
* Read system property.
* @return number of bursts per AAudio service mixer cycle
*/
int32_t AAudioProperty_getMixerBursts();
-
-#define AAUDIO_PROP_HW_BURST_MIN_USEC "aaudio.hw_burst_min_usec"
+#define AAUDIO_PROP_MIXER_BURSTS "aaudio.mixer_bursts"
/**
* Read a system property that specifies the number of extra microseconds that a thread
@@ -130,7 +125,6 @@
* @return number of microseconds to delay the wakeup.
*/
int32_t AAudioProperty_getWakeupDelayMicros();
-
#define AAUDIO_PROP_WAKEUP_DELAY_USEC "aaudio.wakeup_delay_usec"
/**
@@ -139,7 +133,6 @@
* @return minimum number of microseconds to sleep.
*/
int32_t AAudioProperty_getMinimumSleepMicros();
-
#define AAUDIO_PROP_MINIMUM_SLEEP_USEC "aaudio.minimum_sleep_usec"
/**
@@ -153,7 +146,35 @@
* @return minimum number of microseconds for a MMAP HW burst
*/
int32_t AAudioProperty_getHardwareBurstMinMicros();
+#define AAUDIO_PROP_HW_BURST_MIN_USEC "aaudio.hw_burst_min_usec"
+/**
+ * Read a system property that specifies an offset that will be added to MMAP timestamps.
+ * This can be used to correct bias in the timestamp.
+ * It can also be used to analyze the time distribution of the timestamp
+ * by progressively modifying the offset and listening for glitches.
+ *
+ * @return number of microseconds to offset the time part of an MMAP timestamp
+ */
+int32_t AAudioProperty_getInputMMapOffsetMicros();
+#define AAUDIO_PROP_INPUT_MMAP_OFFSET_USEC "aaudio.in_mmap_offset_usec"
+
+int32_t AAudioProperty_getOutputMMapOffsetMicros();
+#define AAUDIO_PROP_OUTPUT_MMAP_OFFSET_USEC "aaudio.out_mmap_offset_usec"
+
+// These are powers of two that can be combined as a bit mask.
+// AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM must be enabled before the stream is opened.
+#define AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM 1
+#define AAUDIO_LOG_RESERVED_2 2
+#define AAUDIO_LOG_RESERVED_4 4
+#define AAUDIO_LOG_RESERVED_8 8
+
+/**
+ * Use a mask to enable various logs in AAudio.
+ * @return mask that enables various AAudio logs, such as AAUDIO_LOG_CLOCK_MODEL_HISTOGRAM
+ */
+int32_t AAudioProperty_getLogMask();
+#define AAUDIO_PROP_LOG_MASK "aaudio.log_mask"
/**
* Is flush allowed for the given state?
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
index 19cd0a0..73fd896 100644
--- a/media/libaaudio/tests/Android.bp
+++ b/media/libaaudio/tests/Android.bp
@@ -215,3 +215,14 @@
srcs: ["test_full_queue.cpp"],
shared_libs: ["libaaudio"],
}
+
+cc_test {
+ name: "test_histogram",
+ defaults: ["libaaudio_tests_defaults"],
+ srcs: ["test_histogram.cpp"],
+ shared_libs: [
+ "libaudioutils",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/test_histogram.cpp b/media/libaaudio/tests/test_histogram.cpp
new file mode 100644
index 0000000..431373d
--- /dev/null
+++ b/media/libaaudio/tests/test_histogram.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Test Histogram
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include <audio_utils/Histogram.h>
+
+using namespace android::audio_utils;
+
+static constexpr int32_t kBinWidth = 10;
+static constexpr int32_t kNumBins = 20;
+
+TEST(test_histogram, module_sinki16) {
+ Histogram histogram(kNumBins, kBinWidth);
+ ASSERT_EQ(kNumBins, histogram.getNumBinsInRange());
+
+ // Is it clear initially?
+ for (int i = 0; i < kNumBins; i++) {
+ ASSERT_EQ(0, histogram.getCount(i));
+ }
+ ASSERT_EQ(0, histogram.getCountBelowRange());
+ ASSERT_EQ(0, histogram.getCountAboveRange());
+ ASSERT_EQ(0, histogram.getCount());
+
+ // Add some items.
+ histogram.add(27);
+ histogram.add(53);
+ histogram.add(171);
+ histogram.add(23);
+
+ // Did they count correctly.
+ ASSERT_EQ(2, histogram.getCount(2)); // For items 27 and 23
+ ASSERT_EQ(3, histogram.getLastItemNumber(2)); // Item 23 was the 4th item added (index 3, zero-based).
+ ASSERT_EQ(1, histogram.getCount(5)); // For item 53
+ ASSERT_EQ(1, histogram.getLastItemNumber(5)); // Item 53 was the 2nd item added (index 1, zero-based).
+ ASSERT_EQ(1, histogram.getCount(17)); // For item 171
+ ASSERT_EQ(4, histogram.getCount()); // A total of four items were added.
+
+ // Add values out of range.
+ histogram.add(-5);
+ ASSERT_EQ(1, histogram.getCountBelowRange()); // -5 is below zero.
+ ASSERT_EQ(0, histogram.getCountAboveRange());
+ ASSERT_EQ(5, histogram.getCount());
+
+ histogram.add(200);
+ ASSERT_EQ(1, histogram.getCountBelowRange());
+ ASSERT_EQ(1, histogram.getCountAboveRange()); // 200 is above top bin
+ ASSERT_EQ(6, histogram.getCount());
+
+ // Try to read values out of range. Should not crash.
+ // Legal index range is 0 to numBins-1
+ histogram.add(-1);
+ histogram.add(kNumBins);
+ ASSERT_EQ(0, histogram.getCount(-1)); // edge
+ ASSERT_EQ(0, histogram.getCount(kNumBins)); // edge
+ ASSERT_EQ(0, histogram.getCount(-1234)); // extreme
+ ASSERT_EQ(0, histogram.getCount(98765)); // extreme
+ ASSERT_EQ(0, histogram.getLastItemNumber(-1));
+ ASSERT_EQ(0, histogram.getLastItemNumber(kNumBins));
+
+ // Clear all the counts.
+ histogram.clear();
+ // Is it clear?
+ for (int i = 0; i < kNumBins; i++) {
+ ASSERT_EQ(0, histogram.getCount(i));
+ }
+ ASSERT_EQ(0, histogram.getCountBelowRange());
+ ASSERT_EQ(0, histogram.getCountAboveRange());
+ ASSERT_EQ(0, histogram.getCount());
+}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index c516d20..ad79e9c 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -1,7 +1,15 @@
cc_library_headers {
name: "libaudioclient_headers",
vendor_available: true,
- export_include_dirs: ["include"],
+ header_libs: [
+ "libaudiofoundation_headers",
+ ],
+ export_include_dirs: [
+ "include",
+ ],
+ export_header_lib_headers: [
+ "libaudiofoundation_headers",
+ ],
}
cc_library_shared {
@@ -63,6 +71,7 @@
"TrackPlayerBase.cpp",
],
shared_libs: [
+ "libaudiofoundation",
"libaudioutils",
"libaudiopolicy",
"libaudiomanager",
@@ -84,6 +93,7 @@
header_libs: [
"libaudioclient_headers",
"libbase_headers",
+ "libmedia_headers",
],
export_header_lib_headers: ["libaudioclient_headers"],
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index cf11936..1cc5fe6 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -159,7 +159,11 @@
mIEffect = iEffect;
mCblkMemory = cblk;
- mCblk = static_cast<effect_param_cblk_t*>(cblk->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ mCblk = static_cast<effect_param_cblk_t*>(cblk->unsecurePointer());
int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
mCblk->buffer = (uint8_t *)mCblk + bufOffset;
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index a1b04ca..0f2d48e 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -759,7 +759,11 @@
status = NO_INIT;
goto exit;
}
- iMemPointer = output.cblk ->pointer();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ iMemPointer = output.cblk ->unsecurePointer();
if (iMemPointer == NULL) {
ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
status = NO_INIT;
@@ -774,7 +778,11 @@
if (output.buffers == 0) {
buffers = cblk + 1;
} else {
- buffers = output.buffers->pointer();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ buffers = output.buffers->unsecurePointer();
if (buffers == NULL) {
ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
status = NO_INIT;
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 4a80cd3..e8d7b60 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -406,7 +406,7 @@
mDoNotReconnect = doNotReconnect;
ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
- __func__, sharedBuffer->pointer(), sharedBuffer->size());
+ __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
ALOGV("%s(): streamType %d frameCount %zu flags %04x",
__func__, streamType, frameCount, flags);
@@ -1508,7 +1508,11 @@
status = NO_INIT;
goto exit;
}
- void *iMemPointer = iMem->pointer();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ void *iMemPointer = iMem->unsecurePointer();
if (iMemPointer == NULL) {
ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
status = NO_INIT;
@@ -1563,7 +1567,11 @@
if (mSharedBuffer == 0) {
buffers = cblk + 1;
} else {
- buffers = mSharedBuffer->pointer();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ buffers = mSharedBuffer->unsecurePointer();
if (buffers == NULL) {
ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
status = NO_INIT;
diff --git a/media/libaudioclient/AudioTrackShared.cpp b/media/libaudioclient/AudioTrackShared.cpp
index ee6c335..f1f8f9c 100644
--- a/media/libaudioclient/AudioTrackShared.cpp
+++ b/media/libaudioclient/AudioTrackShared.cpp
@@ -984,8 +984,9 @@
// ---------------------------------------------------------------------------
StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
- size_t frameCount, size_t frameSize)
- : AudioTrackServerProxy(cblk, buffers, frameCount, frameSize),
+ size_t frameCount, size_t frameSize, uint32_t sampleRate)
+ : AudioTrackServerProxy(cblk, buffers, frameCount, frameSize, false /*clientInServer*/,
+ sampleRate),
mObserver(&cblk->u.mStatic.mSingleStateQueue),
mPosLoopMutator(&cblk->u.mStatic.mPosLoopQueue),
mFramesReadySafe(frameCount), mFramesReady(frameCount),
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index efa0512..6e9a7cf 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -24,8 +24,8 @@
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-#include <media/TimeCheck.h>
#include <mediautils/ServiceUtilities.h>
+#include <mediautils/TimeCheck.h>
#include "IAudioFlinger.h"
namespace android {
@@ -340,11 +340,11 @@
return reply.readInt32();
}
- virtual void setRecordSilenced(uid_t uid, bool silenced)
+ virtual void setRecordSilenced(audio_port_handle_t portId, bool silenced)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32(uid);
+ data.writeInt32(portId);
data.writeInt32(silenced ? 1 : 0);
remote()->transact(SET_RECORD_SILENCED, data, &reply);
}
@@ -392,20 +392,18 @@
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
+ const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags)
{
- if (output == NULL || config == NULL || devices == NULL || latencyMs == NULL) {
+ if (output == nullptr || config == nullptr || device == nullptr || latencyMs == nullptr) {
return BAD_VALUE;
}
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(module);
data.write(config, sizeof(audio_config_t));
- data.writeInt32(*devices);
- data.writeString8(address);
+ data.writeParcelable(*device);
data.writeInt32((int32_t) flags);
status_t status = remote()->transact(OPEN_OUTPUT, data, &reply);
if (status != NO_ERROR) {
@@ -420,7 +418,6 @@
*output = (audio_io_handle_t)reply.readInt32();
ALOGV("openOutput() returned output, %d", *output);
reply.read(config, sizeof(audio_config_t));
- *devices = (audio_devices_t)reply.readInt32();
*latencyMs = reply.readInt32();
return NO_ERROR;
}
@@ -1156,11 +1153,9 @@
} break;
case SET_RECORD_SILENCED: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- uid_t uid = data.readInt32();
- audio_source_t source;
- data.read(&source, sizeof(audio_source_t));
+ audio_port_handle_t portId = data.readInt32();
bool silenced = data.readInt32() == 1;
- setRecordSilenced(uid, silenced);
+ setRecordSilenced(portId, silenced);
return NO_ERROR;
} break;
case SET_PARAMETERS: {
@@ -1200,19 +1195,21 @@
if (data.read(&config, sizeof(audio_config_t)) != NO_ERROR) {
ALOGE("b/23905951");
}
- audio_devices_t devices = (audio_devices_t)data.readInt32();
- String8 address(data.readString8());
+ sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
+ status_t status = NO_ERROR;
+ if ((status = data.readParcelable(device.get())) != NO_ERROR) {
+ reply->writeInt32((int32_t)status);
+ return NO_ERROR;
+ }
audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
uint32_t latencyMs = 0;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = openOutput(module, &output, &config,
- &devices, address, &latencyMs, flags);
+ status = openOutput(module, &output, &config, device, &latencyMs, flags);
ALOGV("OPEN_OUTPUT output, %d", output);
reply->writeInt32((int32_t)status);
if (status == NO_ERROR) {
reply->writeInt32((int32_t)output);
reply->write(&config, sizeof(audio_config_t));
- reply->writeInt32(devices);
reply->writeInt32(latencyMs);
}
return NO_ERROR;
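For context, a minimal caller-side sketch of the reworked openOutput(): the output device now travels as a single DeviceDescriptorBase parcelable instead of a device-type bitmask plus an address string. This assumes `af` is a connected sp<IAudioFlinger> and `module` an already-loaded HW module handle:

    sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(AUDIO_DEVICE_OUT_SPEAKER);
    device->setAddress("");   // address only matters for devices such as A2DP or remote submix
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
    uint32_t latencyMs = 0;
    status_t status = af->openOutput(module, &output, &config, device,
                                     &latencyMs, AUDIO_OUTPUT_FLAG_NONE);
    if (status == NO_ERROR) {
        ALOGV("opened output %d on device %s, latency %u ms",
              output, device->toString().c_str(), latencyMs);
    }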
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 64f0aca..7cc95e5 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -26,8 +26,8 @@
#include <binder/Parcel.h>
#include <media/AudioEffect.h>
#include <media/IAudioPolicyService.h>
-#include <media/TimeCheck.h>
#include <mediautils/ServiceUtilities.h>
+#include <mediautils/TimeCheck.h>
#include <system/audio.h>
namespace android {
diff --git a/media/libaudioclient/IAudioTrack.cpp b/media/libaudioclient/IAudioTrack.cpp
index 83a568a..6219e7a 100644
--- a/media/libaudioclient/IAudioTrack.cpp
+++ b/media/libaudioclient/IAudioTrack.cpp
@@ -62,7 +62,7 @@
status_t status = remote()->transact(GET_CBLK, data, &reply);
if (status == NO_ERROR) {
cblk = interface_cast<IMemory>(reply.readStrongBinder());
- if (cblk != 0 && cblk->pointer() == NULL) {
+ if (cblk != 0 && cblk->unsecurePointer() == NULL) {
cblk.clear();
}
}
diff --git a/media/libaudioclient/IEffect.cpp b/media/libaudioclient/IEffect.cpp
index ce72dae..5d47dff 100644
--- a/media/libaudioclient/IEffect.cpp
+++ b/media/libaudioclient/IEffect.cpp
@@ -122,7 +122,7 @@
status_t status = remote()->transact(GET_CBLK, data, &reply);
if (status == NO_ERROR) {
cblk = interface_cast<IMemory>(reply.readStrongBinder());
- if (cblk != 0 && cblk->pointer() == NULL) {
+ if (cblk != 0 && cblk->unsecurePointer() == NULL) {
cblk.clear();
}
}
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 8ec8931..0a65857 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -27,6 +27,7 @@
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
#include <media/AudioClient.h>
+#include <media/DeviceDescriptorBase.h>
#include <media/IAudioTrack.h>
#include <media/IAudioFlingerClient.h>
#include <system/audio.h>
@@ -70,8 +71,12 @@
return DEAD_OBJECT;
}
if (parcel->readInt32() != 0) {
+ // TODO: Using unsecurePointer() has some associated security
+ // pitfalls (see declaration for details).
+ // Either document why it is safe in this case or address
+ // the issue (e.g. by copying).
sharedBuffer = interface_cast<IMemory>(parcel->readStrongBinder());
- if (sharedBuffer == 0 || sharedBuffer->pointer() == NULL) {
+ if (sharedBuffer == 0 || sharedBuffer->unsecurePointer() == NULL) {
return BAD_VALUE;
}
}
@@ -269,13 +274,21 @@
(void)parcel->read(&inputId, sizeof(audio_io_handle_t));
if (parcel->readInt32() != 0) {
cblk = interface_cast<IMemory>(parcel->readStrongBinder());
- if (cblk == 0 || cblk->pointer() == NULL) {
+ // TODO: Using unsecurePointer() has some associated security
+ // pitfalls (see declaration for details).
+ // Either document why it is safe in this case or address
+ // the issue (e.g. by copying).
+ if (cblk == 0 || cblk->unsecurePointer() == NULL) {
return BAD_VALUE;
}
}
if (parcel->readInt32() != 0) {
buffers = interface_cast<IMemory>(parcel->readStrongBinder());
- if (buffers == 0 || buffers->pointer() == NULL) {
+ // TODO: Using unsecurePointer() has some associated security
+ // pitfalls (see declaration for details).
+ // Either document why it is safe in this case or address
+ // the issue (e.g. by copying).
+ if (buffers == 0 || buffers->unsecurePointer() == NULL) {
return BAD_VALUE;
}
}
@@ -384,7 +397,7 @@
// mic mute/state
virtual status_t setMicMute(bool state) = 0;
virtual bool getMicMute() const = 0;
- virtual void setRecordSilenced(uid_t uid, bool silenced) = 0;
+ virtual void setRecordSilenced(audio_port_handle_t portId, bool silenced) = 0;
virtual status_t setParameters(audio_io_handle_t ioHandle,
const String8& keyValuePairs) = 0;
@@ -404,8 +417,7 @@
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
+ const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags) = 0;
virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1,
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 52bb2fb..d509be6 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -11,6 +11,9 @@
defaults: ["libaudioclient_tests_defaults"],
srcs: ["test_create_audiotrack.cpp",
"test_create_utils.cpp"],
+ header_libs: [
+ "libmedia_headers",
+ ],
shared_libs: [
"libaudioclient",
"libbinder",
@@ -25,6 +28,9 @@
defaults: ["libaudioclient_tests_defaults"],
srcs: ["test_create_audiorecord.cpp",
"test_create_utils.cpp"],
+ header_libs: [
+ "libmedia_headers",
+ ],
shared_libs: [
"libaudioclient",
"libbinder",
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index 5045d87..edc06d2 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -2,29 +2,45 @@
name: "libaudiofoundation_headers",
vendor_available: true,
export_include_dirs: ["include"],
+ header_libs: [
+ "libaudio_system_headers",
+ "libmedia_helper_headers",
+ ],
+ export_header_lib_headers: [
+ "libaudio_system_headers",
+ "libmedia_helper_headers",
+ ],
}
-cc_library_shared {
+cc_library {
name: "libaudiofoundation",
vendor_available: true,
+ double_loadable: true,
srcs: [
+ "AudioContainers.cpp",
"AudioGain.cpp",
+ "AudioPort.cpp",
+ "AudioProfile.cpp",
+ "DeviceDescriptorBase.cpp",
],
shared_libs: [
+ "libaudioutils",
"libbase",
"libbinder",
"liblog",
+ "libmedia_helper",
"libutils",
],
header_libs: [
- "libaudio_system_headers",
"libaudiofoundation_headers",
],
- export_header_lib_headers: ["libaudiofoundation_headers"],
+ export_header_lib_headers: [
+ "libaudiofoundation_headers",
+ ],
cflags: [
"-Werror",
diff --git a/media/libaudiofoundation/AudioContainers.cpp b/media/libaudiofoundation/AudioContainers.cpp
new file mode 100644
index 0000000..adc5d40
--- /dev/null
+++ b/media/libaudiofoundation/AudioContainers.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sstream>
+#include <string>
+
+#include <media/AudioContainers.h>
+
+namespace android {
+
+const DeviceTypeSet& getAudioDeviceOutAllSet() {
+ static const DeviceTypeSet audioDeviceOutAllSet = DeviceTypeSet(
+ std::begin(AUDIO_DEVICE_OUT_ALL_ARRAY),
+ std::end(AUDIO_DEVICE_OUT_ALL_ARRAY));
+ return audioDeviceOutAllSet;
+}
+
+const DeviceTypeSet& getAudioDeviceOutAllA2dpSet() {
+ static const DeviceTypeSet audioDeviceOutAllA2dpSet = DeviceTypeSet(
+ std::begin(AUDIO_DEVICE_OUT_ALL_A2DP_ARRAY),
+ std::end(AUDIO_DEVICE_OUT_ALL_A2DP_ARRAY));
+ return audioDeviceOutAllA2dpSet;
+}
+
+const DeviceTypeSet& getAudioDeviceOutAllScoSet() {
+ static const DeviceTypeSet audioDeviceOutAllScoSet = DeviceTypeSet(
+ std::begin(AUDIO_DEVICE_OUT_ALL_SCO_ARRAY),
+ std::end(AUDIO_DEVICE_OUT_ALL_SCO_ARRAY));
+ return audioDeviceOutAllScoSet;
+}
+
+const DeviceTypeSet& getAudioDeviceInAllSet() {
+ static const DeviceTypeSet audioDeviceInAllSet = DeviceTypeSet(
+ std::begin(AUDIO_DEVICE_IN_ALL_ARRAY),
+ std::end(AUDIO_DEVICE_IN_ALL_ARRAY));
+ return audioDeviceInAllSet;
+}
+
+bool deviceTypesToString(const DeviceTypeSet &deviceTypes, std::string &str) {
+ bool ret = true;
+ for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
+ std::string deviceTypeStr;
+ ret = audio_is_output_device(*it) ?
+ OutputDeviceConverter::toString(*it, deviceTypeStr) :
+ InputDeviceConverter::toString(*it, deviceTypeStr);
+ if (!ret) {
+ break;
+ }
+ str.append(deviceTypeStr);
+ if (++it != deviceTypes.end()) {
+ str.append(" , ");
+ }
+ }
+ if (!ret) {
+ str = "Unknown values";
+ }
+ return ret;
+}
+
+std::string dumpDeviceTypes(const DeviceTypeSet &deviceTypes) {
+ std::string ret;
+ for (auto it = deviceTypes.begin(); it != deviceTypes.end();) {
+ std::stringstream ss;
+ ss << "0x" << std::hex << (*it);
+ ret.append(ss.str());
+ if (++it != deviceTypes.end()) {
+ ret.append(" , ");
+ }
+ }
+ return ret;
+}
+
+} // namespace android
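A short usage sketch for the string helpers above (a free-standing fragment; assumes the AudioContainers.h declarations and liblog are in scope):

    DeviceTypeSet types = {AUDIO_DEVICE_OUT_SPEAKER, AUDIO_DEVICE_OUT_WIRED_HEADPHONE};
    std::string readable;
    if (!deviceTypesToString(types, readable)) {
        // An unknown value somewhere in the set: fall back to the raw hex dump.
        readable = dumpDeviceTypes(types);
    }
    ALOGV("selected output devices: %s", readable.c_str());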
diff --git a/media/libaudiofoundation/AudioGain.cpp b/media/libaudiofoundation/AudioGain.cpp
index 9d1d6db..0d28335 100644
--- a/media/libaudiofoundation/AudioGain.cpp
+++ b/media/libaudiofoundation/AudioGain.cpp
@@ -24,6 +24,8 @@
#define ALOGVV(a...) do { } while(0)
#endif
+#include <algorithm>
+
#include <android-base/stringprintf.h>
#include <media/AudioGain.h>
#include <utils/Log.h>
@@ -111,6 +113,22 @@
dst->append(base::StringPrintf("%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms));
}
+bool AudioGain::equals(const sp<AudioGain>& other) const
+{
+ return other != nullptr &&
+ mUseInChannelMask == other->mUseInChannelMask &&
+ mUseForVolume == other->mUseForVolume &&
+ // Compare audio gain
+ mGain.mode == other->mGain.mode &&
+ mGain.channel_mask == other->mGain.channel_mask &&
+ mGain.min_value == other->mGain.min_value &&
+ mGain.max_value == other->mGain.max_value &&
+ mGain.default_value == other->mGain.default_value &&
+ mGain.step_value == other->mGain.step_value &&
+ mGain.min_ramp_ms == other->mGain.min_ramp_ms &&
+ mGain.max_ramp_ms == other->mGain.max_ramp_ms;
+}
+
status_t AudioGain::writeToParcel(android::Parcel *parcel) const
{
status_t status = NO_ERROR;
@@ -145,9 +163,17 @@
return status;
}
+bool AudioGains::equals(const AudioGains &other) const
+{
+ return std::equal(begin(), end(), other.begin(), other.end(),
+ [](const sp<AudioGain>& left, const sp<AudioGain>& right) {
+ return left->equals(right);
+ });
+}
+
status_t AudioGains::writeToParcel(android::Parcel *parcel) const {
status_t status = NO_ERROR;
- if ((status = parcel->writeUint64(this->size())) != NO_ERROR) return status;
+ if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
for (const auto &audioGain : *this) {
if ((status = parcel->writeParcelable(*audioGain)) != NO_ERROR) {
break;
@@ -158,15 +184,14 @@
status_t AudioGains::readFromParcel(const android::Parcel *parcel) {
status_t status = NO_ERROR;
- uint64_t count;
- if ((status = parcel->readUint64(&count)) != NO_ERROR) return status;
- for (uint64_t i = 0; i < count; i++) {
- sp<AudioGain> audioGain = new AudioGain(0, false);
- if ((status = parcel->readParcelable(audioGain.get())) != NO_ERROR) {
+ this->clear();
+ if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
+ for (size_t i = 0; i < this->size(); i++) {
+ this->at(i) = new AudioGain(0, false);
+ if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
this->clear();
break;
}
- this->push_back(audioGain);
}
return status;
}
diff --git a/media/libaudiofoundation/AudioPort.cpp b/media/libaudiofoundation/AudioPort.cpp
new file mode 100644
index 0000000..f988690
--- /dev/null
+++ b/media/libaudiofoundation/AudioPort.cpp
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "AudioPort"
+
+#include <algorithm>
+
+#include <android-base/stringprintf.h>
+#include <media/AudioPort.h>
+#include <utils/Log.h>
+
+namespace android {
+
+void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
+{
+ for (const auto& profileToImport : port->mProfiles) {
+ // Import only valid profiles, i.e. with a valid format and non-empty rates and channel masks
+ if (!profileToImport->isValid()) {
+ continue;
+ }
+ if (std::find_if(mProfiles.begin(), mProfiles.end(),
+ [profileToImport](const auto &profile) {
+ return *profile == *profileToImport; }) == mProfiles.end()) {
+ addAudioProfile(profileToImport);
+ }
+ }
+}
+
+void AudioPort::toAudioPort(struct audio_port *port) const {
+ // TODO: update this function once the audio_port structure reflects the new profile definition.
+ // For compatibility reasons: flattening the AudioProfiles into the audio_port structure.
+ FormatSet flatenedFormats;
+ SampleRateSet flatenedRates;
+ ChannelMaskSet flatenedChannels;
+ for (const auto& profile : mProfiles) {
+ if (profile->isValid()) {
+ audio_format_t formatToExport = profile->getFormat();
+ const SampleRateSet &ratesToExport = profile->getSampleRates();
+ const ChannelMaskSet &channelsToExport = profile->getChannels();
+
+ flatenedFormats.insert(formatToExport);
+ flatenedRates.insert(ratesToExport.begin(), ratesToExport.end());
+ flatenedChannels.insert(channelsToExport.begin(), channelsToExport.end());
+
+ if (flatenedRates.size() > AUDIO_PORT_MAX_SAMPLING_RATES ||
+ flatenedChannels.size() > AUDIO_PORT_MAX_CHANNEL_MASKS ||
+ flatenedFormats.size() > AUDIO_PORT_MAX_FORMATS) {
+ ALOGE("%s: bailing out: cannot export profiles to port config", __func__);
+ return;
+ }
+ }
+ }
+ port->role = mRole;
+ port->type = mType;
+ strlcpy(port->name, mName.c_str(), AUDIO_PORT_MAX_NAME_LEN);
+ port->num_sample_rates = flatenedRates.size();
+ port->num_channel_masks = flatenedChannels.size();
+ port->num_formats = flatenedFormats.size();
+ std::copy(flatenedRates.begin(), flatenedRates.end(), port->sample_rates);
+ std::copy(flatenedChannels.begin(), flatenedChannels.end(), port->channel_masks);
+ std::copy(flatenedFormats.begin(), flatenedFormats.end(), port->formats);
+
+ ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
+
+ port->num_gains = std::min(mGains.size(), (size_t) AUDIO_PORT_MAX_GAINS);
+ for (size_t i = 0; i < port->num_gains; i++) {
+ port->gains[i] = mGains[i]->getGain();
+ }
+}
+
+void AudioPort::dump(std::string *dst, int spaces, bool verbose) const {
+ if (!mName.empty()) {
+ dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+ }
+ if (verbose) {
+ std::string profilesStr;
+ mProfiles.dump(&profilesStr, spaces);
+ dst->append(profilesStr);
+
+ if (mGains.size() != 0) {
+ dst->append(base::StringPrintf("%*s- gains:\n", spaces, ""));
+ for (size_t i = 0; i < mGains.size(); i++) {
+ std::string gainStr;
+ mGains[i]->dump(&gainStr, spaces + 2, i);
+ dst->append(gainStr);
+ }
+ }
+ }
+}
+
+void AudioPort::log(const char* indent) const
+{
+ ALOGI("%s Port[nm:%s, type:%d, role:%d]", indent, mName.c_str(), mType, mRole);
+}
+
+bool AudioPort::equals(const sp<AudioPort> &other) const
+{
+ return other != nullptr &&
+ mGains.equals(other->getGains()) &&
+ mName.compare(other->getName()) == 0 &&
+ mType == other->getType() &&
+ mRole == other->getRole() &&
+ mProfiles.equals(other->getAudioProfiles());
+}
+
+status_t AudioPort::writeToParcel(Parcel *parcel) const
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->writeUtf8AsUtf16(mName)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mType)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mRole)) != NO_ERROR) return status;
+ if ((status = parcel->writeParcelable(mProfiles)) != NO_ERROR) return status;
+ if ((status = parcel->writeParcelable(mGains)) != NO_ERROR) return status;
+ return status;
+}
+
+status_t AudioPort::readFromParcel(const Parcel *parcel)
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->readUtf8FromUtf16(&mName)) != NO_ERROR) return status;
+ static_assert(sizeof(mType) == sizeof(uint32_t));
+ if ((status = parcel->readUint32(reinterpret_cast<uint32_t*>(&mType))) != NO_ERROR) {
+ return status;
+ }
+ static_assert(sizeof(mRole) == sizeof(uint32_t));
+ if ((status = parcel->readUint32(reinterpret_cast<uint32_t*>(&mRole))) != NO_ERROR) {
+ return status;
+ }
+ mProfiles.clear();
+ if ((status = parcel->readParcelable(&mProfiles)) != NO_ERROR) return status;
+ mGains.clear();
+ if ((status = parcel->readParcelable(&mGains)) != NO_ERROR) return status;
+ return status;
+}
+
+// --- AudioPortConfig class implementation
+
+status_t AudioPortConfig::applyAudioPortConfig(
+ const struct audio_port_config *config,
+ struct audio_port_config *backupConfig __unused)
+{
+ if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+ mSamplingRate = config->sample_rate;
+ }
+ if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+ mChannelMask = config->channel_mask;
+ }
+ if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+ mFormat = config->format;
+ }
+ if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
+ mGain = config->gain;
+ }
+
+ return NO_ERROR;
+}
+
+namespace {
+
+template<typename T>
+void updateField(
+ const T& portConfigField, T audio_port_config::*port_config_field,
+ struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
+ unsigned int configMask, T defaultValue)
+{
+ if (dstConfig->config_mask & configMask) {
+ if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
+ dstConfig->*port_config_field = srcConfig->*port_config_field;
+ } else {
+ dstConfig->*port_config_field = portConfigField;
+ }
+ } else {
+ dstConfig->*port_config_field = defaultValue;
+ }
+}
+
+} // namespace
+
+void AudioPortConfig::toAudioPortConfig(
+ struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig) const
+{
+ updateField(mSamplingRate, &audio_port_config::sample_rate,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
+ updateField(mChannelMask, &audio_port_config::channel_mask,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
+ (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
+ updateField(mFormat, &audio_port_config::format,
+ dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
+ dstConfig->id = mId;
+
+ sp<AudioPort> audioport = getAudioPort();
+ if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
+ dstConfig->gain = mGain;
+ if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)
+ && audioport->checkGain(&srcConfig->gain, srcConfig->gain.index) == OK) {
+ dstConfig->gain = srcConfig->gain;
+ }
+ } else {
+ dstConfig->gain.index = -1;
+ }
+ if (dstConfig->gain.index != -1) {
+ dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN;
+ } else {
+ dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
+ }
+}
+
+bool AudioPortConfig::hasGainController(bool canUseForVolume) const
+{
+ sp<AudioPort> audioport = getAudioPort();
+ if (!audioport) {
+ return false;
+ }
+ return canUseForVolume ? audioport->getGains().canUseForVolume()
+ : audioport->getGains().size() > 0;
+}
+
+bool AudioPortConfig::equals(const sp<AudioPortConfig> &other) const
+{
+ return other != nullptr &&
+ mSamplingRate == other->getSamplingRate() &&
+ mFormat == other->getFormat() &&
+ mChannelMask == other->getChannelMask() &&
+ // Compare audio gain config
+ mGain.index == other->mGain.index &&
+ mGain.mode == other->mGain.mode &&
+ mGain.channel_mask == other->mGain.channel_mask &&
+ std::equal(std::begin(mGain.values), std::end(mGain.values),
+ std::begin(other->mGain.values)) &&
+ mGain.ramp_duration_ms == other->mGain.ramp_duration_ms;
+}
+
+status_t AudioPortConfig::writeToParcel(Parcel *parcel) const
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->writeUint32(mSamplingRate)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mFormat)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mChannelMask)) != NO_ERROR) return status;
+ if ((status = parcel->writeInt32(mId)) != NO_ERROR) return status;
+ // Write mGain to parcel.
+ if ((status = parcel->writeInt32(mGain.index)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mGain.mode)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mGain.channel_mask)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mGain.ramp_duration_ms)) != NO_ERROR) return status;
+ std::vector<int> values(std::begin(mGain.values), std::end(mGain.values));
+ if ((status = parcel->writeInt32Vector(values)) != NO_ERROR) return status;
+ return status;
+}
+
+status_t AudioPortConfig::readFromParcel(const Parcel *parcel)
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->readUint32(&mSamplingRate)) != NO_ERROR) return status;
+ static_assert(sizeof(mFormat) == sizeof(uint32_t));
+ if ((status = parcel->readUint32(reinterpret_cast<uint32_t*>(&mFormat))) != NO_ERROR) {
+ return status;
+ }
+ if ((status = parcel->readUint32(&mChannelMask)) != NO_ERROR) return status;
+ if ((status = parcel->readInt32(&mId)) != NO_ERROR) return status;
+ // Read mGain from parcel.
+ if ((status = parcel->readInt32(&mGain.index)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mGain.mode)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mGain.channel_mask)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mGain.ramp_duration_ms)) != NO_ERROR) return status;
+ std::vector<int> values;
+ if ((status = parcel->readInt32Vector(&values)) != NO_ERROR) return status;
+ if (values.size() != std::size(mGain.values)) {
+ return BAD_VALUE;
+ }
+ std::copy(values.begin(), values.end(), mGain.values);
+ return status;
+}
+
+} // namespace android
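To illustrate how updateField() above resolves each field in AudioPortConfig::toAudioPortConfig(), here is a small sketch; `stub` is a hypothetical minimal AudioPortConfig subclass (the tests later in this change define a similar AudioPortConfigTestStub):

    struct audio_port_config dst = {};
    dst.config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE;   // the caller only asks for the rate
    stub->toAudioPortConfig(&dst, nullptr /*srcConfig*/);
    // dst.sample_rate  <- the stub's stored sampling rate (bit set, no srcConfig override)
    // dst.channel_mask <- AUDIO_CHANNEL_NONE   (bit not set: reset to the default)
    // dst.format       <- AUDIO_FORMAT_INVALID (bit not set: reset to the default)
    // dst.gain.index   <- -1, so AUDIO_PORT_CONFIG_GAIN stays cleared in dst.config_mask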
diff --git a/media/libaudiofoundation/AudioProfile.cpp b/media/libaudiofoundation/AudioProfile.cpp
new file mode 100644
index 0000000..91be346
--- /dev/null
+++ b/media/libaudiofoundation/AudioProfile.cpp
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <set>
+
+#define LOG_TAG "AudioProfile"
+//#define LOG_NDEBUG 0
+
+#include <android-base/stringprintf.h>
+#include <media/AudioContainers.h>
+#include <media/AudioProfile.h>
+#include <media/TypeConverter.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+bool operator == (const AudioProfile &left, const AudioProfile &right)
+{
+ return (left.getFormat() == right.getFormat()) &&
+ (left.getChannels() == right.getChannels()) &&
+ (left.getSampleRates() == right.getSampleRates());
+}
+
+// static
+sp<AudioProfile> AudioProfile::createFullDynamic(audio_format_t dynamicFormat)
+{
+ AudioProfile* dynamicProfile = new AudioProfile(dynamicFormat,
+ ChannelMaskSet(), SampleRateSet());
+ dynamicProfile->setDynamicFormat(true);
+ dynamicProfile->setDynamicChannels(true);
+ dynamicProfile->setDynamicRate(true);
+ return dynamicProfile;
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+ audio_channel_mask_t channelMasks,
+ uint32_t samplingRate) :
+ mName(""),
+ mFormat(format)
+{
+ mChannelMasks.insert(channelMasks);
+ mSamplingRates.insert(samplingRate);
+}
+
+AudioProfile::AudioProfile(audio_format_t format,
+ const ChannelMaskSet &channelMasks,
+ const SampleRateSet &samplingRateCollection) :
+ mName(""),
+ mFormat(format),
+ mChannelMasks(channelMasks),
+ mSamplingRates(samplingRateCollection) {}
+
+void AudioProfile::setChannels(const ChannelMaskSet &channelMasks)
+{
+ if (mIsDynamicChannels) {
+ mChannelMasks = channelMasks;
+ }
+}
+
+void AudioProfile::setSampleRates(const SampleRateSet &sampleRates)
+{
+ if (mIsDynamicRate) {
+ mSamplingRates = sampleRates;
+ }
+}
+
+void AudioProfile::clear()
+{
+ if (mIsDynamicChannels) {
+ mChannelMasks.clear();
+ }
+ if (mIsDynamicRate) {
+ mSamplingRates.clear();
+ }
+}
+
+void AudioProfile::dump(std::string *dst, int spaces) const
+{
+ dst->append(base::StringPrintf("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
+ mIsDynamicChannels ? "[dynamic channels]" : "",
+ mIsDynamicRate ? "[dynamic rates]" : ""));
+ if (mName.length() != 0) {
+ dst->append(base::StringPrintf("%*s- name: %s\n", spaces, "", mName.c_str()));
+ }
+ std::string formatLiteral;
+ if (FormatConverter::toString(mFormat, formatLiteral)) {
+ dst->append(base::StringPrintf("%*s- format: %s\n", spaces, "", formatLiteral.c_str()));
+ }
+ if (!mSamplingRates.empty()) {
+ dst->append(base::StringPrintf("%*s- sampling rates:", spaces, ""));
+ for (auto it = mSamplingRates.begin(); it != mSamplingRates.end();) {
+ dst->append(base::StringPrintf("%d", *it));
+ dst->append(++it == mSamplingRates.end() ? "" : ", ");
+ }
+ dst->append("\n");
+ }
+
+ if (!mChannelMasks.empty()) {
+ dst->append(base::StringPrintf("%*s- channel masks:", spaces, ""));
+ for (auto it = mChannelMasks.begin(); it != mChannelMasks.end();) {
+ dst->append(base::StringPrintf("0x%04x", *it));
+ dst->append(++it == mChannelMasks.end() ? "" : ", ");
+ }
+ dst->append("\n");
+ }
+}
+
+bool AudioProfile::equals(const sp<AudioProfile>& other) const
+{
+ return other != nullptr &&
+ mName.compare(other->mName) == 0 &&
+ mFormat == other->getFormat() &&
+ mChannelMasks == other->getChannels() &&
+ mSamplingRates == other->getSampleRates() &&
+ mIsDynamicFormat == other->isDynamicFormat() &&
+ mIsDynamicChannels == other->isDynamicChannels() &&
+ mIsDynamicRate == other->isDynamicRate();
+}
+
+status_t AudioProfile::writeToParcel(Parcel *parcel) const
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->writeUtf8AsUtf16(mName)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mFormat)) != NO_ERROR) return status;
+ std::vector<int> values(mChannelMasks.begin(), mChannelMasks.end());
+ if ((status = parcel->writeInt32Vector(values)) != NO_ERROR) return status;
+ values.clear();
+ values.assign(mSamplingRates.begin(), mSamplingRates.end());
+ if ((status = parcel->writeInt32Vector(values)) != NO_ERROR) return status;
+ if ((status = parcel->writeBool(mIsDynamicFormat)) != NO_ERROR) return status;
+ if ((status = parcel->writeBool(mIsDynamicChannels)) != NO_ERROR) return status;
+ if ((status = parcel->writeBool(mIsDynamicRate)) != NO_ERROR) return status;
+ return status;
+}
+
+status_t AudioProfile::readFromParcel(const Parcel *parcel)
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->readUtf8FromUtf16(&mName)) != NO_ERROR) return status;
+ static_assert(sizeof(mFormat) == sizeof(uint32_t));
+ if ((status = parcel->readUint32(reinterpret_cast<uint32_t*>(&mFormat))) != NO_ERROR) {
+ return status;
+ }
+ std::vector<int> values;
+ if ((status = parcel->readInt32Vector(&values)) != NO_ERROR) return status;
+ mChannelMasks.clear();
+ mChannelMasks.insert(values.begin(), values.end());
+ values.clear();
+ if ((status = parcel->readInt32Vector(&values)) != NO_ERROR) return status;
+ mSamplingRates.clear();
+ mSamplingRates.insert(values.begin(), values.end());
+ if ((status = parcel->readBool(&mIsDynamicFormat)) != NO_ERROR) return status;
+ if ((status = parcel->readBool(&mIsDynamicChannels)) != NO_ERROR) return status;
+ if ((status = parcel->readBool(&mIsDynamicRate)) != NO_ERROR) return status;
+ return status;
+}
+
+ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
+{
+ ssize_t index = size();
+ push_back(profile);
+ return index;
+}
+
+void AudioProfileVector::clearProfiles()
+{
+ for (auto it = begin(); it != end();) {
+ if ((*it)->isDynamicFormat() && (*it)->hasValidFormat()) {
+ it = erase(it);
+ } else {
+ (*it)->clear();
+ ++it;
+ }
+ }
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfile() const
+{
+ for (const auto &profile : *this) {
+ if (profile->isValid()) {
+ return profile;
+ }
+ }
+ return nullptr;
+}
+
+sp<AudioProfile> AudioProfileVector::getFirstValidProfileFor(audio_format_t format) const
+{
+ for (const auto &profile : *this) {
+ if (profile->isValid() && profile->getFormat() == format) {
+ return profile;
+ }
+ }
+ return nullptr;
+}
+
+FormatVector AudioProfileVector::getSupportedFormats() const
+{
+ FormatVector supportedFormats;
+ for (const auto &profile : *this) {
+ if (profile->hasValidFormat()) {
+ supportedFormats.push_back(profile->getFormat());
+ }
+ }
+ return supportedFormats;
+}
+
+bool AudioProfileVector::hasDynamicChannelsFor(audio_format_t format) const
+{
+ for (const auto &profile : *this) {
+ if (profile->getFormat() == format && profile->isDynamicChannels()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool AudioProfileVector::hasDynamicFormat() const
+{
+ for (const auto &profile : *this) {
+ if (profile->isDynamicFormat()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool AudioProfileVector::hasDynamicProfile() const
+{
+ for (const auto &profile : *this) {
+ if (profile->isDynamic()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool AudioProfileVector::hasDynamicRateFor(audio_format_t format) const
+{
+ for (const auto &profile : *this) {
+ if (profile->getFormat() == format && profile->isDynamicRate()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void AudioProfileVector::dump(std::string *dst, int spaces) const
+{
+ dst->append(base::StringPrintf("%*s- Profiles:\n", spaces, ""));
+ for (size_t i = 0; i < size(); i++) {
+ dst->append(base::StringPrintf("%*sProfile %zu:", spaces + 4, "", i));
+ std::string profileStr;
+ at(i)->dump(&profileStr, spaces + 8);
+ dst->append(profileStr);
+ }
+}
+
+status_t AudioProfileVector::writeToParcel(Parcel *parcel) const
+{
+ status_t status = NO_ERROR;
+ if ((status = parcel->writeVectorSize(*this)) != NO_ERROR) return status;
+ for (const auto &audioProfile : *this) {
+ if ((status = parcel->writeParcelable(*audioProfile)) != NO_ERROR) {
+ break;
+ }
+ }
+ return status;
+}
+
+status_t AudioProfileVector::readFromParcel(const Parcel *parcel)
+{
+ status_t status = NO_ERROR;
+ this->clear();
+ if ((status = parcel->resizeOutVector(this)) != NO_ERROR) return status;
+ for (size_t i = 0; i < this->size(); ++i) {
+ this->at(i) = new AudioProfile(AUDIO_FORMAT_DEFAULT, AUDIO_CHANNEL_NONE, 0 /*sampleRate*/);
+ if ((status = parcel->readParcelable(this->at(i).get())) != NO_ERROR) {
+ this->clear();
+ break;
+ }
+ }
+ return status;
+}
+
+bool AudioProfileVector::equals(const AudioProfileVector& other) const
+{
+ return std::equal(begin(), end(), other.begin(), other.end(),
+ [](const sp<AudioProfile>& left, const sp<AudioProfile>& right) {
+ return left->equals(right);
+ });
+}
+
+} // namespace android
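A small sketch of the clearProfiles() policy implemented above: a dynamic-format profile that has resolved to a concrete format is erased outright, while any other profile is kept and only its dynamic rates and channels are cleared.

    AudioProfileVector profiles;
    profiles.add(AudioProfile::createFullDynamic(AUDIO_FORMAT_PCM_16_BIT)); // dynamic, format resolved
    profiles.add(AudioProfile::createFullDynamic());                        // dynamic, AUDIO_FORMAT_DEFAULT
    profiles.clearProfiles();
    // profiles.size() == 1: the PCM_16_BIT entry was erased; the remaining one
    // kept its (default) format but had its channel masks and sample rates cleared.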
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
new file mode 100644
index 0000000..18fd184
--- /dev/null
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DeviceDescriptorBase"
+//#define LOG_NDEBUG 0
+
+#include <android-base/stringprintf.h>
+#include <audio_utils/string.h>
+#include <media/DeviceDescriptorBase.h>
+#include <media/TypeConverter.h>
+
+namespace android {
+
+DeviceDescriptorBase::DeviceDescriptorBase(audio_devices_t type) :
+ AudioPort("", AUDIO_PORT_TYPE_DEVICE,
+ audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
+ AUDIO_PORT_ROLE_SOURCE),
+ mDeviceType(type)
+{
+ if (audio_is_remote_submix_device(type)) {
+ mAddress = "0";
+ }
+}
+
+void DeviceDescriptorBase::toAudioPortConfig(struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig) const
+{
+ dstConfig->config_mask = AUDIO_PORT_CONFIG_GAIN;
+ if (mSamplingRate != 0) {
+ dstConfig->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
+ }
+ if (mChannelMask != AUDIO_CHANNEL_NONE) {
+ dstConfig->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
+ }
+ if (mFormat != AUDIO_FORMAT_INVALID) {
+ dstConfig->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
+ }
+
+ if (srcConfig != NULL) {
+ dstConfig->config_mask |= srcConfig->config_mask;
+ }
+
+ AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+
+ dstConfig->role = audio_is_output_device(mDeviceType) ?
+ AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
+ dstConfig->type = AUDIO_PORT_TYPE_DEVICE;
+ dstConfig->ext.device.type = mDeviceType;
+
+ (void)audio_utils_strlcpy_zerofill(dstConfig->ext.device.address, mAddress.c_str());
+}
+
+void DeviceDescriptorBase::toAudioPort(struct audio_port *port) const
+{
+ ALOGV("DeviceDescriptorBase::toAudioPort() handle %d type %08x", mId, mDeviceType);
+ AudioPort::toAudioPort(port);
+ toAudioPortConfig(&port->active_config);
+ port->id = mId;
+ port->ext.device.type = mDeviceType;
+ (void)audio_utils_strlcpy_zerofill(port->ext.device.address, mAddress.c_str());
+}
+
+void DeviceDescriptorBase::dump(std::string *dst, int spaces, int index,
+ const char* extraInfo, bool verbose) const
+{
+ dst->append(base::StringPrintf("%*sDevice %d:\n", spaces, "", index + 1));
+ if (mId != 0) {
+ dst->append(base::StringPrintf("%*s- id: %2d\n", spaces, "", mId));
+ }
+
+ if (extraInfo != nullptr) {
+ dst->append(extraInfo);
+ }
+
+ dst->append(base::StringPrintf("%*s- type: %-48s\n",
+ spaces, "", ::android::toString(mDeviceType).c_str()));
+
+ if (mAddress.size() != 0) {
+ dst->append(base::StringPrintf("%*s- address: %-32s\n", spaces, "", mAddress.c_str()));
+ }
+ AudioPort::dump(dst, spaces, verbose);
+}
+
+std::string DeviceDescriptorBase::toString() const
+{
+ std::stringstream sstream;
+ sstream << "type:0x" << std::hex << type() << ",@:" << mAddress;
+ return sstream.str();
+}
+
+void DeviceDescriptorBase::log() const
+{
+ ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType,
+ ::android::toString(mDeviceType).c_str(),
+ mAddress.c_str());
+
+ AudioPort::log(" ");
+}
+
+bool DeviceDescriptorBase::equals(const sp<DeviceDescriptorBase> &other) const
+{
+ return other != nullptr &&
+ static_cast<const AudioPort*>(this)->equals(other) &&
+ static_cast<const AudioPortConfig*>(this)->equals(other) &&
+ mAddress.compare(other->address()) == 0 &&
+ mDeviceType == other->type();
+}
+
+status_t DeviceDescriptorBase::writeToParcel(Parcel *parcel) const
+{
+ status_t status = NO_ERROR;
+ if ((status = AudioPort::writeToParcel(parcel)) != NO_ERROR) return status;
+ if ((status = AudioPortConfig::writeToParcel(parcel)) != NO_ERROR) return status;
+ if ((status = parcel->writeUtf8AsUtf16(mAddress)) != NO_ERROR) return status;
+ if ((status = parcel->writeUint32(mDeviceType)) != NO_ERROR) return status;
+ return status;
+}
+
+status_t DeviceDescriptorBase::readFromParcel(const Parcel *parcel)
+{
+ status_t status = NO_ERROR;
+ if ((status = AudioPort::readFromParcel(parcel)) != NO_ERROR) return status;
+ if ((status = AudioPortConfig::readFromParcel(parcel)) != NO_ERROR) return status;
+ if ((status = parcel->readUtf8FromUtf16(&mAddress)) != NO_ERROR) return status;
+ if ((status = parcel->readUint32(&mDeviceType)) != NO_ERROR) return status;
+ return status;
+}
+
+} // namespace android
diff --git a/media/libaudiofoundation/TEST_MAPPING b/media/libaudiofoundation/TEST_MAPPING
new file mode 100644
index 0000000..f6d249a
--- /dev/null
+++ b/media/libaudiofoundation/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "audiofoundation_parcelable_test"
+ }
+ ]
+}
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 3313224..05e68fa 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -16,19 +16,38 @@
#pragma once
+#include <algorithm>
+#include <functional>
+#include <iterator>
#include <set>
#include <vector>
+#include <media/TypeConverter.h>
#include <system/audio.h>
namespace android {
using ChannelMaskSet = std::set<audio_channel_mask_t>;
+using DeviceTypeSet = std::set<audio_devices_t>;
using FormatSet = std::set<audio_format_t>;
using SampleRateSet = std::set<uint32_t>;
using FormatVector = std::vector<audio_format_t>;
+const DeviceTypeSet& getAudioDeviceOutAllSet();
+const DeviceTypeSet& getAudioDeviceOutAllA2dpSet();
+const DeviceTypeSet& getAudioDeviceOutAllScoSet();
+const DeviceTypeSet& getAudioDeviceInAllSet();
+
+template<typename T>
+static std::vector<T> Intersection(const std::set<T>& a, const std::set<T>& b) {
+ std::vector<T> intersection;
+ std::set_intersection(a.begin(), a.end(),
+ b.begin(), b.end(),
+ std::back_inserter(intersection));
+ return intersection;
+}
+
static inline ChannelMaskSet asInMask(const ChannelMaskSet& channelMasks) {
ChannelMaskSet inMaskSet;
for (const auto &channel : channelMasks) {
@@ -49,4 +68,60 @@
return outMaskSet;
}
+static inline bool isSingleDeviceType(const DeviceTypeSet& deviceTypes,
+ audio_devices_t deviceType) {
+ return deviceTypes.size() == 1 && *(deviceTypes.begin()) == deviceType;
+}
+
+typedef bool (*DeviceTypeUnaryPredicate)(audio_devices_t);
+static inline bool isSingleDeviceType(const DeviceTypeSet& deviceTypes,
+ DeviceTypeUnaryPredicate p) {
+ return deviceTypes.size() == 1 && p(*(deviceTypes.begin()));
+}
+
+static inline bool areAllOfSameDeviceType(const DeviceTypeSet& deviceTypes,
+ std::function<bool(audio_devices_t)> p) {
+ return std::all_of(deviceTypes.begin(), deviceTypes.end(), p);
+}
+
+static inline void resetDeviceTypes(DeviceTypeSet& deviceTypes, audio_devices_t typeToAdd) {
+ deviceTypes.clear();
+ deviceTypes.insert(typeToAdd);
+}
+
+// FIXME: This is a temporary helper function. Remove it once all bit-mask
+// usages of audio device types are gone.
+static inline audio_devices_t deviceTypesToBitMask(const DeviceTypeSet& deviceTypes) {
+ audio_devices_t types = AUDIO_DEVICE_NONE;
+ for (auto deviceType : deviceTypes) {
+ types |= deviceType;
+ }
+ return types;
+}
+
+// FIXME: This is a temporary helper function. Remove it once all bit-mask
+// usages of audio device types are gone.
+static inline DeviceTypeSet deviceTypesFromBitMask(audio_devices_t types) {
+ DeviceTypeSet deviceTypes;
+ if ((types & AUDIO_DEVICE_BIT_IN) == 0) {
+ for (auto deviceType : AUDIO_DEVICE_OUT_ALL_ARRAY) {
+ if ((types & deviceType) == deviceType) {
+ deviceTypes.insert(deviceType);
+ }
+ }
+ } else {
+ for (auto deviceType : AUDIO_DEVICE_IN_ALL_ARRAY) {
+ if ((types & deviceType) == deviceType) {
+ deviceTypes.insert(deviceType);
+ }
+ }
+ }
+ return deviceTypes;
+}
+
+bool deviceTypesToString(const DeviceTypeSet& deviceTypes, std::string &str);
+
+std::string dumpDeviceTypes(const DeviceTypeSet& deviceTypes);
+
+
} // namespace android
\ No newline at end of file
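A round-trip sketch of the temporary bit-mask bridging helpers declared above:

    audio_devices_t legacy = AUDIO_DEVICE_OUT_SPEAKER | AUDIO_DEVICE_OUT_WIRED_HEADSET;
    DeviceTypeSet asSet = deviceTypesFromBitMask(legacy);   // {SPEAKER, WIRED_HEADSET}
    audio_devices_t back = deviceTypesToBitMask(asSet);     // equals `legacy` again
    bool speakerOnly = isSingleDeviceType(asSet, AUDIO_DEVICE_OUT_SPEAKER);  // false: two types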
diff --git a/media/libaudiofoundation/include/media/AudioGain.h b/media/libaudiofoundation/include/media/AudioGain.h
index 6a7fb55..859f1e7 100644
--- a/media/libaudiofoundation/include/media/AudioGain.h
+++ b/media/libaudiofoundation/include/media/AudioGain.h
@@ -67,6 +67,8 @@
const struct audio_gain &getGain() const { return mGain; }
+ bool equals(const sp<AudioGain>& other) const;
+
status_t writeToParcel(Parcel* parcel) const override;
status_t readFromParcel(const Parcel* parcel) override;
@@ -96,6 +98,8 @@
return 0;
}
+ bool equals(const AudioGains& other) const;
+
status_t writeToParcel(Parcel* parcel) const override;
status_t readFromParcel(const Parcel* parcel) override;
};
diff --git a/media/libaudiofoundation/include/media/AudioPort.h b/media/libaudiofoundation/include/media/AudioPort.h
new file mode 100644
index 0000000..3c013cb
--- /dev/null
+++ b/media/libaudiofoundation/include/media/AudioPort.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <media/AudioGain.h>
+#include <media/AudioProfile.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+class AudioPort : public virtual RefBase, public virtual Parcelable
+{
+public:
+ AudioPort(const std::string& name, audio_port_type_t type, audio_port_role_t role) :
+ mName(name), mType(type), mRole(role) {}
+
+ virtual ~AudioPort() = default;
+
+ void setName(const std::string &name) { mName = name; }
+ const std::string &getName() const { return mName; }
+
+ audio_port_type_t getType() const { return mType; }
+ audio_port_role_t getRole() const { return mRole; }
+
+ void setGains(const AudioGains &gains) { mGains = gains; }
+ const AudioGains &getGains() const { return mGains; }
+
+ virtual void toAudioPort(struct audio_port *port) const;
+
+ virtual void addAudioProfile(const sp<AudioProfile> &profile) {
+ mProfiles.add(profile);
+ }
+ virtual void clearAudioProfiles() {
+ mProfiles.clearProfiles();
+ }
+
+ bool hasValidAudioProfile() const { return mProfiles.hasValidProfile(); }
+
+ bool hasDynamicAudioProfile() const { return mProfiles.hasDynamicProfile(); }
+
+ void setAudioProfiles(const AudioProfileVector &profiles) { mProfiles = profiles; }
+ AudioProfileVector &getAudioProfiles() { return mProfiles; }
+
+ virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
+
+ status_t checkGain(const struct audio_gain_config *gainConfig, int index) const {
+ if (index < 0 || (size_t)index >= mGains.size()) {
+ return BAD_VALUE;
+ }
+ return mGains[index]->checkConfig(gainConfig);
+ }
+
+ bool useInputChannelMask() const
+ {
+ return ((mType == AUDIO_PORT_TYPE_DEVICE) && (mRole == AUDIO_PORT_ROLE_SOURCE)) ||
+ ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
+ }
+
+ void dump(std::string *dst, int spaces, bool verbose = true) const;
+
+ void log(const char* indent) const;
+
+ bool equals(const sp<AudioPort>& other) const;
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+ AudioGains mGains; // gain controllers
+protected:
+ std::string mName;
+ audio_port_type_t mType;
+ audio_port_role_t mRole;
+ AudioProfileVector mProfiles; // AudioProfiles supported by this port (format, rates, channels)
+};
+
+
+class AudioPortConfig : public virtual RefBase, public virtual Parcelable
+{
+public:
+ virtual ~AudioPortConfig() = default;
+
+ virtual sp<AudioPort> getAudioPort() const = 0;
+
+ virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+ struct audio_port_config *backupConfig = NULL);
+
+ virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig = NULL) const;
+
+ unsigned int getSamplingRate() const { return mSamplingRate; }
+ audio_format_t getFormat() const { return mFormat; }
+ audio_channel_mask_t getChannelMask() const { return mChannelMask; }
+ audio_port_handle_t getId() const { return mId; }
+
+ bool hasGainController(bool canUseForVolume = false) const;
+
+ bool equals(const sp<AudioPortConfig>& other) const;
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+protected:
+ unsigned int mSamplingRate = 0u;
+ audio_format_t mFormat = AUDIO_FORMAT_INVALID;
+ audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
+ audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
+ struct audio_gain_config mGain = { .index = -1 };
+};
+
+} // namespace android
diff --git a/media/libaudiofoundation/include/media/AudioProfile.h b/media/libaudiofoundation/include/media/AudioProfile.h
new file mode 100644
index 0000000..730138a
--- /dev/null
+++ b/media/libaudiofoundation/include/media/AudioProfile.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <media/AudioContainers.h>
+#include <system/audio.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class AudioProfile final : public RefBase, public Parcelable
+{
+public:
+ static sp<AudioProfile> createFullDynamic(audio_format_t dynamicFormat = AUDIO_FORMAT_DEFAULT);
+
+ AudioProfile(audio_format_t format, audio_channel_mask_t channelMasks, uint32_t samplingRate);
+ AudioProfile(audio_format_t format,
+ const ChannelMaskSet &channelMasks,
+ const SampleRateSet &samplingRateCollection);
+
+ audio_format_t getFormat() const { return mFormat; }
+ const ChannelMaskSet &getChannels() const { return mChannelMasks; }
+ const SampleRateSet &getSampleRates() const { return mSamplingRates; }
+ void setChannels(const ChannelMaskSet &channelMasks);
+ void setSampleRates(const SampleRateSet &sampleRates);
+
+ void clear();
+ bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
+ bool supportsChannels(audio_channel_mask_t channels) const
+ {
+ return mChannelMasks.count(channels) != 0;
+ }
+ bool supportsRate(uint32_t rate) const { return mSamplingRates.count(rate) != 0; }
+
+ bool hasValidFormat() const { return mFormat != AUDIO_FORMAT_DEFAULT; }
+ bool hasValidRates() const { return !mSamplingRates.empty(); }
+ bool hasValidChannels() const { return !mChannelMasks.empty(); }
+
+ void setDynamicChannels(bool dynamic) { mIsDynamicChannels = dynamic; }
+ bool isDynamicChannels() const { return mIsDynamicChannels; }
+
+ void setDynamicRate(bool dynamic) { mIsDynamicRate = dynamic; }
+ bool isDynamicRate() const { return mIsDynamicRate; }
+
+ void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
+ bool isDynamicFormat() const { return mIsDynamicFormat; }
+
+ bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
+
+ void dump(std::string *dst, int spaces) const;
+
+ bool equals(const sp<AudioProfile>& other) const;
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+private:
+ std::string mName;
+ audio_format_t mFormat; // The format for an audio profile should only be set when initialized.
+ ChannelMaskSet mChannelMasks;
+ SampleRateSet mSamplingRates;
+
+ bool mIsDynamicFormat = false;
+ bool mIsDynamicChannels = false;
+ bool mIsDynamicRate = false;
+};
+
+class AudioProfileVector : public std::vector<sp<AudioProfile>>, public Parcelable
+{
+public:
+ virtual ~AudioProfileVector() = default;
+
+ virtual ssize_t add(const sp<AudioProfile> &profile);
+
+ // If a profile has a dynamic format and that format is currently valid, it is removed by
+ // clearProfiles(). Otherwise, AudioProfile::clear() is called on it.
+ virtual void clearProfiles();
+
+ sp<AudioProfile> getFirstValidProfile() const;
+ sp<AudioProfile> getFirstValidProfileFor(audio_format_t format) const;
+ bool hasValidProfile() const { return getFirstValidProfile() != 0; }
+
+ FormatVector getSupportedFormats() const;
+ bool hasDynamicChannelsFor(audio_format_t format) const;
+ bool hasDynamicFormat() const;
+ bool hasDynamicProfile() const;
+ bool hasDynamicRateFor(audio_format_t format) const;
+
+ virtual void dump(std::string *dst, int spaces) const;
+
+ bool equals(const AudioProfileVector& other) const;
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+};
+
+bool operator == (const AudioProfile &left, const AudioProfile &right);
+
+} // namespace android
diff --git a/media/libaudiofoundation/include/media/DeviceDescriptorBase.h b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
new file mode 100644
index 0000000..6a34b4d
--- /dev/null
+++ b/media/libaudiofoundation/include/media/DeviceDescriptorBase.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+#include <media/AudioPort.h>
+#include <utils/Errors.h>
+#include <cutils/config_utils.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+
+namespace android {
+
+class DeviceDescriptorBase : public AudioPort, public AudioPortConfig
+{
+public:
+ // Note that an empty name refers, by convention, to a generic device.
+ explicit DeviceDescriptorBase(audio_devices_t type);
+
+ virtual ~DeviceDescriptorBase() {}
+
+ audio_devices_t type() const { return mDeviceType; }
+ std::string address() const { return mAddress; }
+ void setAddress(const std::string &address) { mAddress = address; }
+
+ // AudioPortConfig
+ virtual sp<AudioPort> getAudioPort() const {
+ return static_cast<AudioPort*>(const_cast<DeviceDescriptorBase*>(this));
+ }
+ virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig = NULL) const;
+
+ // AudioPort
+ virtual void toAudioPort(struct audio_port *port) const;
+
+ void dump(std::string *dst, int spaces, int index,
+ const char* extraInfo = nullptr, bool verbose = true) const;
+ void log() const;
+ std::string toString() const;
+
+ bool equals(const sp<DeviceDescriptorBase>& other) const;
+
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+protected:
+ std::string mAddress{""};
+ audio_devices_t mDeviceType;
+};
+
+} // namespace android
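A short construction sketch for the class above: the port role follows from the device type, and remote submix devices default to address "0" (see the constructor earlier in this change):

    sp<DeviceDescriptorBase> mic = new DeviceDescriptorBase(AUDIO_DEVICE_IN_BUILTIN_MIC);
    // mic->getRole() == AUDIO_PORT_ROLE_SOURCE, mic->address() == ""
    sp<DeviceDescriptorBase> submix = new DeviceDescriptorBase(AUDIO_DEVICE_OUT_REMOTE_SUBMIX);
    // submix->getRole() == AUDIO_PORT_ROLE_SINK, submix->address() == "0"
    submix->setAddress("1");   // pick a specific submix instance address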
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
new file mode 100644
index 0000000..f258b14
--- /dev/null
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -0,0 +1,25 @@
+cc_test {
+ name: "audiofoundation_parcelable_test",
+
+ shared_libs: [
+ "libaudiofoundation",
+ "libbinder",
+ "liblog",
+ "libutils",
+ ],
+
+ header_libs: [
+ "libaudio_system_headers",
+ ],
+
+ srcs: [
+ "audiofoundation_parcelable_test.cpp",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ test_suites: ["device-tests"],
+}
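With the TEST_MAPPING entry added above, this module runs in presubmit; a typical local invocation from a built tree with a connected device is `atest audiofoundation_parcelable_test`.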
diff --git a/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
new file mode 100644
index 0000000..5baa072
--- /dev/null
+++ b/media/libaudiofoundation/tests/audiofoundation_parcelable_test.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audiofoundation_parcelable_test"
+
+#include <gtest/gtest.h>
+
+#include <binder/IServiceManager.h>
+#include <binder/Parcelable.h>
+#include <binder/ProcessState.h>
+#include <media/AudioGain.h>
+#include <media/AudioPort.h>
+#include <media/AudioProfile.h>
+#include <media/DeviceDescriptorBase.h>
+#include <utils/Log.h>
+#include <utils/String16.h>
+
+namespace android {
+
+static const audio_port_config TEST_AUDIO_PORT_CONFIG = {
+ .id = 0,
+ .role = AUDIO_PORT_ROLE_SINK,
+ .type = AUDIO_PORT_TYPE_DEVICE,
+ .config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE | AUDIO_PORT_CONFIG_CHANNEL_MASK |
+ AUDIO_PORT_CONFIG_FORMAT | AUDIO_PORT_CONFIG_GAIN,
+ .sample_rate = 48000,
+ .channel_mask = AUDIO_CHANNEL_OUT_STEREO,
+ .format = AUDIO_FORMAT_PCM_16_BIT,
+ .gain = {
+ .index = 0,
+ .mode = AUDIO_GAIN_MODE_JOINT,
+ .channel_mask = AUDIO_CHANNEL_OUT_STEREO,
+ }
+};
+
+class AudioPortConfigTestStub : public AudioPortConfig {
+public:
+ sp<AudioPort> getAudioPort() const override { return nullptr; }
+};
+
+AudioGains getAudioGainsForTest() {
+ AudioGains audioGains;
+ sp<AudioGain> audioGain = new AudioGain(0 /*index*/, false /*useInChannelMask*/);
+ audioGain->setMode(AUDIO_GAIN_MODE_JOINT);
+ audioGain->setChannelMask(AUDIO_CHANNEL_OUT_STEREO);
+ audioGain->setMinValueInMb(-3200);
+ audioGain->setMaxValueInMb(600);
+ audioGain->setDefaultValueInMb(0);
+ audioGain->setStepValueInMb(100);
+ audioGain->setMinRampInMs(100);
+ audioGain->setMaxRampInMs(500);
+ audioGains.push_back(audioGain);
+ return audioGains;
+}
+
+AudioProfileVector getAudioProfileVectorForTest() {
+ AudioProfileVector audioProfiles;
+ sp<AudioProfile> audioProfile = AudioProfile::createFullDynamic();
+ audioProfile->setChannels({AUDIO_CHANNEL_OUT_MONO, AUDIO_CHANNEL_OUT_STEREO});
+ audioProfile->setSampleRates({48000});
+ audioProfiles.add(audioProfile);
+ return audioProfiles;
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioGain) {
+ Parcel data;
+ AudioGains audioGains = getAudioGainsForTest();
+
+ ASSERT_EQ(data.writeParcelable(audioGains), NO_ERROR);
+ data.setDataPosition(0);
+ AudioGains audioGainsFromParcel;
+ ASSERT_EQ(data.readParcelable(&audioGainsFromParcel), NO_ERROR);
+ ASSERT_TRUE(audioGainsFromParcel.equals(audioGains));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioProfileVector) {
+ Parcel data;
+ AudioProfileVector audioProfiles = getAudioProfileVectorForTest();
+
+ ASSERT_EQ(data.writeParcelable(audioProfiles), NO_ERROR);
+ data.setDataPosition(0);
+ AudioProfileVector audioProfilesFromParcel;
+ ASSERT_EQ(data.readParcelable(&audioProfilesFromParcel), NO_ERROR);
+ ASSERT_TRUE(audioProfilesFromParcel.equals(audioProfiles));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioPort) {
+ Parcel data;
+ sp<AudioPort> audioPort = new AudioPort(
+ "AudioPortName", AUDIO_PORT_TYPE_DEVICE, AUDIO_PORT_ROLE_SINK);
+ audioPort->setGains(getAudioGainsForTest());
+ audioPort->setAudioProfiles(getAudioProfileVectorForTest());
+
+ ASSERT_EQ(data.writeParcelable(*audioPort), NO_ERROR);
+ data.setDataPosition(0);
+ sp<AudioPort> audioPortFromParcel = new AudioPort(
+ "", AUDIO_PORT_TYPE_NONE, AUDIO_PORT_ROLE_NONE);
+ ASSERT_EQ(data.readParcelable(audioPortFromParcel.get()), NO_ERROR);
+ ASSERT_TRUE(audioPortFromParcel->equals(audioPort));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingAudioPortConfig) {
+ Parcel data;
+ sp<AudioPortConfig> audioPortConfig = new AudioPortConfigTestStub();
+ audioPortConfig->applyAudioPortConfig(&TEST_AUDIO_PORT_CONFIG);
+
+ ASSERT_EQ(data.writeParcelable(*audioPortConfig), NO_ERROR);
+ data.setDataPosition(0);
+ sp<AudioPortConfig> audioPortConfigFromParcel = new AudioPortConfigTestStub();
+ ASSERT_EQ(data.readParcelable(audioPortConfigFromParcel.get()), NO_ERROR);
+ ASSERT_TRUE(audioPortConfigFromParcel->equals(audioPortConfig));
+}
+
+TEST(AudioFoundationParcelableTest, ParcelingDeviceDescriptorBase) {
+ Parcel data;
+ sp<DeviceDescriptorBase> desc = new DeviceDescriptorBase(AUDIO_DEVICE_OUT_SPEAKER);
+ desc->setGains(getAudioGainsForTest());
+ desc->setAudioProfiles(getAudioProfileVectorForTest());
+ desc->applyAudioPortConfig(&TEST_AUDIO_PORT_CONFIG);
+ desc->setAddress("DeviceDescriptorBaseTestAddress");
+
+ ASSERT_EQ(data.writeParcelable(*desc), NO_ERROR);
+ data.setDataPosition(0);
+ sp<DeviceDescriptorBase> descFromParcel = new DeviceDescriptorBase(AUDIO_DEVICE_NONE);
+ ASSERT_EQ(data.readParcelable(descFromParcel.get()), NO_ERROR);
+ ASSERT_TRUE(descFromParcel->equals(desc));
+}
+
+} // namespace android
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 5837fcf..74b48f3 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -16,6 +16,7 @@
"libaudiohal@2.0",
"libaudiohal@4.0",
"libaudiohal@5.0",
+ "libaudiohal@6.0",
"libutils",
],
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index 88533da..8669e2a 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -36,8 +36,6 @@
"libhardware",
"libhidlbase",
"libhidlmemory",
- "libhidltransport",
- "libhwbinder",
"liblog",
"libmedia_helper",
"libmediautils",
@@ -45,6 +43,7 @@
],
header_libs: [
"android.hardware.audio.common.util@all-versions",
+ "libaudioclient_headers",
"libaudiohal_headers"
],
@@ -100,3 +99,20 @@
"-include common/all-versions/VersionMacro.h",
]
}
+
+cc_library_shared {
+ name: "libaudiohal@6.0",
+ defaults: ["libaudiohal_default"],
+ shared_libs: [
+ "android.hardware.audio.common@6.0",
+ "android.hardware.audio.common@6.0-util",
+ "android.hardware.audio.effect@6.0",
+ "android.hardware.audio@6.0",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=6",
+ "-DMINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
+
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index b07f21d..3d3a5eb 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -229,14 +229,14 @@
status_t DeviceHalHidl::openOutputStream(
audio_io_handle_t handle,
- audio_devices_t devices,
+ audio_devices_t deviceType,
audio_output_flags_t flags,
struct audio_config *config,
const char *address,
sp<StreamOutHalInterface> *outStream) {
if (mDevice == 0) return NO_INIT;
DeviceAddress hidlDevice;
- status_t status = deviceAddressFromHal(devices, address, &hidlDevice);
+ status_t status = deviceAddressFromHal(deviceType, address, &hidlDevice);
if (status != OK) return status;
AudioConfig hidlConfig;
HidlUtils::audioConfigFromHal(*config, &hidlConfig);
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index ee68252..dfbb6b2 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -104,7 +104,7 @@
status_t DeviceHalLocal::openOutputStream(
audio_io_handle_t handle,
- audio_devices_t devices,
+ audio_devices_t deviceType,
audio_output_flags_t flags,
struct audio_config *config,
const char *address,
@@ -112,11 +112,11 @@
audio_stream_out_t *halStream;
ALOGV("open_output_stream handle: %d devices: %x flags: %#x"
"srate: %d format %#x channels %x address %s",
- handle, devices, flags,
+ handle, deviceType, flags,
config->sample_rate, config->format, config->channel_mask,
address);
int openResut = mDev->open_output_stream(
- mDev, handle, devices, flags, config, &halStream, address);
+ mDev, handle, deviceType, flags, config, &halStream, address);
if (openResut == OK) {
*outStream = new StreamOutHalLocal(halStream, this);
}
diff --git a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
index 829f99c..271bafc 100644
--- a/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
+++ b/media/libaudiohal/impl/include/libaudiohal/FactoryHalHidl.h
@@ -34,6 +34,7 @@
* the preferred available impl.
*/
enum class AudioHALVersion {
+ V6_0,
V5_0,
V4_0,
V2_0,
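
The comment above says the factory picks "the preferred available impl", so enumerator order encodes preference: adding V6_0 at the top makes the 6.0 HAL the first candidate, with 5.0/4.0/2.0 as fallbacks. A minimal sketch of that selection idea follows; it is not the actual FactoryHalHidl code, which is unchanged here apart from the enum, and the availability set is a stand-in for whatever probing the real factory performs.

    #include <set>

    enum class AudioHALVersion { V6_0, V5_0, V4_0, V2_0 };

    // Return the most preferred version present in the set of versions that
    // are actually registered on the device.
    AudioHALVersion pickPreferredVersion(const std::set<AudioHALVersion>& available) {
        for (AudioHALVersion v : {AudioHALVersion::V6_0, AudioHALVersion::V5_0,
                                  AudioHALVersion::V4_0, AudioHALVersion::V2_0}) {
            if (available.count(v) != 0) return v;
        }
        return AudioHALVersion::V2_0;  // lowest supported fallback
    }
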
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index e565237..2200a7f 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -69,7 +69,7 @@
// by releasing all references to the returned object.
virtual status_t openOutputStream(
audio_io_handle_t handle,
- audio_devices_t devices,
+ audio_devices_t deviceType,
audio_output_flags_t flags,
struct audio_config *config,
const char *address,
diff --git a/media/libaudioprocessing/Android.bp b/media/libaudioprocessing/Android.bp
index e8aa700..9b5d58c 100644
--- a/media/libaudioprocessing/Android.bp
+++ b/media/libaudioprocessing/Android.bp
@@ -33,6 +33,7 @@
header_libs: [
"libbase_headers",
+ "libmedia_headers"
],
shared_libs: [
diff --git a/media/libmedia/include/media/RecordBufferConverter.h b/media/libaudioprocessing/include/media/RecordBufferConverter.h
similarity index 100%
rename from media/libmedia/include/media/RecordBufferConverter.h
rename to media/libaudioprocessing/include/media/RecordBufferConverter.h
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index d990111..20c2c2c 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -3,8 +3,13 @@
cc_defaults {
name: "libaudioprocessing_test_defaults",
- header_libs: ["libbase_headers"],
+ header_libs: [
+ "libbase_headers",
+ "libmedia_headers",
+ ],
+
shared_libs: [
+ "libaudioclient",
"libaudioprocessing",
"libaudioutils",
"libcutils",
diff --git a/media/libaudioprocessing/tests/fuzzer/Android.bp b/media/libaudioprocessing/tests/fuzzer/Android.bp
new file mode 100644
index 0000000..1df47b7
--- /dev/null
+++ b/media/libaudioprocessing/tests/fuzzer/Android.bp
@@ -0,0 +1,10 @@
+cc_fuzz {
+ name: "libaudioprocessing_resampler_fuzzer",
+ srcs: [
+ "libaudioprocessing_resampler_fuzzer.cpp",
+ ],
+ defaults: ["libaudioprocessing_test_defaults"],
+ static_libs: [
+ "libsndfile",
+ ],
+}
diff --git a/media/libaudioprocessing/tests/fuzzer/libaudioprocessing_resampler_fuzzer.cpp b/media/libaudioprocessing/tests/fuzzer/libaudioprocessing_resampler_fuzzer.cpp
new file mode 100644
index 0000000..938c610
--- /dev/null
+++ b/media/libaudioprocessing/tests/fuzzer/libaudioprocessing_resampler_fuzzer.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/macros.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/sndfile.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <math.h>
+#include <media/AudioBufferProvider.h>
+#include <media/AudioResampler.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+#include <utils/Vector.h>
+
+#include <memory>
+
+using namespace android;
+
+const int MAX_FRAMES = 10;
+const int MIN_FREQ = 1e3;
+const int MAX_FREQ = 100e3;
+
+const AudioResampler::src_quality qualities[] = {
+ AudioResampler::DEFAULT_QUALITY,
+ AudioResampler::LOW_QUALITY,
+ AudioResampler::MED_QUALITY,
+ AudioResampler::HIGH_QUALITY,
+ AudioResampler::VERY_HIGH_QUALITY,
+ AudioResampler::DYN_LOW_QUALITY,
+ AudioResampler::DYN_MED_QUALITY,
+ AudioResampler::DYN_HIGH_QUALITY,
+};
+
+class Provider : public AudioBufferProvider {
+ const void* mAddr; // base address
+ const size_t mNumFrames; // total frames
+ const size_t mFrameSize; // size of each frame in bytes
+ size_t mNextFrame; // index of next frame to provide
+ size_t mUnrel; // number of frames not yet released
+ public:
+ Provider(const void* addr, size_t frames, size_t frameSize)
+ : mAddr(addr),
+ mNumFrames(frames),
+ mFrameSize(frameSize),
+ mNextFrame(0),
+ mUnrel(0) {}
+ status_t getNextBuffer(Buffer* buffer) override {
+ if (buffer->frameCount > mNumFrames - mNextFrame) {
+ buffer->frameCount = mNumFrames - mNextFrame;
+ }
+ mUnrel = buffer->frameCount;
+ if (buffer->frameCount > 0) {
+ buffer->raw = (char*)mAddr + mFrameSize * mNextFrame;
+ return NO_ERROR;
+ } else {
+ buffer->raw = nullptr;
+ return NOT_ENOUGH_DATA;
+ }
+ }
+ virtual void releaseBuffer(Buffer* buffer) {
+ if (buffer->frameCount > mUnrel) {
+ mNextFrame += mUnrel;
+ mUnrel = 0;
+ } else {
+ mNextFrame += buffer->frameCount;
+ mUnrel -= buffer->frameCount;
+ }
+ buffer->frameCount = 0;
+ buffer->raw = nullptr;
+ }
+ void reset() { mNextFrame = 0; }
+};
+
+audio_format_t chooseFormat(AudioResampler::src_quality quality,
+ uint8_t input_byte) {
+ switch (quality) {
+ case AudioResampler::DYN_LOW_QUALITY:
+ case AudioResampler::DYN_MED_QUALITY:
+ case AudioResampler::DYN_HIGH_QUALITY:
+ if (input_byte % 2) {
+ return AUDIO_FORMAT_PCM_FLOAT;
+ }
+ FALLTHROUGH_INTENDED;
+ default:
+ return AUDIO_FORMAT_PCM_16_BIT;
+ }
+}
+
+int parseValue(const uint8_t* src, int index, void* dst, size_t size) {
+ memcpy(dst, &src[index], size);
+ return size;
+}
+
+bool validFreq(int freq) { return freq > MIN_FREQ && freq < MAX_FREQ; }
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ int input_freq = 0;
+ int output_freq = 0;
+ int input_channels = 0;
+
+ float left_volume = 0;
+ float right_volume = 0;
+
+ size_t metadata_size = 2 + 3 * sizeof(int) + 2 * sizeof(float);
+ if (size < metadata_size) {
+ // not enough data to set options
+ return 0;
+ }
+
+ AudioResampler::src_quality quality = qualities[data[0] % 8];
+ audio_format_t format = chooseFormat(quality, data[1]);
+
+ int index = 2;
+
+ index += parseValue(data, index, &input_freq, sizeof(int));
+ index += parseValue(data, index, &output_freq, sizeof(int));
+ index += parseValue(data, index, &input_channels, sizeof(int));
+
+ index += parseValue(data, index, &left_volume, sizeof(float));
+ index += parseValue(data, index, &right_volume, sizeof(float));
+
+ if (!validFreq(input_freq) || !validFreq(output_freq)) {
+ // sampling frequencies must be reasonable
+ return 0;
+ }
+
+ if (input_channels < 1 ||
+ input_channels > (quality < AudioResampler::DYN_LOW_QUALITY ? 2 : 8)) {
+ // invalid number of input channels
+ return 0;
+ }
+
+ size_t single_channel_size =
+ format == AUDIO_FORMAT_PCM_FLOAT ? sizeof(float) : sizeof(int16_t);
+ size_t input_frame_size = single_channel_size * input_channels;
+ size_t input_size = size - metadata_size;
+ uint8_t input_data[input_size];
+ memcpy(input_data, &data[metadata_size], input_size);
+
+ size_t input_frames = input_size / input_frame_size;
+ if (input_frames > MAX_FRAMES) {
+ return 0;
+ }
+
+ Provider provider(input_data, input_frames, input_frame_size);
+
+ std::unique_ptr<AudioResampler> resampler(
+ AudioResampler::create(format, input_channels, output_freq, quality));
+
+ resampler->setSampleRate(input_freq);
+ resampler->setVolume(left_volume, right_volume);
+
+ // output is at least stereo samples
+ int output_channels = input_channels > 2 ? input_channels : 2;
+ size_t output_frame_size = output_channels * sizeof(int32_t);
+ size_t output_frames = (input_frames * output_freq) / input_freq;
+ size_t output_size = output_frames * output_frame_size;
+
+ uint8_t output_data[output_size];
+ for (size_t i = 0; i < output_frames; i++) {
+ memset(output_data, 0, output_size);
+ resampler->resample((int*)output_data, i, &provider);
+ }
+
+ return 0;
+}
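
The fuzzer above consumes a fixed header before the PCM payload: byte 0 selects the resampler quality, byte 1 steers the sample format, then three ints (input rate, output rate, channel count) and two floats (left/right volume), i.e. metadata_size = 2 + 3*sizeof(int) + 2*sizeof(float). A small sketch of writing a seed corpus file with that layout; the values and the file name are arbitrary examples, not part of this change.

    #include <cstddef>
    #include <cstdint>
    #include <fstream>
    #include <vector>

    int main() {
        std::vector<uint8_t> seed;
        auto append = [&seed](const void* p, size_t n) {
            const uint8_t* b = static_cast<const uint8_t*>(p);
            seed.insert(seed.end(), b, b + n);
        };
        seed.push_back(4);   // qualities[4 % 8] -> VERY_HIGH_QUALITY
        seed.push_back(0);   // even byte -> AUDIO_FORMAT_PCM_16_BIT
        int inputFreq = 44100, outputFreq = 48000, channels = 2;
        float leftVol = 1.0f, rightVol = 1.0f;
        append(&inputFreq, sizeof(inputFreq));
        append(&outputFreq, sizeof(outputFreq));
        append(&channels, sizeof(channels));
        append(&leftVol, sizeof(leftVol));
        append(&rightVol, sizeof(rightVol));
        // Eight frames of 16-bit stereo silence (MAX_FRAMES above is 10).
        std::vector<int16_t> pcm(2 * 8, 0);
        append(pcm.data(), pcm.size() * sizeof(int16_t));
        std::ofstream out("resampler_seed.bin", std::ios::binary);
        out.write(reinterpret_cast<const char*>(seed.data()), seed.size());
        return 0;
    }
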
diff --git a/media/libcpustats/Android.bp b/media/libcpustats/Android.bp
index 8fcd8a4..6e8ca1d 100644
--- a/media/libcpustats/Android.bp
+++ b/media/libcpustats/Android.bp
@@ -6,6 +6,14 @@
"ThreadCpuUsage.cpp",
],
+ local_include_dirs: [
+ "include",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libdatasource/Android.bp b/media/libdatasource/Android.bp
new file mode 100644
index 0000000..f191c21
--- /dev/null
+++ b/media/libdatasource/Android.bp
@@ -0,0 +1,63 @@
+cc_library {
+ name: "libdatasource",
+
+ srcs: [
+ "DataSourceFactory.cpp",
+ "DataURISource.cpp",
+ "FileSource.cpp",
+ "HTTPBase.cpp",
+ "MediaHTTP.cpp",
+ "NuCachedSource2.cpp",
+ ],
+
+ aidl: {
+ local_include_dirs: ["aidl"],
+ export_aidl_headers: true,
+ },
+
+ header_libs: [
+ "libstagefright_headers",
+ "media_ndk_headers",
+ "libmedia_headers",
+ ],
+
+ export_header_lib_headers: [
+ "libstagefright_headers",
+ "media_ndk_headers",
+ ],
+
+ shared_libs: [
+ "liblog",
+ "libcutils",
+ "libutils",
+ "libstagefright_foundation",
+ "libdl",
+ ],
+
+ static_libs: [
+ "libc_malloc_debug_backtrace", // for memory heap analysis
+ "libmedia_midiiowrapper",
+ ],
+
+ local_include_dirs: [
+ "include",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ },
+}
diff --git a/media/libstagefright/DataSourceFactory.cpp b/media/libdatasource/DataSourceFactory.cpp
similarity index 72%
rename from media/libstagefright/DataSourceFactory.cpp
rename to media/libdatasource/DataSourceFactory.cpp
index 54bf0cc..bb6a08c 100644
--- a/media/libstagefright/DataSourceFactory.cpp
+++ b/media/libdatasource/DataSourceFactory.cpp
@@ -16,20 +16,33 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "DataSource"
-#include "include/HTTPBase.h"
-#include "include/NuCachedSource2.h"
+#include <datasource/DataSourceFactory.h>
+#include <datasource/DataURISource.h>
+#include <datasource/HTTPBase.h>
+#include <datasource/FileSource.h>
+#include <datasource/MediaHTTP.h>
+#include <datasource/NuCachedSource2.h>
#include <media/MediaHTTPConnection.h>
#include <media/MediaHTTPService.h>
-#include <media/stagefright/DataSourceFactory.h>
-#include <media/stagefright/DataURISource.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaHTTP.h>
#include <utils/String8.h>
namespace android {
// static
+sp<DataSourceFactory> DataSourceFactory::sInstance;
+// static
+Mutex DataSourceFactory::sInstanceLock;
+
+// static
+sp<DataSourceFactory> DataSourceFactory::getInstance() {
+ Mutex::Autolock l(sInstanceLock);
+ if (!sInstance) {
+ sInstance = new DataSourceFactory();
+ }
+ return sInstance;
+}
+
sp<DataSource> DataSourceFactory::CreateFromURI(
const sp<MediaHTTPService> &httpService,
const char *uri,
@@ -42,20 +55,16 @@
sp<DataSource> source;
if (!strncasecmp("file://", uri, 7)) {
- source = new FileSource(uri + 7);
+ source = CreateFileSource(uri + 7);
} else if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
if (httpService == NULL) {
ALOGE("Invalid http service!");
return NULL;
}
- if (httpSource == NULL) {
- sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
- if (conn == NULL) {
- ALOGE("Failed to make http connection from http service!");
- return NULL;
- }
- httpSource = new MediaHTTP(conn);
+ sp<HTTPBase> mediaHTTP = httpSource;
+ if (mediaHTTP == NULL) {
+ mediaHTTP = static_cast<HTTPBase *>(CreateMediaHTTP(httpService).get());
}
String8 cacheConfig;
@@ -69,24 +78,24 @@
&disconnectAtHighwatermark);
}
- if (httpSource->connect(uri, &nonCacheSpecificHeaders) != OK) {
+ if (mediaHTTP->connect(uri, &nonCacheSpecificHeaders) != OK) {
ALOGE("Failed to connect http source!");
return NULL;
}
if (contentType != NULL) {
- *contentType = httpSource->getMIMEType();
+ *contentType = mediaHTTP->getMIMEType();
}
source = NuCachedSource2::Create(
- httpSource,
+ mediaHTTP,
cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
disconnectAtHighwatermark);
} else if (!strncasecmp("data:", uri, 5)) {
source = DataURISource::Create(uri);
} else {
// Assume it's a filename.
- source = new FileSource(uri);
+ source = CreateFileSource(uri);
}
if (source == NULL || source->initCheck() != OK) {
@@ -108,10 +117,15 @@
sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
if (conn == NULL) {
+ ALOGE("Failed to make http connection from http service!");
return NULL;
} else {
return new MediaHTTP(conn);
}
}
+sp<DataSource> DataSourceFactory::CreateFileSource(const char *uri) {
+ return new FileSource(uri);
+}
+
} // namespace android
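
With this change DataSourceFactory stops being a bag of static helpers and becomes a ref-counted singleton with overridable creation hooks (see the header diff further down), so call sites move from the static call to the shared instance. A sketch of the call-site migration; httpService and uri stand in for whatever the caller already holds.

    #include <datasource/DataSourceFactory.h>
    #include <media/MediaHTTPService.h>

    using namespace android;

    sp<DataSource> openUri(const sp<MediaHTTPService>& httpService, const char* uri) {
        // Previously a static call: DataSourceFactory::CreateFromURI(httpService, uri);
        return DataSourceFactory::getInstance()->CreateFromURI(httpService, uri);
    }
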
diff --git a/media/libstagefright/DataURISource.cpp b/media/libdatasource/DataURISource.cpp
similarity index 98%
rename from media/libstagefright/DataURISource.cpp
rename to media/libdatasource/DataURISource.cpp
index b975b38..216f3d0 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libdatasource/DataURISource.cpp
@@ -13,7 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#include <media/stagefright/DataURISource.h>
+#include <datasource/DataURISource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AString.h>
diff --git a/media/libstagefright/ClearFileSource.cpp b/media/libdatasource/FileSource.cpp
similarity index 85%
rename from media/libstagefright/ClearFileSource.cpp
rename to media/libdatasource/FileSource.cpp
index e3a2cb7..bbf7dda 100644
--- a/media/libstagefright/ClearFileSource.cpp
+++ b/media/libdatasource/FileSource.cpp
@@ -15,12 +15,12 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearFileSource"
+#define LOG_TAG "FileSource"
#include <utils/Log.h>
+#include <datasource/FileSource.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/ClearFileSource.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <sys/types.h>
#include <unistd.h>
#include <sys/types.h>
@@ -29,7 +29,7 @@
namespace android {
-ClearFileSource::ClearFileSource(const char *filename)
+FileSource::FileSource(const char *filename)
: mFd(-1),
mOffset(0),
mLength(-1),
@@ -48,7 +48,7 @@
}
}
-ClearFileSource::ClearFileSource(int fd, int64_t offset, int64_t length)
+FileSource::FileSource(int fd, int64_t offset, int64_t length)
: mFd(fd),
mOffset(offset),
mLength(length),
@@ -89,18 +89,18 @@
}
-ClearFileSource::~ClearFileSource() {
+FileSource::~FileSource() {
if (mFd >= 0) {
::close(mFd);
mFd = -1;
}
}
-status_t ClearFileSource::initCheck() const {
+status_t FileSource::initCheck() const {
return mFd >= 0 ? OK : NO_INIT;
}
-ssize_t ClearFileSource::readAt(off64_t offset, void *data, size_t size) {
+ssize_t FileSource::readAt(off64_t offset, void *data, size_t size) {
if (mFd < 0) {
return NO_INIT;
}
@@ -118,7 +118,7 @@
return readAt_l(offset, data, size);
}
-ssize_t ClearFileSource::readAt_l(off64_t offset, void *data, size_t size) {
+ssize_t FileSource::readAt_l(off64_t offset, void *data, size_t size) {
off64_t result = lseek64(mFd, offset + mOffset, SEEK_SET);
if (result == -1) {
ALOGE("seek to %lld failed", (long long)(offset + mOffset));
@@ -128,7 +128,7 @@
return ::read(mFd, data, size);
}
-status_t ClearFileSource::getSize(off64_t *size) {
+status_t FileSource::getSize(off64_t *size) {
Mutex::Autolock autoLock(mLock);
if (mFd < 0) {
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libdatasource/HTTPBase.cpp
similarity index 98%
rename from media/libstagefright/HTTPBase.cpp
rename to media/libdatasource/HTTPBase.cpp
index d118e8c..ef29c48 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libdatasource/HTTPBase.cpp
@@ -18,7 +18,7 @@
#define LOG_TAG "HTTPBase"
#include <utils/Log.h>
-#include "include/HTTPBase.h"
+#include <datasource/HTTPBase.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
diff --git a/media/libstagefright/http/ClearMediaHTTP.cpp b/media/libdatasource/MediaHTTP.cpp
similarity index 82%
rename from media/libstagefright/http/ClearMediaHTTP.cpp
rename to media/libdatasource/MediaHTTP.cpp
index 9557c8a..58c1ce8 100644
--- a/media/libstagefright/http/ClearMediaHTTP.cpp
+++ b/media/libdatasource/MediaHTTP.cpp
@@ -15,30 +15,30 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "ClearMediaHTTP"
+#define LOG_TAG "MediaHTTP"
#include <utils/Log.h>
-#include <media/stagefright/ClearMediaHTTP.h>
+#include <datasource/MediaHTTP.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <media/MediaHTTPConnection.h>
namespace android {
-ClearMediaHTTP::ClearMediaHTTP(const sp<MediaHTTPConnection> &conn)
+MediaHTTP::MediaHTTP(const sp<MediaHTTPConnection> &conn)
: mInitCheck((conn != NULL) ? OK : NO_INIT),
mHTTPConnection(conn),
mCachedSizeValid(false),
mCachedSize(0ll) {
}
-ClearMediaHTTP::~ClearMediaHTTP() {
+MediaHTTP::~MediaHTTP() {
}
-status_t ClearMediaHTTP::connect(
+status_t MediaHTTP::connect(
const char *uri,
const KeyedVector<String8, String8> *headers,
off64_t /* offset */) {
@@ -68,18 +68,18 @@
if (success) {
AString sanitized = uriDebugString(mLastURI);
- mName = String8::format("ClearMediaHTTP(%s)", sanitized.c_str());
+ mName = String8::format("MediaHTTP(%s)", sanitized.c_str());
}
return success ? OK : UNKNOWN_ERROR;
}
-void ClearMediaHTTP::close() {
+void MediaHTTP::close() {
disconnect();
}
-void ClearMediaHTTP::disconnect() {
- mName = String8("ClearMediaHTTP(<disconnected>)");
+void MediaHTTP::disconnect() {
+ mName = String8("MediaHTTP(<disconnected>)");
if (mInitCheck != OK) {
return;
}
@@ -87,11 +87,11 @@
mHTTPConnection->disconnect();
}
-status_t ClearMediaHTTP::initCheck() const {
+status_t MediaHTTP::initCheck() const {
return mInitCheck;
}
-ssize_t ClearMediaHTTP::readAt(off64_t offset, void *data, size_t size) {
+ssize_t MediaHTTP::readAt(off64_t offset, void *data, size_t size) {
if (mInitCheck != OK) {
return mInitCheck;
}
@@ -127,7 +127,7 @@
return numBytesRead;
}
-status_t ClearMediaHTTP::getSize(off64_t *size) {
+status_t MediaHTTP::getSize(off64_t *size) {
if (mInitCheck != OK) {
return mInitCheck;
}
@@ -145,16 +145,16 @@
return *size < 0 ? *size : static_cast<status_t>(OK);
}
-uint32_t ClearMediaHTTP::flags() {
+uint32_t MediaHTTP::flags() {
return kWantsPrefetching | kIsHTTPBasedSource;
}
-status_t ClearMediaHTTP::reconnectAtOffset(off64_t offset) {
+status_t MediaHTTP::reconnectAtOffset(off64_t offset) {
return connect(mLastURI.c_str(), &mLastHeaders, offset);
}
-String8 ClearMediaHTTP::getUri() {
+String8 MediaHTTP::getUri() {
if (mInitCheck != OK) {
return String8::empty();
}
@@ -166,7 +166,7 @@
return String8(mLastURI.c_str());
}
-String8 ClearMediaHTTP::getMIMEType() const {
+String8 MediaHTTP::getMIMEType() const {
if (mInitCheck != OK) {
return String8("application/octet-stream");
}
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libdatasource/NuCachedSource2.cpp
similarity index 99%
rename from media/libstagefright/NuCachedSource2.cpp
rename to media/libdatasource/NuCachedSource2.cpp
index 522c81d..7f5ae61 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libdatasource/NuCachedSource2.cpp
@@ -20,8 +20,8 @@
#define LOG_TAG "NuCachedSource2"
#include <utils/Log.h>
-#include "include/NuCachedSource2.h"
-#include "include/HTTPBase.h"
+#include <datasource/NuCachedSource2.h>
+#include <datasource/HTTPBase.h>
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/include/media/stagefright/DataSourceFactory.h b/media/libdatasource/include/datasource/DataSourceFactory.h
similarity index 66%
rename from media/libstagefright/include/media/stagefright/DataSourceFactory.h
rename to media/libdatasource/include/datasource/DataSourceFactory.h
index 2a1d491..194abe2 100644
--- a/media/libstagefright/include/media/stagefright/DataSourceFactory.h
+++ b/media/libdatasource/include/datasource/DataSourceFactory.h
@@ -18,7 +18,9 @@
#define DATA_SOURCE_FACTORY_H_
+#include <media/DataSource.h>
#include <sys/types.h>
+#include <utils/KeyedVector.h>
#include <utils/RefBase.h>
namespace android {
@@ -27,17 +29,27 @@
class String8;
struct HTTPBase;
-class DataSourceFactory {
+class DataSourceFactory : public RefBase {
public:
- static sp<DataSource> CreateFromURI(
+ static sp<DataSourceFactory> getInstance();
+ sp<DataSource> CreateFromURI(
const sp<MediaHTTPService> &httpService,
const char *uri,
const KeyedVector<String8, String8> *headers = NULL,
String8 *contentType = NULL,
HTTPBase *httpSource = NULL);
- static sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
- static sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
+ virtual sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
+ sp<DataSource> CreateFromFd(int fd, int64_t offset, int64_t length);
+
+protected:
+ virtual sp<DataSource> CreateFileSource(const char *uri);
+ DataSourceFactory() {};
+ virtual ~DataSourceFactory() {};
+
+private:
+ static sp<DataSourceFactory> sInstance;
+ static Mutex sInstanceLock;
};
} // namespace android
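
Making the constructor protected and the CreateMediaHTTP/CreateFileSource hooks virtual lets a subclass substitute its own DataSource types while reusing CreateFromURI's scheme dispatch; the base class now provides the plain FileSource/MediaHTTP behaviour carried by the renamed Clear* sources above. An illustrative subclass only; nothing like it is introduced by this diff.

    #include <datasource/DataSourceFactory.h>
    #include <datasource/FileSource.h>

    namespace android {

    // Hypothetical factory that swaps in a specialised file source.
    struct MyDataSourceFactory : public DataSourceFactory {
    protected:
        sp<DataSource> CreateFileSource(const char* uri) override {
            // Substitute a specialised DataSource here; the base class
            // implementation is simply `new FileSource(uri)`.
            return new FileSource(uri);
        }
    };

    }  // namespace android
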
diff --git a/media/libstagefright/include/media/stagefright/DataURISource.h b/media/libdatasource/include/datasource/DataURISource.h
similarity index 100%
rename from media/libstagefright/include/media/stagefright/DataURISource.h
rename to media/libdatasource/include/datasource/DataURISource.h
diff --git a/media/libstagefright/include/media/stagefright/ClearFileSource.h b/media/libdatasource/include/datasource/FileSource.h
similarity index 74%
rename from media/libstagefright/include/media/stagefright/ClearFileSource.h
rename to media/libdatasource/include/datasource/FileSource.h
index be83748..dee0c33 100644
--- a/media/libstagefright/include/media/stagefright/ClearFileSource.h
+++ b/media/libdatasource/include/datasource/FileSource.h
@@ -14,9 +14,9 @@
* limitations under the License.
*/
-#ifndef CLEAR_FILE_SOURCE_H_
+#ifndef FILE_SOURCE_H_
-#define CLEAR_FILE_SOURCE_H_
+#define FILE_SOURCE_H_
#include <stdio.h>
@@ -26,11 +26,11 @@
namespace android {
-class ClearFileSource : public DataSource {
+class FileSource : public DataSource {
public:
- ClearFileSource(const char *filename);
- // ClearFileSource takes ownership and will close the fd
- ClearFileSource(int fd, int64_t offset, int64_t length);
+ FileSource(const char *filename);
+ // FileSource takes ownership and will close the fd
+ FileSource(int fd, int64_t offset, int64_t length);
virtual status_t initCheck() const;
@@ -47,7 +47,7 @@
}
protected:
- virtual ~ClearFileSource();
+ virtual ~FileSource();
virtual ssize_t readAt_l(off64_t offset, void *data, size_t size);
int mFd;
@@ -58,11 +58,11 @@
private:
String8 mName;
- ClearFileSource(const ClearFileSource &);
- ClearFileSource &operator=(const ClearFileSource &);
+ FileSource(const FileSource &);
+ FileSource &operator=(const FileSource &);
};
} // namespace android
-#endif // CLEAR_FILE_SOURCE_H_
+#endif // FILE_SOURCE_H_
diff --git a/media/libstagefright/include/HTTPBase.h b/media/libdatasource/include/datasource/HTTPBase.h
similarity index 100%
rename from media/libstagefright/include/HTTPBase.h
rename to media/libdatasource/include/datasource/HTTPBase.h
diff --git a/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h b/media/libdatasource/include/datasource/MediaHTTP.h
similarity index 83%
rename from media/libstagefright/include/media/stagefright/ClearMediaHTTP.h
rename to media/libdatasource/include/datasource/MediaHTTP.h
index 72907a9..a8d203b 100644
--- a/media/libstagefright/include/media/stagefright/ClearMediaHTTP.h
+++ b/media/libdatasource/include/datasource/MediaHTTP.h
@@ -14,20 +14,20 @@
* limitations under the License.
*/
-#ifndef CLEAR_MEDIA_HTTP_H_
+#ifndef MEDIA_HTTP_H_
-#define CLEAR_MEDIA_HTTP_H_
+#define MEDIA_HTTP_H_
#include <media/stagefright/foundation/AString.h>
-#include "include/HTTPBase.h"
+#include "HTTPBase.h"
namespace android {
struct MediaHTTPConnection;
-struct ClearMediaHTTP : public HTTPBase {
- ClearMediaHTTP(const sp<MediaHTTPConnection> &conn);
+struct MediaHTTP : public HTTPBase {
+ MediaHTTP(const sp<MediaHTTPConnection> &conn);
virtual status_t connect(
const char *uri,
@@ -49,7 +49,7 @@
virtual status_t reconnectAtOffset(off64_t offset);
protected:
- virtual ~ClearMediaHTTP();
+ virtual ~MediaHTTP();
virtual String8 getUri();
virtual String8 getMIMEType() const;
@@ -65,9 +65,9 @@
bool mCachedSizeValid;
off64_t mCachedSize;
- DISALLOW_EVIL_CONSTRUCTORS(ClearMediaHTTP);
+ DISALLOW_EVIL_CONSTRUCTORS(MediaHTTP);
};
} // namespace android
-#endif // CLEAR_MEDIA_HTTP_H_
+#endif // MEDIA_HTTP_H_
diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libdatasource/include/datasource/NuCachedSource2.h
similarity index 100%
rename from media/libstagefright/include/NuCachedSource2.h
rename to media/libdatasource/include/datasource/NuCachedSource2.h
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index a7c0f84..33ea1ca 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -44,8 +44,7 @@
info->mHeight = videoFrame->mHeight;
info->mRotationAngle = videoFrame->mRotationAngle;
info->mBytesPerPixel = videoFrame->mBytesPerPixel;
- // TODO: retrieve per-frame duration from extractor/metadataretriever.
- info->mDurationUs = 33333;
+ info->mDurationUs = videoFrame->mDurationUs;
if (videoFrame->mIccSize > 0) {
info->mIccData.assign(
videoFrame->getFlattenedIccData(),
@@ -174,7 +173,7 @@
// copy from cache if the request falls entirely in cache
if (offset + size <= mCachedOffset + mCachedSize) {
- memcpy(mMemory->pointer(), mCache.get() + offset - mCachedOffset, size);
+ memcpy(mMemory->unsecurePointer(), mCache.get() + offset - mCachedOffset, size);
return size;
}
@@ -272,7 +271,7 @@
if (bytesAvailable < (int64_t)size) {
size = bytesAvailable;
}
- memcpy(mMemory->pointer(), mCache.get() + offset - mCachedOffset, size);
+ memcpy(mMemory->unsecurePointer(), mCache.get() + offset - mCachedOffset, size);
return size;
}
@@ -361,12 +360,16 @@
sp<IMemory> sharedMem = mRetriever->getImageAtIndex(
-1, mOutputColor, true /*metaOnly*/);
- if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
+ if (sharedMem == nullptr || sharedMem->unsecurePointer() == nullptr) {
ALOGE("init: videoFrame is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->unsecurePointer());
ALOGV("Image dimension %dx%d, display %dx%d, angle %d, iccSize %d",
videoFrame->mWidth,
@@ -392,12 +395,17 @@
MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
mOutputColor, true /*metaOnly*/);
- if (sharedMem == nullptr || sharedMem->pointer() == nullptr) {
+ if (sharedMem == nullptr || sharedMem->unsecurePointer() == nullptr) {
ALOGE("init: videoFrame is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(sharedMem->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(
+ sharedMem->unsecurePointer());
ALOGV("Sequence dimension %dx%d, display %dx%d, angle %d, iccSize %d",
videoFrame->mWidth,
@@ -488,7 +496,7 @@
{
Mutex::Autolock autolock(mLock);
- if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ if (frameMemory == nullptr || frameMemory->unsecurePointer() == nullptr) {
mAsyncDecodeDone = true;
mScanlineReady.signal();
break;
@@ -530,12 +538,16 @@
sp<IMemory> frameMemory = mRetriever->getImageRectAtIndex(
-1, mOutputColor, 0, 0, mImageInfo.mWidth, mSliceHeight);
- if (frameMemory == nullptr || frameMemory->pointer() == nullptr) {
+ if (frameMemory == nullptr || frameMemory->unsecurePointer() == nullptr) {
ALOGE("decode: metadata is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(frameMemory->unsecurePointer());
if (frameInfo != nullptr) {
initFrameInfo(frameInfo, videoFrame);
@@ -564,12 +576,16 @@
MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC, mOutputColor);
}
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+ if (mFrameMemory == nullptr || mFrameMemory->unsecurePointer() == nullptr) {
ALOGE("decode: videoFrame is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->unsecurePointer());
if (videoFrame->mSize == 0 ||
mFrameMemory->size() < videoFrame->getFlattenedSize()) {
ALOGE("decode: videoFrame size is invalid");
@@ -614,12 +630,16 @@
mTotalScanline = mSequenceInfo.mHeight;
mFrameMemory = mRetriever->getFrameAtIndex(frameIndex, mOutputColor);
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+ if (mFrameMemory == nullptr || mFrameMemory->unsecurePointer() == nullptr) {
ALOGE("decode: videoFrame is a nullptr");
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->unsecurePointer());
if (videoFrame->mSize == 0 ||
mFrameMemory->size() < videoFrame->getFlattenedSize()) {
ALOGE("decode: videoFrame size is invalid");
@@ -642,10 +662,14 @@
}
bool HeifDecoderImpl::getScanlineInner(uint8_t* dst) {
- if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
+ if (mFrameMemory == nullptr || mFrameMemory->unsecurePointer() == nullptr) {
return false;
}
- VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoFrame* videoFrame = static_cast<VideoFrame*>(mFrameMemory->unsecurePointer());
uint8_t* src = videoFrame->getFlattenedData() + videoFrame->mRowBytes * mCurScanline++;
memcpy(dst, src, videoFrame->mBytesPerPixel * videoFrame->mWidth);
return true;
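
Every pointer() call above becomes unsecurePointer(), each annotated with the same TODO: either document why the zero-copy cast is safe or copy the data out first. A sketch of the copy-out mitigation the TODO alludes to; this diff itself keeps the in-place cast.

    #include <binder/IMemory.h>
    #include <cstdint>
    #include <vector>

    using namespace android;

    // Copy the shared region into caller-owned memory before interpreting it,
    // so a misbehaving peer cannot change the bytes after validation.
    bool copyFrame(const sp<IMemory>& mem, std::vector<uint8_t>* out) {
        if (mem == nullptr || mem->unsecurePointer() == nullptr) {
            return false;
        }
        const uint8_t* src = static_cast<const uint8_t*>(mem->unsecurePointer());
        out->assign(src, src + mem->size());
        return true;
    }
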
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index d141287..ac88448 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -1,15 +1,8 @@
-cc_defaults {
- name: "libmedia_defaults",
- include_dirs: [
- "bionic/libc/private",
- ],
-}
-
cc_library_headers {
name: "libmedia_headers",
vendor_available: true,
export_include_dirs: ["include"],
- header_libs:[
+ header_libs: [
"libbase_headers",
"libgui_headers",
"libstagefright_headers",
@@ -22,40 +15,29 @@
],
}
-cc_library {
- name: "libmedia_helper",
- vendor_available: true,
- vndk: {
- enabled: true,
- },
- double_loadable: true,
- srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
- cflags: [
- "-Werror",
- "-Wno-error=deprecated-declarations",
- "-Wall",
- ],
- shared_libs: ["libutils", "liblog"],
- header_libs: [
- "libmedia_headers",
- "libaudioclient_headers",
- "libaudio_system_headers",
- ],
- export_header_lib_headers: [
- "libmedia_headers",
- ],
- clang: true,
-}
-
filegroup {
name: "libmedia_omx_aidl",
srcs: [
- "aidl/android/IGraphicBufferSource.aidl",
"aidl/android/IOMXBufferSource.aidl",
],
path: "aidl",
}
+aidl_interface {
+ name: "resourcemanager_aidl_interface",
+ local_include_dir: "aidl",
+ srcs: [
+ "aidl/android/media/IResourceManagerClient.aidl",
+ "aidl/android/media/IResourceManagerService.aidl",
+ "aidl/android/media/MediaResourceType.aidl",
+ "aidl/android/media/MediaResourceSubType.aidl",
+ "aidl/android/media/MediaResourceParcel.aidl",
+ "aidl/android/media/MediaResourcePolicyParcel.aidl",
+ ],
+ api_dir: "api/resourcemanager",
+ versions: ["1"],
+}
+
cc_library_shared {
name: "libmedia_omx",
vendor_available: true,
@@ -67,12 +49,9 @@
srcs: [
":libmedia_omx_aidl",
- "IMediaCodecList.cpp",
"IOMX.cpp",
"MediaCodecBuffer.cpp",
- "MediaCodecInfo.cpp",
"OMXBuffer.cpp",
- "omx/1.0/WGraphicBufferSource.cpp",
"omx/1.0/WOmxBufferSource.cpp",
"omx/1.0/WOmxNode.cpp",
"omx/1.0/WOmxObserver.cpp",
@@ -82,7 +61,7 @@
local_include_dirs: ["aidl"],
export_aidl_headers: true,
},
-
+
local_include_dirs: [
"include",
],
@@ -93,7 +72,6 @@
"libbinder",
"libcutils",
"libhidlbase",
- "libhidltransport",
"liblog",
"libstagefright_foundation",
"libui",
@@ -137,7 +115,6 @@
},
}
-
cc_library_shared {
name: "libmedia_omx_client",
@@ -154,7 +131,6 @@
"libcutils",
"libgui",
"libhidlbase",
- "libhidltransport",
"liblog",
"libmedia_omx",
"libstagefright_foundation",
@@ -208,6 +184,7 @@
],
header_libs: [
+ "libmedia_headers",
"media_ndk_headers",
],
@@ -226,11 +203,52 @@
},
}
+cc_library_shared {
+ name: "libmedia_codeclist",
+
+ srcs: [
+ "IMediaCodecList.cpp",
+ "MediaCodecInfo.cpp",
+ ],
+
+ local_include_dirs: [
+ "include",
+ ],
+
+ shared_libs: [
+ "android.hardware.media.omx@1.0",
+ "libbinder",
+ "liblog",
+ "libstagefright_foundation",
+ "libutils",
+ ],
+
+ include_dirs: [
+ "system/libhidl/transport/token/1.0/utils/include",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ },
+}
+
cc_library {
name: "libmedia",
- defaults: [ "libmedia_defaults" ],
-
srcs: [
"IDataSource.cpp",
"BufferingSettings.cpp",
@@ -247,16 +265,12 @@
"IMediaSource.cpp",
"IRemoteDisplay.cpp",
"IRemoteDisplayClient.cpp",
- "IResourceManagerClient.cpp",
- "IResourceManagerService.cpp",
"IStreamSource.cpp",
"MediaUtils.cpp",
"Metadata.cpp",
"mediarecorder.cpp",
"IMediaMetadataRetriever.cpp",
"mediametadataretriever.cpp",
- "MidiDeviceInfo.cpp",
- "JetPlayer.cpp",
"MediaScanner.cpp",
"MediaScannerClient.cpp",
"CharacterEncodingDetector.cpp",
@@ -264,7 +278,6 @@
"MediaProfiles.cpp",
"MediaResource.cpp",
"MediaResourcePolicy.cpp",
- "Visualizer.cpp",
"StringArray.cpp",
"NdkMediaFormatPriv.cpp",
"NdkMediaErrorPriv.cpp",
@@ -276,6 +289,7 @@
},
header_libs: [
+ "bionic_libc_platform_headers",
"libstagefright_headers",
"media_ndk_headers",
],
@@ -299,8 +313,8 @@
"libstagefright_foundation",
"libgui",
"libdl",
- "libaudioutils",
"libaudioclient",
+ "libmedia_codeclist",
"libmedia_omx",
],
@@ -313,8 +327,12 @@
],
static_libs: [
- "libc_malloc_debug_backtrace", // for memory heap analysis
- "libmedia_midiiowrapper",
+ "libc_malloc_debug_backtrace", // for memory heap analysis
+ "resourcemanager_aidl_interface-unstable-cpp",
+ ],
+
+ export_static_lib_headers: [
+ "resourcemanager_aidl_interface-unstable-cpp",
],
export_include_dirs: [
@@ -337,66 +355,3 @@
cfi: true,
},
}
-
-cc_library_static {
- name: "libmedia_player2_util",
-
- defaults: [ "libmedia_defaults" ],
-
- srcs: [
- "AudioParameter.cpp",
- "BufferingSettings.cpp",
- "DataSourceDesc.cpp",
- "MediaCodecBuffer.cpp",
- "Metadata.cpp",
- "NdkWrapper.cpp",
- ],
-
- shared_libs: [
- "libbinder",
- "libcutils",
- "liblog",
- "libmediandk",
- "libnativewindow",
- "libmediandk_utils",
- "libstagefright_foundation",
- "libui",
- "libutils",
- ],
-
- export_shared_lib_headers: [
- "libbinder",
- "libmediandk",
- ],
-
- header_libs: [
- "media_plugin_headers",
- ],
-
- include_dirs: [
- "frameworks/av/media/ndk",
- ],
-
- static_libs: [
- "libstagefright_rtsp",
- "libstagefright_timedtext",
- ],
-
- export_include_dirs: [
- "include",
- ],
-
- cflags: [
- "-Werror",
- "-Wno-error=deprecated-declarations",
- "-Wall",
- ],
-
- sanitize: {
- misc_undefined: [
- "unsigned-integer-overflow",
- "signed-integer-overflow",
- ],
- cfi: true,
- },
-}
diff --git a/media/libmedia/DataSourceDesc.cpp b/media/libmedia/DataSourceDesc.cpp
deleted file mode 100644
index b7ccbce..0000000
--- a/media/libmedia/DataSourceDesc.cpp
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "DataSourceDesc"
-
-#include <media/DataSource.h>
-#include <media/DataSourceDesc.h>
-#include <media/MediaHTTPService.h>
-
-namespace android {
-
-static const int64_t kLongMax = 0x7ffffffffffffffL;
-
-DataSourceDesc::DataSourceDesc()
- : mType(TYPE_NONE),
- mFDOffset(0),
- mFDLength(kLongMax),
- mId(0),
- mStartPositionMs(0),
- mEndPositionMs(0) {
-}
-
-} // namespace android
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index 1bb8d67..8cbb4c2 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -128,12 +128,12 @@
ALOGE("readAt got a NULL buffer");
return UNKNOWN_ERROR;
}
- if (mMemory->pointer() == NULL) {
- ALOGE("readAt got a NULL mMemory->pointer()");
+ if (mMemory->unsecurePointer() == NULL) {
+ ALOGE("readAt got a NULL mMemory->unsecurePointer()");
return UNKNOWN_ERROR;
}
- memcpy(buffer, mMemory->pointer(), len);
+ memcpy(buffer, mMemory->unsecurePointer(), len);
return len;
}
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index bc0c2cd..959a3d7 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -29,7 +29,6 @@
#include <utils/NativeHandle.h>
#include <media/omx/1.0/WOmxNode.h>
-#include <android/IGraphicBufferSource.h>
#include <android/IOMXBufferSource.h>
namespace android {
diff --git a/media/libmedia/IResourceManagerClient.cpp b/media/libmedia/IResourceManagerClient.cpp
deleted file mode 100644
index 1fea479..0000000
--- a/media/libmedia/IResourceManagerClient.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-
-#include <media/IResourceManagerClient.h>
-
-namespace android {
-
-enum {
- RECLAIM_RESOURCE = IBinder::FIRST_CALL_TRANSACTION,
- GET_NAME,
-};
-
-class BpResourceManagerClient: public BpInterface<IResourceManagerClient>
-{
-public:
- explicit BpResourceManagerClient(const sp<IBinder> &impl)
- : BpInterface<IResourceManagerClient>(impl)
- {
- }
-
- virtual bool reclaimResource() {
- Parcel data, reply;
- data.writeInterfaceToken(IResourceManagerClient::getInterfaceDescriptor());
-
- bool ret = false;
- status_t status = remote()->transact(RECLAIM_RESOURCE, data, &reply);
- if (status == NO_ERROR) {
- ret = (bool)reply.readInt32();
- }
- return ret;
- }
-
- virtual String8 getName() {
- Parcel data, reply;
- data.writeInterfaceToken(IResourceManagerClient::getInterfaceDescriptor());
-
- String8 ret;
- status_t status = remote()->transact(GET_NAME, data, &reply);
- if (status == NO_ERROR) {
- ret = reply.readString8();
- }
- return ret;
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(ResourceManagerClient, "android.media.IResourceManagerClient");
-
-// ----------------------------------------------------------------------
-
-status_t BnResourceManagerClient::onTransact(
- uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags)
-{
- switch (code) {
- case RECLAIM_RESOURCE: {
- CHECK_INTERFACE(IResourceManagerClient, data, reply);
- bool ret = reclaimResource();
- reply->writeInt32(ret);
- return NO_ERROR;
- } break;
- case GET_NAME: {
- CHECK_INTERFACE(IResourceManagerClient, data, reply);
- String8 ret = getName();
- reply->writeString8(ret);
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-}; // namespace android
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
deleted file mode 100644
index f8a0a14..0000000
--- a/media/libmedia/IResourceManagerService.cpp
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
-**
-** Copyright 2015, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IResourceManagerService"
-#include <utils/Log.h>
-
-#include <media/IResourceManagerService.h>
-
-#include <binder/Parcel.h>
-
-#include <stdint.h>
-#include <sys/types.h>
-
-namespace android {
-
-enum {
- CONFIG = IBinder::FIRST_CALL_TRANSACTION,
- ADD_RESOURCE,
- REMOVE_RESOURCE,
- REMOVE_CLIENT,
- RECLAIM_RESOURCE,
-};
-
-template <typename T>
-static void writeToParcel(Parcel *data, const Vector<T> &items) {
- size_t size = items.size();
- // truncates size, but should be okay for this usecase
- data->writeUint32(static_cast<uint32_t>(size));
- for (size_t i = 0; i < size; i++) {
- items[i].writeToParcel(data);
- }
-}
-
-template <typename T>
-static void readFromParcel(const Parcel &data, Vector<T> *items) {
- size_t size = (size_t)data.readUint32();
- for (size_t i = 0; i < size && data.dataAvail() > 0; i++) {
- T item;
- item.readFromParcel(data);
- items->add(item);
- }
-}
-
-class BpResourceManagerService : public BpInterface<IResourceManagerService>
-{
-public:
- explicit BpResourceManagerService(const sp<IBinder> &impl)
- : BpInterface<IResourceManagerService>(impl)
- {
- }
-
- virtual void config(const Vector<MediaResourcePolicy> &policies) {
- Parcel data, reply;
- data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
- writeToParcel(&data, policies);
- remote()->transact(CONFIG, data, &reply);
- }
-
- virtual void addResource(
- int pid,
- int uid,
- int64_t clientId,
- const sp<IResourceManagerClient> client,
- const Vector<MediaResource> &resources) {
- Parcel data, reply;
- data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
- data.writeInt32(pid);
- data.writeInt32(uid);
- data.writeInt64(clientId);
- data.writeStrongBinder(IInterface::asBinder(client));
- writeToParcel(&data, resources);
-
- remote()->transact(ADD_RESOURCE, data, &reply);
- }
-
- virtual void removeResource(int pid, int64_t clientId, const Vector<MediaResource> &resources) {
- Parcel data, reply;
- data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
- data.writeInt32(pid);
- data.writeInt64(clientId);
- writeToParcel(&data, resources);
-
- remote()->transact(REMOVE_RESOURCE, data, &reply);
- }
-
- virtual void removeClient(int pid, int64_t clientId) {
- Parcel data, reply;
- data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
- data.writeInt32(pid);
- data.writeInt64(clientId);
-
- remote()->transact(REMOVE_CLIENT, data, &reply);
- }
-
- virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources) {
- Parcel data, reply;
- data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
- data.writeInt32(callingPid);
- writeToParcel(&data, resources);
-
- bool ret = false;
- status_t status = remote()->transact(RECLAIM_RESOURCE, data, &reply);
- if (status == NO_ERROR) {
- ret = (bool)reply.readInt32();
- }
- return ret;
- }
-};
-
-IMPLEMENT_META_INTERFACE(ResourceManagerService, "android.media.IResourceManagerService");
-
-// ----------------------------------------------------------------------
-
-
-status_t BnResourceManagerService::onTransact(
- uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags)
-{
- switch (code) {
- case CONFIG: {
- CHECK_INTERFACE(IResourceManagerService, data, reply);
- Vector<MediaResourcePolicy> policies;
- readFromParcel(data, &policies);
- config(policies);
- return NO_ERROR;
- } break;
-
- case ADD_RESOURCE: {
- CHECK_INTERFACE(IResourceManagerService, data, reply);
- int pid = data.readInt32();
- int uid = data.readInt32();
- int64_t clientId = data.readInt64();
- sp<IResourceManagerClient> client(
- interface_cast<IResourceManagerClient>(data.readStrongBinder()));
- if (client == NULL) {
- return NO_ERROR;
- }
- Vector<MediaResource> resources;
- readFromParcel(data, &resources);
- addResource(pid, uid, clientId, client, resources);
- return NO_ERROR;
- } break;
-
- case REMOVE_RESOURCE: {
- CHECK_INTERFACE(IResourceManagerService, data, reply);
- int pid = data.readInt32();
- int64_t clientId = data.readInt64();
- Vector<MediaResource> resources;
- readFromParcel(data, &resources);
- removeResource(pid, clientId, resources);
- return NO_ERROR;
- } break;
-
- case REMOVE_CLIENT: {
- CHECK_INTERFACE(IResourceManagerService, data, reply);
- int pid = data.readInt32();
- int64_t clientId = data.readInt64();
- removeClient(pid, clientId);
- return NO_ERROR;
- } break;
-
- case RECLAIM_RESOURCE: {
- CHECK_INTERFACE(IResourceManagerService, data, reply);
- int callingPid = data.readInt32();
- Vector<MediaResource> resources;
- readFromParcel(data, &resources);
- bool ret = reclaimResource(callingPid, resources);
- reply->writeInt32(ret);
- return NO_ERROR;
- } break;
-
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
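
The hand-rolled Binder proxy/stub above is deleted in favour of the aidl_interface declared in libmedia's Android.bp ("resourcemanager_aidl_interface"), so clients link the generated resourcemanager_aidl_interface-unstable-cpp library instead. A sketch of how a caller might obtain the generated interface; the generated header path and the "media.resource_manager" service name are assumptions about the surrounding code, not shown in this diff.

    #include <android/media/IResourceManagerService.h>  // AIDL cpp backend output (assumed path)
    #include <binder/IServiceManager.h>
    #include <utils/String16.h>

    using android::interface_cast;
    using android::sp;
    using android::media::IResourceManagerService;

    sp<IResourceManagerService> getResourceManager() {
        sp<android::IBinder> binder = android::defaultServiceManager()->getService(
                android::String16("media.resource_manager"));  // assumed service name
        return interface_cast<IResourceManagerService>(binder);
    }
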
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
deleted file mode 100644
index 0d3c1ba..0000000
--- a/media/libmedia/JetPlayer.cpp
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "JetPlayer-C"
-
-#include <utils/Log.h>
-#include <media/JetPlayer.h>
-
-
-namespace android
-{
-
-static const int MIX_NUM_BUFFERS = 4;
-static const S_EAS_LIB_CONFIG* pLibConfig = NULL;
-
-//-------------------------------------------------------------------------------------------------
-JetPlayer::JetPlayer(void *javaJetPlayer, int maxTracks, int trackBufferSize) :
- mEventCallback(NULL),
- mJavaJetPlayerRef(javaJetPlayer),
- mTid(-1),
- mRender(false),
- mPaused(false),
- mMaxTracks(maxTracks),
- mEasData(NULL),
- mIoWrapper(NULL),
- mTrackBufferSize(trackBufferSize)
-{
- ALOGV("JetPlayer constructor");
- mPreviousJetStatus.currentUserID = -1;
- mPreviousJetStatus.segmentRepeatCount = -1;
- mPreviousJetStatus.numQueuedSegments = -1;
- mPreviousJetStatus.paused = true;
-}
-
-//-------------------------------------------------------------------------------------------------
-JetPlayer::~JetPlayer()
-{
- ALOGV("~JetPlayer");
- release();
-}
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::init()
-{
- //Mutex::Autolock lock(&mMutex);
-
- EAS_RESULT result;
-
- // retrieve the EAS library settings
- if (pLibConfig == NULL)
- pLibConfig = EAS_Config();
- if (pLibConfig == NULL) {
- ALOGE("JetPlayer::init(): EAS library configuration could not be retrieved, aborting.");
- return EAS_FAILURE;
- }
-
- // init the EAS library
- result = EAS_Init(&mEasData);
- if (result != EAS_SUCCESS) {
- ALOGE("JetPlayer::init(): Error initializing Sonivox EAS library, aborting.");
- mState = EAS_STATE_ERROR;
- return result;
- }
- // init the JET library with the default app event controller range
- result = JET_Init(mEasData, NULL, sizeof(S_JET_CONFIG));
- if (result != EAS_SUCCESS) {
- ALOGE("JetPlayer::init(): Error initializing JET library, aborting.");
- mState = EAS_STATE_ERROR;
- return result;
- }
-
- // create the output AudioTrack
- mAudioTrack = new AudioTrack();
- status_t status = mAudioTrack->set(AUDIO_STREAM_MUSIC, //TODO parameterize this
- pLibConfig->sampleRate,
- AUDIO_FORMAT_PCM_16_BIT,
- audio_channel_out_mask_from_count(pLibConfig->numChannels),
- (size_t) mTrackBufferSize,
- AUDIO_OUTPUT_FLAG_NONE);
- if (status != OK) {
- ALOGE("JetPlayer::init(): Error initializing JET library; AudioTrack error %d", status);
- mAudioTrack.clear();
- mState = EAS_STATE_ERROR;
- return EAS_FAILURE;
- }
-
- // create render and playback thread
- {
- Mutex::Autolock l(mMutex);
- ALOGV("JetPlayer::init(): trying to start render thread");
- mThread = new JetPlayerThread(this);
- mThread->run("jetRenderThread", ANDROID_PRIORITY_AUDIO);
- mCondition.wait(mMutex);
- }
- if (mTid > 0) {
- // render thread started, we're ready
- ALOGV("JetPlayer::init(): render thread(%d) successfully started.", mTid);
- mState = EAS_STATE_READY;
- } else {
- ALOGE("JetPlayer::init(): failed to start render thread.");
- mState = EAS_STATE_ERROR;
- return EAS_FAILURE;
- }
-
- return EAS_SUCCESS;
-}
-
-void JetPlayer::setEventCallback(jetevent_callback eventCallback)
-{
- Mutex::Autolock l(mMutex);
- mEventCallback = eventCallback;
-}
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::release()
-{
- ALOGV("JetPlayer::release()");
- Mutex::Autolock lock(mMutex);
- mPaused = true;
- mRender = false;
- if (mEasData) {
- JET_Pause(mEasData);
- JET_CloseFile(mEasData);
- JET_Shutdown(mEasData);
- EAS_Shutdown(mEasData);
- }
- delete mIoWrapper;
- mIoWrapper = NULL;
- if (mAudioTrack != 0) {
- mAudioTrack->stop();
- mAudioTrack->flush();
- mAudioTrack.clear();
- }
- if (mAudioBuffer) {
- delete mAudioBuffer;
- mAudioBuffer = NULL;
- }
- mEasData = NULL;
-
- return EAS_SUCCESS;
-}
-
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::render() {
- EAS_RESULT result = EAS_FAILURE;
- EAS_I32 count;
- int temp;
- bool audioStarted = false;
-
- ALOGV("JetPlayer::render(): entering");
-
- // allocate render buffer
- mAudioBuffer =
- new EAS_PCM[pLibConfig->mixBufferSize * pLibConfig->numChannels * MIX_NUM_BUFFERS];
-
- // signal main thread that we started
- {
- Mutex::Autolock l(mMutex);
- mTid = gettid();
- ALOGV("JetPlayer::render(): render thread(%d) signal", mTid);
- mCondition.signal();
- }
-
- while (1) {
-
- mMutex.lock(); // [[[[[[[[ LOCK ---------------------------------------
-
- if (mEasData == NULL) {
- mMutex.unlock();
- ALOGV("JetPlayer::render(): NULL EAS data, exiting render.");
- goto threadExit;
- }
-
- // nothing to render, wait for client thread to wake us up
- while (!mRender)
- {
- ALOGV("JetPlayer::render(): signal wait");
- if (audioStarted) {
- mAudioTrack->pause();
- // we have to restart the playback once we start rendering again
- audioStarted = false;
- }
- mCondition.wait(mMutex);
- ALOGV("JetPlayer::render(): signal rx'd");
- }
-
- // render midi data into the input buffer
- int num_output = 0;
- EAS_PCM* p = mAudioBuffer;
- for (int i = 0; i < MIX_NUM_BUFFERS; i++) {
- result = EAS_Render(mEasData, p, pLibConfig->mixBufferSize, &count);
- if (result != EAS_SUCCESS) {
- ALOGE("JetPlayer::render(): EAS_Render returned error %ld", result);
- }
- p += count * pLibConfig->numChannels;
- num_output += count * pLibConfig->numChannels * sizeof(EAS_PCM);
-
- // send events that were generated (if any) to the event callback
- fireEventsFromJetQueue();
- }
-
- // update playback state
- //ALOGV("JetPlayer::render(): updating state");
- JET_Status(mEasData, &mJetStatus);
- fireUpdateOnStatusChange();
- mPaused = mJetStatus.paused;
-
- mMutex.unlock(); // UNLOCK ]]]]]]]] -----------------------------------
-
- // check audio output track
- if (mAudioTrack == NULL) {
- ALOGE("JetPlayer::render(): output AudioTrack was not created");
- goto threadExit;
- }
-
- // Write data to the audio hardware
- //ALOGV("JetPlayer::render(): writing to audio output");
- if ((temp = mAudioTrack->write(mAudioBuffer, num_output)) < 0) {
- ALOGE("JetPlayer::render(): Error in writing:%d",temp);
- return temp;
- }
-
- // start audio output if necessary
- if (!audioStarted) {
- ALOGV("JetPlayer::render(): starting audio playback");
- mAudioTrack->start();
- audioStarted = true;
- }
-
- }//while (1)
-
-threadExit:
- if (mAudioTrack != NULL) {
- mAudioTrack->stop();
- mAudioTrack->flush();
- }
- delete [] mAudioBuffer;
- mAudioBuffer = NULL;
- mMutex.lock();
- mTid = -1;
- mCondition.signal();
- mMutex.unlock();
- return result;
-}
-
-
-//-------------------------------------------------------------------------------------------------
-// fire up an update if any of the status fields has changed
-// precondition: mMutex locked
-void JetPlayer::fireUpdateOnStatusChange()
-{
- if ( (mJetStatus.currentUserID != mPreviousJetStatus.currentUserID)
- ||(mJetStatus.segmentRepeatCount != mPreviousJetStatus.segmentRepeatCount) ) {
- if (mEventCallback) {
- mEventCallback(
- JetPlayer::JET_USERID_UPDATE,
- mJetStatus.currentUserID,
- mJetStatus.segmentRepeatCount,
- mJavaJetPlayerRef);
- }
- mPreviousJetStatus.currentUserID = mJetStatus.currentUserID;
- mPreviousJetStatus.segmentRepeatCount = mJetStatus.segmentRepeatCount;
- }
-
- if (mJetStatus.numQueuedSegments != mPreviousJetStatus.numQueuedSegments) {
- if (mEventCallback) {
- mEventCallback(
- JetPlayer::JET_NUMQUEUEDSEGMENT_UPDATE,
- mJetStatus.numQueuedSegments,
- -1,
- mJavaJetPlayerRef);
- }
- mPreviousJetStatus.numQueuedSegments = mJetStatus.numQueuedSegments;
- }
-
- if (mJetStatus.paused != mPreviousJetStatus.paused) {
- if (mEventCallback) {
- mEventCallback(JetPlayer::JET_PAUSE_UPDATE,
- mJetStatus.paused,
- -1,
- mJavaJetPlayerRef);
- }
- mPreviousJetStatus.paused = mJetStatus.paused;
- }
-
-}
-
-
-//-------------------------------------------------------------------------------------------------
-// fire up all the JET events in the JET engine queue (until the queue is empty)
-// precondition: mMutex locked
-void JetPlayer::fireEventsFromJetQueue()
-{
- if (!mEventCallback) {
- // no callback, just empty the event queue
- while (JET_GetEvent(mEasData, NULL, NULL)) { }
- return;
- }
-
- EAS_U32 rawEvent;
- while (JET_GetEvent(mEasData, &rawEvent, NULL)) {
- mEventCallback(
- JetPlayer::JET_EVENT,
- rawEvent,
- -1,
- mJavaJetPlayerRef);
- }
-}
-
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::loadFromFile(const char* path)
-{
- ALOGV("JetPlayer::loadFromFile(): path=%s", path);
-
- Mutex::Autolock lock(mMutex);
-
- delete mIoWrapper;
- mIoWrapper = new MidiIoWrapper(path);
-
- EAS_RESULT result = JET_OpenFile(mEasData, mIoWrapper->getLocator());
- if (result != EAS_SUCCESS)
- mState = EAS_STATE_ERROR;
- else
- mState = EAS_STATE_OPEN;
- return( result );
-}
-
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::loadFromFD(const int fd, const long long offset, const long long length)
-{
- ALOGV("JetPlayer::loadFromFD(): fd=%d offset=%lld length=%lld", fd, offset, length);
-
- Mutex::Autolock lock(mMutex);
-
- delete mIoWrapper;
- mIoWrapper = new MidiIoWrapper(fd, offset, length);
-
- EAS_RESULT result = JET_OpenFile(mEasData, mIoWrapper->getLocator());
- if (result != EAS_SUCCESS)
- mState = EAS_STATE_ERROR;
- else
- mState = EAS_STATE_OPEN;
- return( result );
-}
-
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::closeFile()
-{
- Mutex::Autolock lock(mMutex);
- return JET_CloseFile(mEasData);
-}
-
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::play()
-{
- ALOGV("JetPlayer::play(): entering");
- Mutex::Autolock lock(mMutex);
-
- EAS_RESULT result = JET_Play(mEasData);
-
- mPaused = false;
- mRender = true;
-
- JET_Status(mEasData, &mJetStatus);
- this->dumpJetStatus(&mJetStatus);
-
- fireUpdateOnStatusChange();
-
- // wake up render thread
- ALOGV("JetPlayer::play(): wakeup render thread");
- mCondition.signal();
-
- return result;
-}
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::pause()
-{
- Mutex::Autolock lock(mMutex);
- mPaused = true;
- EAS_RESULT result = JET_Pause(mEasData);
-
- mRender = false;
-
- JET_Status(mEasData, &mJetStatus);
- this->dumpJetStatus(&mJetStatus);
- fireUpdateOnStatusChange();
-
-
- return result;
-}
-
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::queueSegment(int segmentNum, int libNum, int repeatCount, int transpose,
- EAS_U32 muteFlags, EAS_U8 userID)
-{
- ALOGV("JetPlayer::queueSegment segmentNum=%d, libNum=%d, repeatCount=%d, transpose=%d",
- segmentNum, libNum, repeatCount, transpose);
- Mutex::Autolock lock(mMutex);
- return JET_QueueSegment(mEasData, segmentNum, libNum, repeatCount, transpose, muteFlags,
- userID);
-}
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::setMuteFlags(EAS_U32 muteFlags, bool sync)
-{
- Mutex::Autolock lock(mMutex);
- return JET_SetMuteFlags(mEasData, muteFlags, sync);
-}
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::setMuteFlag(int trackNum, bool muteFlag, bool sync)
-{
- Mutex::Autolock lock(mMutex);
- return JET_SetMuteFlag(mEasData, trackNum, muteFlag, sync);
-}
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::triggerClip(int clipId)
-{
- ALOGV("JetPlayer::triggerClip clipId=%d", clipId);
- Mutex::Autolock lock(mMutex);
- return JET_TriggerClip(mEasData, clipId);
-}
-
-//-------------------------------------------------------------------------------------------------
-int JetPlayer::clearQueue()
-{
- ALOGV("JetPlayer::clearQueue");
- Mutex::Autolock lock(mMutex);
- return JET_Clear_Queue(mEasData);
-}
-
-//-------------------------------------------------------------------------------------------------
-void JetPlayer::dump()
-{
-}
-
-void JetPlayer::dumpJetStatus(S_JET_STATUS* pJetStatus)
-{
- if (pJetStatus!=NULL)
- ALOGV(">> current JET player status: userID=%d segmentRepeatCount=%d numQueuedSegments=%d "
- "paused=%d",
- pJetStatus->currentUserID, pJetStatus->segmentRepeatCount,
- pJetStatus->numQueuedSegments, pJetStatus->paused);
- else
- ALOGE(">> JET player status is NULL");
-}
-
-
-} // end namespace android
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
index e636a50..fe86d27 100644
--- a/media/libmedia/MediaResource.cpp
+++ b/media/libmedia/MediaResource.cpp
@@ -19,47 +19,73 @@
#include <utils/Log.h>
#include <media/MediaResource.h>
+#include <vector>
+
namespace android {
-MediaResource::MediaResource()
- : mType(kUnspecified),
- mSubType(kUnspecifiedSubType),
- mValue(0) {}
-
-MediaResource::MediaResource(Type type, uint64_t value)
- : mType(type),
- mSubType(kUnspecifiedSubType),
- mValue(value) {}
-
-MediaResource::MediaResource(Type type, SubType subType, uint64_t value)
- : mType(type),
- mSubType(subType),
- mValue(value) {}
-
-void MediaResource::readFromParcel(const Parcel &parcel) {
- mType = static_cast<Type>(parcel.readInt32());
- mSubType = static_cast<SubType>(parcel.readInt32());
- mValue = parcel.readUint64();
+MediaResource::MediaResource(Type type, int64_t value) {
+ this->type = type;
+ this->subType = SubType::kUnspecifiedSubType;
+ this->value = value;
}
-void MediaResource::writeToParcel(Parcel *parcel) const {
- parcel->writeInt32(static_cast<int32_t>(mType));
- parcel->writeInt32(static_cast<int32_t>(mSubType));
- parcel->writeUint64(mValue);
+MediaResource::MediaResource(Type type, SubType subType, int64_t value) {
+ this->type = type;
+ this->subType = subType;
+ this->value = value;
}
-String8 MediaResource::toString() const {
+MediaResource::MediaResource(Type type, const std::vector<uint8_t> &id, int64_t value) {
+ this->type = type;
+ this->subType = SubType::kUnspecifiedSubType;
+ this->id = id;
+ this->value = value;
+}
+
+//static
+MediaResource MediaResource::CodecResource(bool secure, bool video) {
+ return MediaResource(
+ secure ? Type::kSecureCodec : Type::kNonSecureCodec,
+ video ? SubType::kVideoCodec : SubType::kAudioCodec,
+ 1);
+}
+
+//static
+MediaResource MediaResource::GraphicMemoryResource(int64_t value) {
+ return MediaResource(Type::kGraphicMemory, value);
+}
+
+//static
+MediaResource MediaResource::CpuBoostResource() {
+ return MediaResource(Type::kCpuBoost, 1);
+}
+
+//static
+MediaResource MediaResource::VideoBatteryResource() {
+ return MediaResource(Type::kBattery, SubType::kVideoCodec, 1);
+}
+
+//static
+MediaResource MediaResource::DrmSessionResource(const std::vector<uint8_t> &id, int64_t value) {
+ return MediaResource(Type::kDrmSession, id, value);
+}
+
+static String8 bytesToHexString(const std::vector<uint8_t> &bytes) {
String8 str;
- str.appendFormat("%s/%s:%llu", asString(mType), asString(mSubType), (unsigned long long)mValue);
+ for (auto &b : bytes) {
+ str.appendFormat("%02x", b);
+ }
return str;
}
-bool MediaResource::operator==(const MediaResource &other) const {
- return (other.mType == mType) && (other.mSubType == mSubType) && (other.mValue == mValue);
-}
+String8 toString(const MediaResourceParcel& resource) {
+ String8 str;
-bool MediaResource::operator!=(const MediaResource &other) const {
- return !(*this == other);
+ str.appendFormat("%s/%s:[%s]:%lld",
+ asString(resource.type), asString(resource.subType),
+ bytesToHexString(resource.id).c_str(),
+ (long long)resource.value);
+ return str;
}
}; // namespace android
diff --git a/media/libmedia/MediaResourcePolicy.cpp b/media/libmedia/MediaResourcePolicy.cpp
index 5210825..c463179 100644
--- a/media/libmedia/MediaResourcePolicy.cpp
+++ b/media/libmedia/MediaResourcePolicy.cpp
@@ -18,31 +18,29 @@
#define LOG_TAG "MediaResourcePolicy"
#include <utils/Log.h>
#include <media/MediaResourcePolicy.h>
+#include <android/media/IResourceManagerService.h>
namespace android {
-const char kPolicySupportsMultipleSecureCodecs[] = "supports-multiple-secure-codecs";
-const char kPolicySupportsSecureWithNonSecureCodec[] = "supports-secure-with-non-secure-codec";
-
-MediaResourcePolicy::MediaResourcePolicy() {}
-
-MediaResourcePolicy::MediaResourcePolicy(String8 type, String8 value)
- : mType(type),
- mValue(value) {}
-
-void MediaResourcePolicy::readFromParcel(const Parcel &parcel) {
- mType = parcel.readString8();
- mValue = parcel.readString8();
+using android::media::IResourceManagerService;
+//static
+const ::std::string& MediaResourcePolicy::kPolicySupportsMultipleSecureCodecs() {
+ return IResourceManagerService::kPolicySupportsMultipleSecureCodecs();
+}
+//static
+const ::std::string& MediaResourcePolicy::kPolicySupportsSecureWithNonSecureCodec() {
+ return IResourceManagerService::kPolicySupportsSecureWithNonSecureCodec();
}
-void MediaResourcePolicy::writeToParcel(Parcel *parcel) const {
- parcel->writeString8(mType);
- parcel->writeString8(mValue);
+MediaResourcePolicy::MediaResourcePolicy(
+ const std::string& type, const std::string& value) {
+ this->type = type;
+ this->value = value;
}
-String8 MediaResourcePolicy::toString() const {
+String8 toString(const MediaResourcePolicyParcel &policy) {
String8 str;
- str.appendFormat("%s:%s", mType.string(), mValue.string());
+ str.appendFormat("%s:%s", policy.type.c_str(), policy.value.c_str());
return str;
}
diff --git a/media/libmedia/MediaUtils.cpp b/media/libmedia/MediaUtils.cpp
index 31972fa..2efb30e 100644
--- a/media/libmedia/MediaUtils.cpp
+++ b/media/libmedia/MediaUtils.cpp
@@ -22,7 +22,7 @@
#include <sys/resource.h>
#include <unistd.h>
-#include <bionic_malloc.h>
+#include <bionic/malloc.h>
#include "MediaUtils.h"
diff --git a/media/libmedia/MidiDeviceInfo.cpp b/media/libmedia/MidiDeviceInfo.cpp
deleted file mode 100644
index 7588e00..0000000
--- a/media/libmedia/MidiDeviceInfo.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MidiDeviceInfo"
-
-#include <media/MidiDeviceInfo.h>
-
-#include <binder/Parcel.h>
-#include <log/log.h>
-#include <utils/Errors.h>
-#include <utils/String16.h>
-
-namespace android {
-namespace media {
-namespace midi {
-
-// The constant values need to be kept in sync with MidiDeviceInfo.java.
-// static
-const char* const MidiDeviceInfo::PROPERTY_NAME = "name";
-const char* const MidiDeviceInfo::PROPERTY_MANUFACTURER = "manufacturer";
-const char* const MidiDeviceInfo::PROPERTY_PRODUCT = "product";
-const char* const MidiDeviceInfo::PROPERTY_VERSION = "version";
-const char* const MidiDeviceInfo::PROPERTY_SERIAL_NUMBER = "serial_number";
-const char* const MidiDeviceInfo::PROPERTY_ALSA_CARD = "alsa_card";
-const char* const MidiDeviceInfo::PROPERTY_ALSA_DEVICE = "alsa_device";
-
-String16 MidiDeviceInfo::getProperty(const char* propertyName) {
- String16 value;
- if (mProperties.getString(String16(propertyName), &value)) {
- return value;
- } else {
- return String16();
- }
-}
-
-#define RETURN_IF_FAILED(calledOnce) \
- { \
- status_t returnStatus = calledOnce; \
- if (returnStatus) { \
- ALOGE("Failed at %s:%d (%s)", __FILE__, __LINE__, __func__); \
- return returnStatus; \
- } \
- }
-
-status_t MidiDeviceInfo::writeToParcel(Parcel* parcel) const {
- // Needs to be kept in sync with code in MidiDeviceInfo.java
- RETURN_IF_FAILED(parcel->writeInt32(mType));
- RETURN_IF_FAILED(parcel->writeInt32(mId));
- RETURN_IF_FAILED(parcel->writeInt32((int32_t)mInputPortNames.size()));
- RETURN_IF_FAILED(parcel->writeInt32((int32_t)mOutputPortNames.size()));
- RETURN_IF_FAILED(writeStringVector(parcel, mInputPortNames));
- RETURN_IF_FAILED(writeStringVector(parcel, mOutputPortNames));
- RETURN_IF_FAILED(parcel->writeInt32(mIsPrivate ? 1 : 0));
- RETURN_IF_FAILED(mProperties.writeToParcel(parcel));
- // This corresponds to "extra" properties written by Java code
- RETURN_IF_FAILED(mProperties.writeToParcel(parcel));
- return OK;
-}
-
-status_t MidiDeviceInfo::readFromParcel(const Parcel* parcel) {
- // Needs to be kept in sync with code in MidiDeviceInfo.java
- RETURN_IF_FAILED(parcel->readInt32(&mType));
- RETURN_IF_FAILED(parcel->readInt32(&mId));
- int32_t inputPortCount;
- RETURN_IF_FAILED(parcel->readInt32(&inputPortCount));
- int32_t outputPortCount;
- RETURN_IF_FAILED(parcel->readInt32(&outputPortCount));
- RETURN_IF_FAILED(readStringVector(parcel, &mInputPortNames, inputPortCount));
- RETURN_IF_FAILED(readStringVector(parcel, &mOutputPortNames, outputPortCount));
- int32_t isPrivate;
- RETURN_IF_FAILED(parcel->readInt32(&isPrivate));
- mIsPrivate = isPrivate == 1;
- RETURN_IF_FAILED(mProperties.readFromParcel(parcel));
- // Ignore "extra" properties as they may contain Java Parcelables
- return OK;
-}
-
-status_t MidiDeviceInfo::readStringVector(
- const Parcel* parcel, Vector<String16> *vectorPtr, size_t defaultLength) {
- std::unique_ptr<std::vector<std::unique_ptr<String16>>> v;
- status_t result = parcel->readString16Vector(&v);
- if (result != OK) return result;
- vectorPtr->clear();
- if (v.get() != nullptr) {
- for (const auto& iter : *v) {
- if (iter.get() != nullptr) {
- vectorPtr->push_back(*iter);
- } else {
- vectorPtr->push_back(String16());
- }
- }
- } else {
- vectorPtr->resize(defaultLength);
- }
- return OK;
-}
-
-status_t MidiDeviceInfo::writeStringVector(Parcel* parcel, const Vector<String16>& vector) const {
- std::vector<String16> v;
- for (size_t i = 0; i < vector.size(); ++i) {
- v.push_back(vector[i]);
- }
- return parcel->writeString16Vector(v);
-}
-
-// Vector does not define operator==
-static inline bool areVectorsEqual(const Vector<String16>& lhs, const Vector<String16>& rhs) {
- if (lhs.size() != rhs.size()) return false;
- for (size_t i = 0; i < lhs.size(); ++i) {
- if (lhs[i] != rhs[i]) return false;
- }
- return true;
-}
-
-bool operator==(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs) {
- return (lhs.mType == rhs.mType && lhs.mId == rhs.mId &&
- areVectorsEqual(lhs.mInputPortNames, rhs.mInputPortNames) &&
- areVectorsEqual(lhs.mOutputPortNames, rhs.mOutputPortNames) &&
- lhs.mProperties == rhs.mProperties &&
- lhs.mIsPrivate == rhs.mIsPrivate);
-}
-
-} // namespace midi
-} // namespace media
-} // namespace android
diff --git a/media/libmedia/MidiIoWrapper.cpp b/media/libmedia/MidiIoWrapper.cpp
index d8ef9cf..6d46363 100644
--- a/media/libmedia/MidiIoWrapper.cpp
+++ b/media/libmedia/MidiIoWrapper.cpp
@@ -17,7 +17,6 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MidiIoWrapper"
#include <utils/Log.h>
-#include <utils/RefBase.h>
#include <sys/stat.h>
#include <fcntl.h>
@@ -50,7 +49,7 @@
mDataSource = nullptr;
}
-class DataSourceUnwrapper : public DataSourceBase {
+class DataSourceUnwrapper {
public:
explicit DataSourceUnwrapper(CDataSource *csource) {
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
deleted file mode 100644
index c150407..0000000
--- a/media/libmedia/NdkWrapper.cpp
+++ /dev/null
@@ -1,1290 +0,0 @@
-/*
- * Copyright 2017, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NdkWrapper"
-
-#include <media/NdkWrapper.h>
-
-#include <android/native_window.h>
-#include <log/log.h>
-#include <media/NdkMediaCodec.h>
-#include <media/NdkMediaCrypto.h>
-#include <media/NdkMediaDrm.h>
-#include <media/NdkMediaFormat.h>
-#include <media/NdkMediaExtractor.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <utils/Errors.h>
-
-#include "NdkMediaDataSourceCallbacksPriv.h"
-
-namespace android {
-
-static const size_t kAESBlockSize = 16; // AES_BLOCK_SIZE
-
-static const char *AMediaFormatKeyGroupInt32[] = {
- AMEDIAFORMAT_KEY_AAC_DRC_ATTENUATION_FACTOR,
- AMEDIAFORMAT_KEY_AAC_DRC_BOOST_FACTOR,
- AMEDIAFORMAT_KEY_AAC_DRC_HEAVY_COMPRESSION,
- AMEDIAFORMAT_KEY_AAC_DRC_TARGET_REFERENCE_LEVEL,
- AMEDIAFORMAT_KEY_AAC_ENCODED_TARGET_LEVEL,
- AMEDIAFORMAT_KEY_AAC_MAX_OUTPUT_CHANNEL_COUNT,
- AMEDIAFORMAT_KEY_AAC_PROFILE,
- AMEDIAFORMAT_KEY_AAC_SBR_MODE,
- AMEDIAFORMAT_KEY_AUDIO_SESSION_ID,
- AMEDIAFORMAT_KEY_BITRATE_MODE,
- AMEDIAFORMAT_KEY_BIT_RATE,
- AMEDIAFORMAT_KEY_CAPTURE_RATE,
- AMEDIAFORMAT_KEY_CHANNEL_COUNT,
- AMEDIAFORMAT_KEY_CHANNEL_MASK,
- AMEDIAFORMAT_KEY_COLOR_FORMAT,
- AMEDIAFORMAT_KEY_COLOR_RANGE,
- AMEDIAFORMAT_KEY_COLOR_STANDARD,
- AMEDIAFORMAT_KEY_COLOR_TRANSFER,
- AMEDIAFORMAT_KEY_COMPLEXITY,
- AMEDIAFORMAT_KEY_CREATE_INPUT_SURFACE_SUSPENDED,
- AMEDIAFORMAT_KEY_CRYPTO_DEFAULT_IV_SIZE,
- AMEDIAFORMAT_KEY_CRYPTO_ENCRYPTED_BYTE_BLOCK,
- AMEDIAFORMAT_KEY_CRYPTO_MODE,
- AMEDIAFORMAT_KEY_CRYPTO_SKIP_BYTE_BLOCK,
- AMEDIAFORMAT_KEY_FLAC_COMPRESSION_LEVEL,
- AMEDIAFORMAT_KEY_GRID_COLUMNS,
- AMEDIAFORMAT_KEY_GRID_ROWS,
- AMEDIAFORMAT_KEY_HAPTIC_CHANNEL_COUNT,
- AMEDIAFORMAT_KEY_HEIGHT,
- AMEDIAFORMAT_KEY_INTRA_REFRESH_PERIOD,
- AMEDIAFORMAT_KEY_IS_ADTS,
- AMEDIAFORMAT_KEY_IS_AUTOSELECT,
- AMEDIAFORMAT_KEY_IS_DEFAULT,
- AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE,
- AMEDIAFORMAT_KEY_LATENCY,
- AMEDIAFORMAT_KEY_LEVEL,
- AMEDIAFORMAT_KEY_MAX_HEIGHT,
- AMEDIAFORMAT_KEY_MAX_INPUT_SIZE,
- AMEDIAFORMAT_KEY_MAX_WIDTH,
- AMEDIAFORMAT_KEY_PCM_ENCODING,
- AMEDIAFORMAT_KEY_PRIORITY,
- AMEDIAFORMAT_KEY_PROFILE,
- AMEDIAFORMAT_KEY_PUSH_BLANK_BUFFERS_ON_STOP,
- AMEDIAFORMAT_KEY_ROTATION,
- AMEDIAFORMAT_KEY_SAMPLE_RATE,
- AMEDIAFORMAT_KEY_SLICE_HEIGHT,
- AMEDIAFORMAT_KEY_STRIDE,
- AMEDIAFORMAT_KEY_TRACK_ID,
- AMEDIAFORMAT_KEY_WIDTH,
- AMEDIAFORMAT_KEY_DISPLAY_HEIGHT,
- AMEDIAFORMAT_KEY_DISPLAY_WIDTH,
- AMEDIAFORMAT_KEY_TEMPORAL_LAYER_ID,
- AMEDIAFORMAT_KEY_TILE_HEIGHT,
- AMEDIAFORMAT_KEY_TILE_WIDTH,
- AMEDIAFORMAT_KEY_TRACK_INDEX,
-};
-
-static const char *AMediaFormatKeyGroupInt64[] = {
- AMEDIAFORMAT_KEY_DURATION,
- AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER,
- AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER,
- AMEDIAFORMAT_KEY_TIME_US,
-};
-
-static const char *AMediaFormatKeyGroupString[] = {
- AMEDIAFORMAT_KEY_LANGUAGE,
- AMEDIAFORMAT_KEY_MIME,
- AMEDIAFORMAT_KEY_TEMPORAL_LAYERING,
-};
-
-static const char *AMediaFormatKeyGroupBuffer[] = {
- AMEDIAFORMAT_KEY_CRYPTO_IV,
- AMEDIAFORMAT_KEY_CRYPTO_KEY,
- AMEDIAFORMAT_KEY_HDR_STATIC_INFO,
- AMEDIAFORMAT_KEY_SEI,
- AMEDIAFORMAT_KEY_MPEG_USER_DATA,
-};
-
-static const char *AMediaFormatKeyGroupCsd[] = {
- AMEDIAFORMAT_KEY_CSD_0,
- AMEDIAFORMAT_KEY_CSD_1,
- AMEDIAFORMAT_KEY_CSD_2,
-};
-
-static const char *AMediaFormatKeyGroupRect[] = {
- AMEDIAFORMAT_KEY_DISPLAY_CROP,
-};
-
-static const char *AMediaFormatKeyGroupFloatInt32[] = {
- AMEDIAFORMAT_KEY_FRAME_RATE,
- AMEDIAFORMAT_KEY_I_FRAME_INTERVAL,
- AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER,
- AMEDIAFORMAT_KEY_OPERATING_RATE,
-};
-
-static status_t translateErrorCode(media_status_t err) {
- if (err == AMEDIA_OK) {
- return OK;
- } else if (err == AMEDIA_ERROR_END_OF_STREAM) {
- return ERROR_END_OF_STREAM;
- } else if (err == AMEDIA_ERROR_IO) {
- return ERROR_IO;
- } else if (err == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
- return -EAGAIN;
- }
-
- ALOGE("ndk error code: %d", err);
- return UNKNOWN_ERROR;
-}
-
-static int32_t translateActionCode(int32_t actionCode) {
- if (AMediaCodecActionCode_isTransient(actionCode)) {
- return ACTION_CODE_TRANSIENT;
- } else if (AMediaCodecActionCode_isRecoverable(actionCode)) {
- return ACTION_CODE_RECOVERABLE;
- }
- return ACTION_CODE_FATAL;
-}
-
-static CryptoPlugin::Mode translateToCryptoPluginMode(cryptoinfo_mode_t mode) {
- CryptoPlugin::Mode ret = CryptoPlugin::kMode_Unencrypted;
- switch (mode) {
- case AMEDIACODECRYPTOINFO_MODE_AES_CTR: {
- ret = CryptoPlugin::kMode_AES_CTR;
- break;
- }
-
- case AMEDIACODECRYPTOINFO_MODE_AES_WV: {
- ret = CryptoPlugin::kMode_AES_WV;
- break;
- }
-
- case AMEDIACODECRYPTOINFO_MODE_AES_CBC: {
- ret = CryptoPlugin::kMode_AES_CBC;
- break;
- }
-
- default:
- break;
- }
-
- return ret;
-}
-
-static cryptoinfo_mode_t translateToCryptoInfoMode(CryptoPlugin::Mode mode) {
- cryptoinfo_mode_t ret = AMEDIACODECRYPTOINFO_MODE_CLEAR;
- switch (mode) {
- case CryptoPlugin::kMode_AES_CTR: {
- ret = AMEDIACODECRYPTOINFO_MODE_AES_CTR;
- break;
- }
-
- case CryptoPlugin::kMode_AES_WV: {
- ret = AMEDIACODECRYPTOINFO_MODE_AES_WV;
- break;
- }
-
- case CryptoPlugin::kMode_AES_CBC: {
- ret = AMEDIACODECRYPTOINFO_MODE_AES_CBC;
- break;
- }
-
- default:
- break;
- }
-
- return ret;
-}
-
-//////////// AMediaFormatWrapper
-// static
-sp<AMediaFormatWrapper> AMediaFormatWrapper::Create(const sp<AMessage> &message) {
- sp<AMediaFormatWrapper> aMediaFormat = new AMediaFormatWrapper();
-
- for (size_t i = 0; i < message->countEntries(); ++i) {
- AMessage::Type valueType;
- const char *key = message->getEntryNameAt(i, &valueType);
-
- switch (valueType) {
- case AMessage::kTypeInt32: {
- int32_t val;
- if (!message->findInt32(key, &val)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setInt32(key, val);
- break;
- }
-
- case AMessage::kTypeInt64: {
- int64_t val;
- if (!message->findInt64(key, &val)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setInt64(key, val);
- break;
- }
-
- case AMessage::kTypeFloat: {
- float val;
- if (!message->findFloat(key, &val)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setFloat(key, val);
- break;
- }
-
- case AMessage::kTypeDouble: {
- double val;
- if (!message->findDouble(key, &val)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setDouble(key, val);
- break;
- }
-
- case AMessage::kTypeSize: {
- size_t val;
- if (!message->findSize(key, &val)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setSize(key, val);
- break;
- }
-
- case AMessage::kTypeRect: {
- int32_t left, top, right, bottom;
- if (!message->findRect(key, &left, &top, &right, &bottom)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setRect(key, left, top, right, bottom);
- break;
- }
-
- case AMessage::kTypeString: {
- AString val;
- if (!message->findString(key, &val)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setString(key, val);
- break;
- }
-
- case AMessage::kTypeBuffer: {
- sp<ABuffer> val;
- if (!message->findBuffer(key, &val)) {
- ALOGE("AMediaFormatWrapper::Create: error at item %zu", i);
- continue;
- }
- aMediaFormat->setBuffer(key, val->data(), val->size());
- break;
- }
-
- default: {
- break;
- }
- }
- }
-
- return aMediaFormat;
-}
-
-AMediaFormatWrapper::AMediaFormatWrapper() {
- mAMediaFormat = AMediaFormat_new();
-}
-
-AMediaFormatWrapper::AMediaFormatWrapper(AMediaFormat *aMediaFormat)
- : mAMediaFormat(aMediaFormat) {
-}
-
-AMediaFormatWrapper::~AMediaFormatWrapper() {
- release();
-}
-
-status_t AMediaFormatWrapper::release() {
- if (mAMediaFormat != NULL) {
- media_status_t err = AMediaFormat_delete(mAMediaFormat);
- mAMediaFormat = NULL;
- return translateErrorCode(err);
- }
- return OK;
-}
-
-AMediaFormat *AMediaFormatWrapper::getAMediaFormat() const {
- return mAMediaFormat;
-}
-
-sp<AMessage> AMediaFormatWrapper::toAMessage() const {
- sp<AMessage> msg;
- writeToAMessage(msg);
- return msg;
-}
-
-void AMediaFormatWrapper::writeToAMessage(sp<AMessage> &msg) const {
- if (mAMediaFormat == NULL) {
- msg = NULL;
- }
-
- if (msg == NULL) {
- msg = new AMessage;
- }
- for (auto& key : AMediaFormatKeyGroupInt32) {
- int32_t val;
- if (getInt32(key, &val)) {
- msg->setInt32(key, val);
- }
- }
- for (auto& key : AMediaFormatKeyGroupInt64) {
- int64_t val;
- if (getInt64(key, &val)) {
- msg->setInt64(key, val);
- }
- }
- for (auto& key : AMediaFormatKeyGroupString) {
- AString val;
- if (getString(key, &val)) {
- msg->setString(key, val);
- }
- }
- for (auto& key : AMediaFormatKeyGroupBuffer) {
- void *data;
- size_t size;
- if (getBuffer(key, &data, &size)) {
- sp<ABuffer> buffer = ABuffer::CreateAsCopy(data, size);
- msg->setBuffer(key, buffer);
- }
- }
- for (auto& key : AMediaFormatKeyGroupCsd) {
- void *data;
- size_t size;
- if (getBuffer(key, &data, &size)) {
- sp<ABuffer> buffer = ABuffer::CreateAsCopy(data, size);
- buffer->meta()->setInt32(AMEDIAFORMAT_KEY_CSD, 1);
- buffer->meta()->setInt64(AMEDIAFORMAT_KEY_TIME_US, 0);
- msg->setBuffer(key, buffer);
- }
- }
- for (auto& key : AMediaFormatKeyGroupRect) {
- int32_t left, top, right, bottom;
- if (getRect(key, &left, &top, &right, &bottom)) {
- msg->setRect(key, left, top, right, bottom);
- }
- }
- for (auto& key : AMediaFormatKeyGroupFloatInt32) {
- float valFloat;
- if (getFloat(key, &valFloat)) {
- msg->setFloat(key, valFloat);
- } else {
- int32_t valInt32;
- if (getInt32(key, &valInt32)) {
- msg->setFloat(key, (float)valInt32);
- }
- }
- }
-}
-
-const char* AMediaFormatWrapper::toString() const {
- if (mAMediaFormat == NULL) {
- return NULL;
- }
- return AMediaFormat_toString(mAMediaFormat);
-}
-
-bool AMediaFormatWrapper::getInt32(const char *name, int32_t *out) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- return AMediaFormat_getInt32(mAMediaFormat, name, out);
-}
-
-bool AMediaFormatWrapper::getInt64(const char *name, int64_t *out) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- return AMediaFormat_getInt64(mAMediaFormat, name, out);
-}
-
-bool AMediaFormatWrapper::getFloat(const char *name, float *out) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- return AMediaFormat_getFloat(mAMediaFormat, name, out);
-}
-
-bool AMediaFormatWrapper::getDouble(const char *name, double *out) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- return AMediaFormat_getDouble(mAMediaFormat, name, out);
-}
-
-bool AMediaFormatWrapper::getSize(const char *name, size_t *out) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- return AMediaFormat_getSize(mAMediaFormat, name, out);
-}
-
-bool AMediaFormatWrapper::getRect(
- const char *name, int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- return AMediaFormat_getRect(mAMediaFormat, name, left, top, right, bottom);
-}
-
-bool AMediaFormatWrapper::getBuffer(const char *name, void** data, size_t *outSize) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- return AMediaFormat_getBuffer(mAMediaFormat, name, data, outSize);
-}
-
-bool AMediaFormatWrapper::getString(const char *name, AString *out) const {
- if (mAMediaFormat == NULL) {
- return false;
- }
- const char *outChar = NULL;
- bool ret = AMediaFormat_getString(mAMediaFormat, name, &outChar);
- if (ret) {
- *out = AString(outChar);
- }
- return ret;
-}
-
-void AMediaFormatWrapper::setInt32(const char* name, int32_t value) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setInt32(mAMediaFormat, name, value);
- }
-}
-
-void AMediaFormatWrapper::setInt64(const char* name, int64_t value) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setInt64(mAMediaFormat, name, value);
- }
-}
-
-void AMediaFormatWrapper::setFloat(const char* name, float value) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setFloat(mAMediaFormat, name, value);
- }
-}
-
-void AMediaFormatWrapper::setDouble(const char* name, double value) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setDouble(mAMediaFormat, name, value);
- }
-}
-
-void AMediaFormatWrapper::setSize(const char* name, size_t value) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setSize(mAMediaFormat, name, value);
- }
-}
-
-void AMediaFormatWrapper::setRect(
- const char* name, int32_t left, int32_t top, int32_t right, int32_t bottom) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setRect(mAMediaFormat, name, left, top, right, bottom);
- }
-}
-
-void AMediaFormatWrapper::setString(const char* name, const AString &value) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setString(mAMediaFormat, name, value.c_str());
- }
-}
-
-void AMediaFormatWrapper::setBuffer(const char* name, void* data, size_t size) {
- if (mAMediaFormat != NULL) {
- AMediaFormat_setBuffer(mAMediaFormat, name, data, size);
- }
-}
-
-
-//////////// ANativeWindowWrapper
-ANativeWindowWrapper::ANativeWindowWrapper(ANativeWindow *aNativeWindow)
- : mANativeWindow(aNativeWindow) {
- if (aNativeWindow != NULL) {
- ANativeWindow_acquire(aNativeWindow);
- }
-}
-
-ANativeWindowWrapper::~ANativeWindowWrapper() {
- release();
-}
-
-status_t ANativeWindowWrapper::release() {
- if (mANativeWindow != NULL) {
- ANativeWindow_release(mANativeWindow);
- mANativeWindow = NULL;
- }
- return OK;
-}
-
-ANativeWindow *ANativeWindowWrapper::getANativeWindow() const {
- return mANativeWindow;
-}
-
-
-//////////// AMediaDrmWrapper
-AMediaDrmWrapper::AMediaDrmWrapper(const uint8_t uuid[16]) {
- mAMediaDrm = AMediaDrm_createByUUID(uuid);
-}
-
-AMediaDrmWrapper::AMediaDrmWrapper(AMediaDrm *aMediaDrm)
- : mAMediaDrm(aMediaDrm) {
-}
-
-AMediaDrmWrapper::~AMediaDrmWrapper() {
- release();
-}
-
-status_t AMediaDrmWrapper::release() {
- if (mAMediaDrm != NULL) {
- AMediaDrm_release(mAMediaDrm);
- mAMediaDrm = NULL;
- }
- return OK;
-}
-
-AMediaDrm *AMediaDrmWrapper::getAMediaDrm() const {
- return mAMediaDrm;
-}
-
-// static
-bool AMediaDrmWrapper::isCryptoSchemeSupported(
- const uint8_t uuid[16],
- const char *mimeType) {
- return AMediaDrm_isCryptoSchemeSupported(uuid, mimeType);
-}
-
-
-//////////// AMediaCryptoWrapper
-AMediaCryptoWrapper::AMediaCryptoWrapper(
- const uint8_t uuid[16], const void *initData, size_t initDataSize) {
- mAMediaCrypto = AMediaCrypto_new(uuid, initData, initDataSize);
-}
-
-AMediaCryptoWrapper::AMediaCryptoWrapper(AMediaCrypto *aMediaCrypto)
- : mAMediaCrypto(aMediaCrypto) {
-}
-
-AMediaCryptoWrapper::~AMediaCryptoWrapper() {
- release();
-}
-
-status_t AMediaCryptoWrapper::release() {
- if (mAMediaCrypto != NULL) {
- AMediaCrypto_delete(mAMediaCrypto);
- mAMediaCrypto = NULL;
- }
- return OK;
-}
-
-AMediaCrypto *AMediaCryptoWrapper::getAMediaCrypto() const {
- return mAMediaCrypto;
-}
-
-bool AMediaCryptoWrapper::isCryptoSchemeSupported(const uint8_t uuid[16]) {
- if (mAMediaCrypto == NULL) {
- return false;
- }
- return AMediaCrypto_isCryptoSchemeSupported(uuid);
-}
-
-bool AMediaCryptoWrapper::requiresSecureDecoderComponent(const char *mime) {
- if (mAMediaCrypto == NULL) {
- return false;
- }
- return AMediaCrypto_requiresSecureDecoderComponent(mime);
-}
-
-
-//////////// AMediaCodecCryptoInfoWrapper
-// static
-sp<AMediaCodecCryptoInfoWrapper> AMediaCodecCryptoInfoWrapper::Create(MetaDataBase &meta) {
-
- uint32_t type;
- const void *crypteddata;
- size_t cryptedsize;
-
- if (!meta.findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
- return NULL;
- }
-
- int numSubSamples = cryptedsize / sizeof(size_t);
-
- if (numSubSamples <= 0) {
- ALOGE("Create: INVALID numSubSamples: %d", numSubSamples);
- return NULL;
- }
-
- const void *cleardata;
- size_t clearsize;
- if (meta.findData(kKeyPlainSizes, &type, &cleardata, &clearsize)) {
- if (clearsize != cryptedsize) {
- // The two must be of the same length.
- ALOGE("Create: mismatch cryptedsize: %zu != clearsize: %zu", cryptedsize, clearsize);
- return NULL;
- }
- }
-
- const void *key;
- size_t keysize;
- if (meta.findData(kKeyCryptoKey, &type, &key, &keysize)) {
- if (keysize != kAESBlockSize) {
- // Keys must be 16 bytes in length.
- ALOGE("Create: Keys must be %zu bytes in length: %zu", kAESBlockSize, keysize);
- return NULL;
- }
- }
-
- const void *iv;
- size_t ivsize;
- if (meta.findData(kKeyCryptoIV, &type, &iv, &ivsize)) {
- if (ivsize != kAESBlockSize) {
- // IVs must be 16 bytes in length.
- ALOGE("Create: IV must be %zu bytes in length: %zu", kAESBlockSize, ivsize);
- return NULL;
- }
- }
-
- int32_t mode;
- if (!meta.findInt32(kKeyCryptoMode, &mode)) {
- mode = CryptoPlugin::kMode_AES_CTR;
- }
-
- return new AMediaCodecCryptoInfoWrapper(
- numSubSamples,
- (uint8_t*) key,
- (uint8_t*) iv,
- (CryptoPlugin::Mode)mode,
- (size_t*) cleardata,
- (size_t*) crypteddata);
-}
-
-AMediaCodecCryptoInfoWrapper::AMediaCodecCryptoInfoWrapper(
- int numsubsamples,
- uint8_t key[16],
- uint8_t iv[16],
- CryptoPlugin::Mode mode,
- size_t *clearbytes,
- size_t *encryptedbytes) {
- mAMediaCodecCryptoInfo =
- AMediaCodecCryptoInfo_new(numsubsamples,
- key,
- iv,
- translateToCryptoInfoMode(mode),
- clearbytes,
- encryptedbytes);
-}
-
-AMediaCodecCryptoInfoWrapper::AMediaCodecCryptoInfoWrapper(
- AMediaCodecCryptoInfo *aMediaCodecCryptoInfo)
- : mAMediaCodecCryptoInfo(aMediaCodecCryptoInfo) {
-}
-
-AMediaCodecCryptoInfoWrapper::~AMediaCodecCryptoInfoWrapper() {
- release();
-}
-
-status_t AMediaCodecCryptoInfoWrapper::release() {
- if (mAMediaCodecCryptoInfo != NULL) {
- media_status_t err = AMediaCodecCryptoInfo_delete(mAMediaCodecCryptoInfo);
- mAMediaCodecCryptoInfo = NULL;
- return translateErrorCode(err);
- }
- return OK;
-}
-
-AMediaCodecCryptoInfo *AMediaCodecCryptoInfoWrapper::getAMediaCodecCryptoInfo() const {
- return mAMediaCodecCryptoInfo;
-}
-
-void AMediaCodecCryptoInfoWrapper::setPattern(CryptoPlugin::Pattern *pattern) {
- if (mAMediaCodecCryptoInfo == NULL || pattern == NULL) {
- return;
- }
- cryptoinfo_pattern_t ndkPattern = {(int32_t)pattern->mEncryptBlocks,
- (int32_t)pattern->mSkipBlocks };
- return AMediaCodecCryptoInfo_setPattern(mAMediaCodecCryptoInfo, &ndkPattern);
-}
-
-size_t AMediaCodecCryptoInfoWrapper::getNumSubSamples() {
- if (mAMediaCodecCryptoInfo == NULL) {
- return 0;
- }
- return AMediaCodecCryptoInfo_getNumSubSamples(mAMediaCodecCryptoInfo);
-}
-
-status_t AMediaCodecCryptoInfoWrapper::getKey(uint8_t *dst) {
- if (mAMediaCodecCryptoInfo == NULL) {
- return DEAD_OBJECT;
- }
- if (dst == NULL) {
- return BAD_VALUE;
- }
- return translateErrorCode(
- AMediaCodecCryptoInfo_getKey(mAMediaCodecCryptoInfo, dst));
-}
-
-status_t AMediaCodecCryptoInfoWrapper::getIV(uint8_t *dst) {
- if (mAMediaCodecCryptoInfo == NULL) {
- return DEAD_OBJECT;
- }
- if (dst == NULL) {
- return BAD_VALUE;
- }
- return translateErrorCode(
- AMediaCodecCryptoInfo_getIV(mAMediaCodecCryptoInfo, dst));
-}
-
-CryptoPlugin::Mode AMediaCodecCryptoInfoWrapper::getMode() {
- if (mAMediaCodecCryptoInfo == NULL) {
- return CryptoPlugin::kMode_Unencrypted;
- }
- return translateToCryptoPluginMode(
- AMediaCodecCryptoInfo_getMode(mAMediaCodecCryptoInfo));
-}
-
-status_t AMediaCodecCryptoInfoWrapper::getClearBytes(size_t *dst) {
- if (mAMediaCodecCryptoInfo == NULL) {
- return DEAD_OBJECT;
- }
- if (dst == NULL) {
- return BAD_VALUE;
- }
- return translateErrorCode(
- AMediaCodecCryptoInfo_getClearBytes(mAMediaCodecCryptoInfo, dst));
-}
-
-status_t AMediaCodecCryptoInfoWrapper::getEncryptedBytes(size_t *dst) {
- if (mAMediaCodecCryptoInfo == NULL) {
- return DEAD_OBJECT;
- }
- if (dst == NULL) {
- return BAD_VALUE;
- }
- return translateErrorCode(
- AMediaCodecCryptoInfo_getEncryptedBytes(mAMediaCodecCryptoInfo, dst));
-}
-
-
-//////////// AMediaCodecWrapper
-// static
-sp<AMediaCodecWrapper> AMediaCodecWrapper::CreateCodecByName(const AString &name) {
- AMediaCodec *aMediaCodec = AMediaCodec_createCodecByName(name.c_str());
- return new AMediaCodecWrapper(aMediaCodec);
-}
-
-// static
-sp<AMediaCodecWrapper> AMediaCodecWrapper::CreateDecoderByType(const AString &mimeType) {
- AMediaCodec *aMediaCodec = AMediaCodec_createDecoderByType(mimeType.c_str());
- return new AMediaCodecWrapper(aMediaCodec);
-}
-
-// static
-void AMediaCodecWrapper::OnInputAvailableCB(
- AMediaCodec * /* aMediaCodec */,
- void *userdata,
- int32_t index) {
- ALOGV("OnInputAvailableCB: index(%d)", index);
- sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
- msg->setInt32("callbackID", CB_INPUT_AVAILABLE);
- msg->setInt32("index", index);
- msg->post();
-}
-
-// static
-void AMediaCodecWrapper::OnOutputAvailableCB(
- AMediaCodec * /* aMediaCodec */,
- void *userdata,
- int32_t index,
- AMediaCodecBufferInfo *bufferInfo) {
- ALOGV("OnOutputAvailableCB: index(%d), (%d, %d, %lld, 0x%x)",
- index, bufferInfo->offset, bufferInfo->size,
- (long long)bufferInfo->presentationTimeUs, bufferInfo->flags);
- sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
- msg->setInt32("callbackID", CB_OUTPUT_AVAILABLE);
- msg->setInt32("index", index);
- msg->setSize("offset", (size_t)(bufferInfo->offset));
- msg->setSize("size", (size_t)(bufferInfo->size));
- msg->setInt64("timeUs", bufferInfo->presentationTimeUs);
- msg->setInt32("flags", (int32_t)(bufferInfo->flags));
- msg->post();
-}
-
-// static
-void AMediaCodecWrapper::OnFormatChangedCB(
- AMediaCodec * /* aMediaCodec */,
- void *userdata,
- AMediaFormat *format) {
- sp<AMediaFormatWrapper> formatWrapper = new AMediaFormatWrapper(format);
- sp<AMessage> outputFormat = formatWrapper->toAMessage();
- ALOGV("OnFormatChangedCB: format(%s)", outputFormat->debugString().c_str());
-
- sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
- msg->setInt32("callbackID", CB_OUTPUT_FORMAT_CHANGED);
- msg->setMessage("format", outputFormat);
- msg->post();
-}
-
-// static
-void AMediaCodecWrapper::OnErrorCB(
- AMediaCodec * /* aMediaCodec */,
- void *userdata,
- media_status_t err,
- int32_t actionCode,
- const char *detail) {
- ALOGV("OnErrorCB: err(%d), actionCode(%d), detail(%s)", err, actionCode, detail);
- sp<AMessage> msg = sp<AMessage>((AMessage *)userdata)->dup();
- msg->setInt32("callbackID", CB_ERROR);
- msg->setInt32("err", translateErrorCode(err));
- msg->setInt32("actionCode", translateActionCode(actionCode));
- msg->setString("detail", detail);
- msg->post();
-}
-
-AMediaCodecWrapper::AMediaCodecWrapper(AMediaCodec *aMediaCodec)
- : mAMediaCodec(aMediaCodec) {
-}
-
-AMediaCodecWrapper::~AMediaCodecWrapper() {
- release();
-}
-
-status_t AMediaCodecWrapper::release() {
- if (mAMediaCodec != NULL) {
- AMediaCodecOnAsyncNotifyCallback aCB = {};
- AMediaCodec_setAsyncNotifyCallback(mAMediaCodec, aCB, NULL);
- mCallback = NULL;
-
- media_status_t err = AMediaCodec_delete(mAMediaCodec);
- mAMediaCodec = NULL;
- return translateErrorCode(err);
- }
- return OK;
-}
-
-AMediaCodec *AMediaCodecWrapper::getAMediaCodec() const {
- return mAMediaCodec;
-}
-
-status_t AMediaCodecWrapper::getName(AString *outComponentName) const {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- char *name = NULL;
- media_status_t err = AMediaCodec_getName(mAMediaCodec, &name);
- if (err != AMEDIA_OK) {
- return translateErrorCode(err);
- }
-
- *outComponentName = AString(name);
- AMediaCodec_releaseName(mAMediaCodec, name);
- return OK;
-}
-
-status_t AMediaCodecWrapper::configure(
- const sp<AMediaFormatWrapper> &format,
- const sp<ANativeWindowWrapper> &nww,
- const sp<AMediaCryptoWrapper> &crypto,
- uint32_t flags) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
-
- media_status_t err = AMediaCodec_configure(
- mAMediaCodec,
- format->getAMediaFormat(),
- (nww == NULL ? NULL : nww->getANativeWindow()),
- crypto == NULL ? NULL : crypto->getAMediaCrypto(),
- flags);
-
- return translateErrorCode(err);
-}
-
-status_t AMediaCodecWrapper::setCallback(const sp<AMessage> &callback) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
-
- mCallback = callback;
-
- AMediaCodecOnAsyncNotifyCallback aCB = {
- OnInputAvailableCB,
- OnOutputAvailableCB,
- OnFormatChangedCB,
- OnErrorCB
- };
-
- return translateErrorCode(
- AMediaCodec_setAsyncNotifyCallback(mAMediaCodec, aCB, callback.get()));
-}
-
-status_t AMediaCodecWrapper::releaseCrypto() {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaCodec_releaseCrypto(mAMediaCodec));
-}
-
-status_t AMediaCodecWrapper::start() {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaCodec_start(mAMediaCodec));
-}
-
-status_t AMediaCodecWrapper::stop() {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaCodec_stop(mAMediaCodec));
-}
-
-status_t AMediaCodecWrapper::flush() {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaCodec_flush(mAMediaCodec));
-}
-
-uint8_t* AMediaCodecWrapper::getInputBuffer(size_t idx, size_t *out_size) {
- if (mAMediaCodec == NULL) {
- return NULL;
- }
- return AMediaCodec_getInputBuffer(mAMediaCodec, idx, out_size);
-}
-
-uint8_t* AMediaCodecWrapper::getOutputBuffer(size_t idx, size_t *out_size) {
- if (mAMediaCodec == NULL) {
- return NULL;
- }
- return AMediaCodec_getOutputBuffer(mAMediaCodec, idx, out_size);
-}
-
-status_t AMediaCodecWrapper::queueInputBuffer(
- size_t idx,
- size_t offset,
- size_t size,
- uint64_t time,
- uint32_t flags) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(
- AMediaCodec_queueInputBuffer(mAMediaCodec, idx, offset, size, time, flags));
-}
-
-status_t AMediaCodecWrapper::queueSecureInputBuffer(
- size_t idx,
- size_t offset,
- sp<AMediaCodecCryptoInfoWrapper> &codecCryptoInfo,
- uint64_t time,
- uint32_t flags) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(
- AMediaCodec_queueSecureInputBuffer(
- mAMediaCodec,
- idx,
- offset,
- codecCryptoInfo->getAMediaCodecCryptoInfo(),
- time,
- flags));
-}
-
-sp<AMediaFormatWrapper> AMediaCodecWrapper::getOutputFormat() {
- if (mAMediaCodec == NULL) {
- return NULL;
- }
- return new AMediaFormatWrapper(AMediaCodec_getOutputFormat(mAMediaCodec));
-}
-
-sp<AMediaFormatWrapper> AMediaCodecWrapper::getInputFormat() {
- if (mAMediaCodec == NULL) {
- return NULL;
- }
- return new AMediaFormatWrapper(AMediaCodec_getInputFormat(mAMediaCodec));
-}
-
-status_t AMediaCodecWrapper::releaseOutputBuffer(size_t idx, bool render) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(
- AMediaCodec_releaseOutputBuffer(mAMediaCodec, idx, render));
-}
-
-status_t AMediaCodecWrapper::setOutputSurface(const sp<ANativeWindowWrapper> &nww) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(
- AMediaCodec_setOutputSurface(mAMediaCodec,
- (nww == NULL ? NULL : nww->getANativeWindow())));
-}
-
-status_t AMediaCodecWrapper::releaseOutputBufferAtTime(size_t idx, int64_t timestampNs) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(
- AMediaCodec_releaseOutputBufferAtTime(mAMediaCodec, idx, timestampNs));
-}
-
-status_t AMediaCodecWrapper::setParameters(const sp<AMediaFormatWrapper> &params) {
- if (mAMediaCodec == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(
- AMediaCodec_setParameters(mAMediaCodec, params->getAMediaFormat()));
-}
-
-//////////// AMediaExtractorWrapper
-
-AMediaExtractorWrapper::AMediaExtractorWrapper(AMediaExtractor *aMediaExtractor)
- : mAMediaExtractor(aMediaExtractor) {
-}
-
-AMediaExtractorWrapper::~AMediaExtractorWrapper() {
- release();
-}
-
-status_t AMediaExtractorWrapper::release() {
- if (mAMediaExtractor != NULL) {
- media_status_t err = AMediaExtractor_delete(mAMediaExtractor);
- mAMediaExtractor = NULL;
- return translateErrorCode(err);
- }
- return OK;
-}
-
-AMediaExtractor *AMediaExtractorWrapper::getAMediaExtractor() const {
- return mAMediaExtractor;
-}
-
-status_t AMediaExtractorWrapper::setDataSource(int fd, off64_t offset, off64_t length) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaExtractor_setDataSourceFd(
- mAMediaExtractor, fd, offset, length));
-}
-
-status_t AMediaExtractorWrapper::setDataSource(const char *location) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaExtractor_setDataSource(mAMediaExtractor, location));
-}
-
-status_t AMediaExtractorWrapper::setDataSource(AMediaDataSource *source) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaExtractor_setDataSourceCustom(mAMediaExtractor, source));
-}
-
-size_t AMediaExtractorWrapper::getTrackCount() {
- if (mAMediaExtractor == NULL) {
- return 0;
- }
- return AMediaExtractor_getTrackCount(mAMediaExtractor);
-}
-
-sp<AMediaFormatWrapper> AMediaExtractorWrapper::getFormat() {
- if (mAMediaExtractor == NULL) {
- return NULL;
- }
- return new AMediaFormatWrapper(AMediaExtractor_getFileFormat(mAMediaExtractor));
-}
-
-sp<AMediaFormatWrapper> AMediaExtractorWrapper::getTrackFormat(size_t idx) {
- if (mAMediaExtractor == NULL) {
- return NULL;
- }
- return new AMediaFormatWrapper(AMediaExtractor_getTrackFormat(mAMediaExtractor, idx));
-}
-
-status_t AMediaExtractorWrapper::selectTrack(size_t idx) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaExtractor_selectTrack(mAMediaExtractor, idx));
-}
-
-status_t AMediaExtractorWrapper::unselectTrack(size_t idx) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
- return translateErrorCode(AMediaExtractor_unselectTrack(mAMediaExtractor, idx));
-}
-
-status_t AMediaExtractorWrapper::selectSingleTrack(size_t idx) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
- for (size_t i = 0; i < AMediaExtractor_getTrackCount(mAMediaExtractor); ++i) {
- if (i == idx) {
- media_status_t err = AMediaExtractor_selectTrack(mAMediaExtractor, i);
- if (err != AMEDIA_OK) {
- return translateErrorCode(err);
- }
- } else {
- media_status_t err = AMediaExtractor_unselectTrack(mAMediaExtractor, i);
- if (err != AMEDIA_OK) {
- return translateErrorCode(err);
- }
- }
- }
- return OK;
-}
-
-ssize_t AMediaExtractorWrapper::readSampleData(const sp<ABuffer> &buffer) {
- if (mAMediaExtractor == NULL) {
- return -1;
- }
- return AMediaExtractor_readSampleData(mAMediaExtractor, buffer->data(), buffer->capacity());
-}
-
-ssize_t AMediaExtractorWrapper::getSampleSize() {
- if (mAMediaExtractor == NULL) {
- return 0;
- }
- return AMediaExtractor_getSampleSize(mAMediaExtractor);
-}
-
-uint32_t AMediaExtractorWrapper::getSampleFlags() {
- if (mAMediaExtractor == NULL) {
- return 0;
- }
- return AMediaExtractor_getSampleFlags(mAMediaExtractor);
-}
-
-int AMediaExtractorWrapper::getSampleTrackIndex() {
- if (mAMediaExtractor == NULL) {
- return -1;
- }
- return AMediaExtractor_getSampleTrackIndex(mAMediaExtractor);
-}
-
-int64_t AMediaExtractorWrapper::getSampleTime() {
- if (mAMediaExtractor == NULL) {
- return -1;
- }
- return AMediaExtractor_getSampleTime(mAMediaExtractor);
-}
-
-status_t AMediaExtractorWrapper::getSampleFormat(sp<AMediaFormatWrapper> &formatWrapper) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
- AMediaFormat *format = AMediaFormat_new();
- formatWrapper = new AMediaFormatWrapper(format);
- return translateErrorCode(AMediaExtractor_getSampleFormat(mAMediaExtractor, format));
-}
-
-int64_t AMediaExtractorWrapper::getCachedDuration() {
- if (mAMediaExtractor == NULL) {
- return -1;
- }
- return AMediaExtractor_getCachedDuration(mAMediaExtractor);
-}
-
-bool AMediaExtractorWrapper::advance() {
- if (mAMediaExtractor == NULL) {
- return false;
- }
- return AMediaExtractor_advance(mAMediaExtractor);
-}
-
-status_t AMediaExtractorWrapper::seekTo(int64_t seekPosUs, MediaSource::ReadOptions::SeekMode mode) {
- if (mAMediaExtractor == NULL) {
- return DEAD_OBJECT;
- }
-
- SeekMode aMode;
- switch (mode) {
- case MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC: {
- aMode = AMEDIAEXTRACTOR_SEEK_PREVIOUS_SYNC;
- break;
- }
- case MediaSource::ReadOptions::SEEK_NEXT_SYNC: {
- aMode = AMEDIAEXTRACTOR_SEEK_NEXT_SYNC;
- break;
- }
- default: {
- aMode = AMEDIAEXTRACTOR_SEEK_CLOSEST_SYNC;
- break;
- }
- }
- return AMediaExtractor_seekTo(mAMediaExtractor, seekPosUs, aMode);
-}
-
-PsshInfo* AMediaExtractorWrapper::getPsshInfo() {
- if (mAMediaExtractor == NULL) {
- return NULL;
- }
- return AMediaExtractor_getPsshInfo(mAMediaExtractor);
-}
-
-sp<AMediaCodecCryptoInfoWrapper> AMediaExtractorWrapper::getSampleCryptoInfo() {
- if (mAMediaExtractor == NULL) {
- return NULL;
- }
- AMediaCodecCryptoInfo *cryptoInfo = AMediaExtractor_getSampleCryptoInfo(mAMediaExtractor);
- if (cryptoInfo == NULL) {
- return NULL;
- }
- return new AMediaCodecCryptoInfoWrapper(cryptoInfo);
-}
-
-AMediaDataSourceWrapper::AMediaDataSourceWrapper(const sp<DataSource> &dataSource)
- : mDataSource(dataSource),
- mAMediaDataSource(convertDataSourceToAMediaDataSource(dataSource)) {
-}
-
-AMediaDataSourceWrapper::AMediaDataSourceWrapper(AMediaDataSource *aDataSource)
- : mDataSource(NULL),
- mAMediaDataSource(aDataSource) {
-}
-
-AMediaDataSourceWrapper::~AMediaDataSourceWrapper() {
- if (mAMediaDataSource == NULL) {
- return;
- }
- AMediaDataSource_close(mAMediaDataSource);
- AMediaDataSource_delete(mAMediaDataSource);
- mAMediaDataSource = NULL;
-}
-
-AMediaDataSource* AMediaDataSourceWrapper::getAMediaDataSource() {
- return mAMediaDataSource;
-}
-
-void AMediaDataSourceWrapper::close() {
- AMediaDataSource_close(mAMediaDataSource);
-}
-
-} // namespace android
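
The wrapper methods deleted above were thin forwarders to the public NDK extractor C API, so callers can drive that API directly. A minimal, hedged sketch under that assumption follows; the file-path handling and the 64 KiB sample buffer are illustrative only, not part of this change.

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <media/NdkMediaExtractor.h>

    // Hedged sketch: iterate samples with the NDK calls the removed wrapper forwarded to.
    static size_t countSamples(const char* path) {
        int fd = open(path, O_RDONLY);
        if (fd < 0) return 0;
        off64_t length = lseek64(fd, 0, SEEK_END);
        AMediaExtractor* ex = AMediaExtractor_new();
        size_t samples = 0;
        if (AMediaExtractor_setDataSourceFd(ex, fd, 0 /*offset*/, length) == AMEDIA_OK) {
            AMediaExtractor_selectTrack(ex, 0);
            uint8_t buf[64 * 1024];
            // getSampleTime() returns -1 once no sample is available.
            while (AMediaExtractor_getSampleTime(ex) >= 0) {
                (void)AMediaExtractor_readSampleData(ex, buf, sizeof(buf));
                ++samples;
                if (!AMediaExtractor_advance(ex)) break;
            }
        }
        AMediaExtractor_delete(ex);
        close(fd);
        return samples;
    }
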
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
deleted file mode 100644
index 2bf0802..0000000
--- a/media/libmedia/Visualizer.cpp
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
-**
-** Copyright 2010, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "Visualizer"
-#include <utils/Log.h>
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <limits.h>
-
-#include <media/Visualizer.h>
-#include <audio_utils/fixedfft.h>
-#include <utils/Thread.h>
-
-namespace android {
-
-// ---------------------------------------------------------------------------
-
-Visualizer::Visualizer (const String16& opPackageName,
- int32_t priority,
- effect_callback_t cbf,
- void* user,
- audio_session_t sessionId)
- : AudioEffect(SL_IID_VISUALIZATION, opPackageName, NULL, priority, cbf, user, sessionId),
- mCaptureRate(CAPTURE_RATE_DEF),
- mCaptureSize(CAPTURE_SIZE_DEF),
- mSampleRate(44100000),
- mScalingMode(VISUALIZER_SCALING_MODE_NORMALIZED),
- mMeasurementMode(MEASUREMENT_MODE_NONE),
- mCaptureCallBack(NULL),
- mCaptureCbkUser(NULL)
-{
- initCaptureSize();
-}
-
-Visualizer::~Visualizer()
-{
- ALOGV("Visualizer::~Visualizer()");
- setEnabled(false);
- setCaptureCallBack(NULL, NULL, 0, 0);
-}
-
-void Visualizer::release()
-{
- ALOGV("Visualizer::release()");
- setEnabled(false);
- Mutex::Autolock _l(mCaptureLock);
-
- mCaptureThread.clear();
- mCaptureCallBack = NULL;
- mCaptureCbkUser = NULL;
- mCaptureFlags = 0;
- mCaptureRate = 0;
-}
-
-status_t Visualizer::setEnabled(bool enabled)
-{
- Mutex::Autolock _l(mCaptureLock);
-
- sp<CaptureThread> t = mCaptureThread;
- if (t != 0) {
- if (enabled) {
- if (t->exitPending()) {
- mCaptureLock.unlock();
- if (t->requestExitAndWait() == WOULD_BLOCK) {
- mCaptureLock.lock();
- ALOGE("Visualizer::enable() called from thread");
- return INVALID_OPERATION;
- }
- mCaptureLock.lock();
- }
- }
- t->mLock.lock();
- }
-
- status_t status = AudioEffect::setEnabled(enabled);
-
- if (t != 0) {
- if (enabled && status == NO_ERROR) {
- t->run("Visualizer");
- } else {
- t->requestExit();
- }
- }
-
- if (t != 0) {
- t->mLock.unlock();
- }
-
- return status;
-}
-
-status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags,
- uint32_t rate)
-{
- if (rate > CAPTURE_RATE_MAX) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mCaptureLock);
-
- if (mEnabled) {
- return INVALID_OPERATION;
- }
-
- if (mCaptureThread != 0) {
- mCaptureLock.unlock();
- mCaptureThread->requestExitAndWait();
- mCaptureLock.lock();
- }
-
- mCaptureThread.clear();
- mCaptureCallBack = cbk;
- mCaptureCbkUser = user;
- mCaptureFlags = flags;
- mCaptureRate = rate;
-
- if (cbk != NULL) {
- mCaptureThread = new CaptureThread(this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
- }
- ALOGV("setCaptureCallBack() rate: %d thread %p flags 0x%08x",
- rate, mCaptureThread.get(), mCaptureFlags);
- return NO_ERROR;
-}
-
-status_t Visualizer::setCaptureSize(uint32_t size)
-{
- if (size > VISUALIZER_CAPTURE_SIZE_MAX ||
- size < VISUALIZER_CAPTURE_SIZE_MIN ||
- popcount(size) != 1) {
- return BAD_VALUE;
- }
-
- Mutex::Autolock _l(mCaptureLock);
- if (mEnabled) {
- return INVALID_OPERATION;
- }
-
- uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
- effect_param_t *p = (effect_param_t *)buf32;
-
- p->psize = sizeof(uint32_t);
- p->vsize = sizeof(uint32_t);
- *(int32_t *)p->data = VISUALIZER_PARAM_CAPTURE_SIZE;
- *((int32_t *)p->data + 1)= size;
- status_t status = setParameter(p);
-
- ALOGV("setCaptureSize size %d status %d p->status %d", size, status, p->status);
-
- if (status == NO_ERROR) {
- status = p->status;
- if (status == NO_ERROR) {
- mCaptureSize = size;
- }
- }
-
- return status;
-}
-
-status_t Visualizer::setScalingMode(uint32_t mode) {
- if ((mode != VISUALIZER_SCALING_MODE_NORMALIZED)
- && (mode != VISUALIZER_SCALING_MODE_AS_PLAYED)) {
- return BAD_VALUE;
- }
-
- Mutex::Autolock _l(mCaptureLock);
-
- uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
- effect_param_t *p = (effect_param_t *)buf32;
-
- p->psize = sizeof(uint32_t);
- p->vsize = sizeof(uint32_t);
- *(int32_t *)p->data = VISUALIZER_PARAM_SCALING_MODE;
- *((int32_t *)p->data + 1)= mode;
- status_t status = setParameter(p);
-
- ALOGV("setScalingMode mode %d status %d p->status %d", mode, status, p->status);
-
- if (status == NO_ERROR) {
- status = p->status;
- if (status == NO_ERROR) {
- mScalingMode = mode;
- }
- }
-
- return status;
-}
-
-status_t Visualizer::setMeasurementMode(uint32_t mode) {
- if ((mode != MEASUREMENT_MODE_NONE)
- //Note: needs to be handled as a mask when more measurement modes are added
- && ((mode & MEASUREMENT_MODE_PEAK_RMS) != mode)) {
- return BAD_VALUE;
- }
-
- Mutex::Autolock _l(mCaptureLock);
-
- uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
- effect_param_t *p = (effect_param_t *)buf32;
-
- p->psize = sizeof(uint32_t);
- p->vsize = sizeof(uint32_t);
- *(int32_t *)p->data = VISUALIZER_PARAM_MEASUREMENT_MODE;
- *((int32_t *)p->data + 1)= mode;
- status_t status = setParameter(p);
-
- ALOGV("setMeasurementMode mode %d status %d p->status %d", mode, status, p->status);
-
- if (status == NO_ERROR) {
- status = p->status;
- if (status == NO_ERROR) {
- mMeasurementMode = mode;
- }
- }
- return status;
-}
-
-status_t Visualizer::getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements) {
- if (mMeasurementMode == MEASUREMENT_MODE_NONE) {
- ALOGE("Cannot retrieve int measurements, no measurement mode set");
- return INVALID_OPERATION;
- }
- if (!(mMeasurementMode & type)) {
- // measurement type has not been set on this Visualizer
- ALOGE("Cannot retrieve int measurements, requested measurement mode 0x%x not set(0x%x)",
- type, mMeasurementMode);
- return INVALID_OPERATION;
- }
- // only peak+RMS measurement supported
- if ((type != MEASUREMENT_MODE_PEAK_RMS)
- // for peak+RMS measurement, the results are 2 int32_t values
- || (number != 2)) {
- ALOGE("Cannot retrieve int measurements, MEASUREMENT_MODE_PEAK_RMS returns 2 ints, not %d",
- number);
- return BAD_VALUE;
- }
-
- status_t status = NO_ERROR;
- if (mEnabled) {
- uint32_t replySize = number * sizeof(int32_t);
- status = command(VISUALIZER_CMD_MEASURE,
- sizeof(uint32_t) /*cmdSize*/,
- &type /*cmdData*/,
- &replySize, measurements);
- ALOGV("getMeasurements() command returned %d", status);
- if ((status == NO_ERROR) && (replySize == 0)) {
- status = NOT_ENOUGH_DATA;
- }
- } else {
- ALOGV("getMeasurements() disabled");
- return INVALID_OPERATION;
- }
- return status;
-}
-
-status_t Visualizer::getWaveForm(uint8_t *waveform)
-{
- if (waveform == NULL) {
- return BAD_VALUE;
- }
- if (mCaptureSize == 0) {
- return NO_INIT;
- }
-
- status_t status = NO_ERROR;
- if (mEnabled) {
- uint32_t replySize = mCaptureSize;
- status = command(VISUALIZER_CMD_CAPTURE, 0, NULL, &replySize, waveform);
- ALOGV("getWaveForm() command returned %d", status);
- if ((status == NO_ERROR) && (replySize == 0)) {
- status = NOT_ENOUGH_DATA;
- }
- } else {
- ALOGV("getWaveForm() disabled");
- memset(waveform, 0x80, mCaptureSize);
- }
- return status;
-}
-
-status_t Visualizer::getFft(uint8_t *fft)
-{
- if (fft == NULL) {
- return BAD_VALUE;
- }
- if (mCaptureSize == 0) {
- return NO_INIT;
- }
-
- status_t status = NO_ERROR;
- if (mEnabled) {
- uint8_t buf[mCaptureSize];
- status = getWaveForm(buf);
- if (status == NO_ERROR) {
- status = doFft(fft, buf);
- }
- } else {
- memset(fft, 0, mCaptureSize);
- }
- return status;
-}
-
-status_t Visualizer::doFft(uint8_t *fft, uint8_t *waveform)
-{
- int32_t workspace[mCaptureSize >> 1];
- int32_t nonzero = 0;
-
- for (uint32_t i = 0; i < mCaptureSize; i += 2) {
- workspace[i >> 1] =
- ((waveform[i] ^ 0x80) << 24) | ((waveform[i + 1] ^ 0x80) << 8);
- nonzero |= workspace[i >> 1];
- }
-
- if (nonzero) {
- fixed_fft_real(mCaptureSize >> 1, workspace);
- }
-
- for (uint32_t i = 0; i < mCaptureSize; i += 2) {
- short tmp = workspace[i >> 1] >> 21;
- while (tmp > 127 || tmp < -128) tmp >>= 1;
- fft[i] = tmp;
- tmp = workspace[i >> 1];
- tmp >>= 5;
- while (tmp > 127 || tmp < -128) tmp >>= 1;
- fft[i + 1] = tmp;
- }
-
- return NO_ERROR;
-}
-
-void Visualizer::periodicCapture()
-{
- Mutex::Autolock _l(mCaptureLock);
- ALOGV("periodicCapture() %p mCaptureCallBack %p mCaptureFlags 0x%08x",
- this, mCaptureCallBack, mCaptureFlags);
- if (mCaptureCallBack != NULL &&
- (mCaptureFlags & (CAPTURE_WAVEFORM|CAPTURE_FFT)) &&
- mCaptureSize != 0) {
- uint8_t waveform[mCaptureSize];
- status_t status = getWaveForm(waveform);
- if (status != NO_ERROR) {
- return;
- }
- uint8_t fft[mCaptureSize];
- if (mCaptureFlags & CAPTURE_FFT) {
- status = doFft(fft, waveform);
- }
- if (status != NO_ERROR) {
- return;
- }
- uint8_t *wavePtr = NULL;
- uint8_t *fftPtr = NULL;
- uint32_t waveSize = 0;
- uint32_t fftSize = 0;
- if (mCaptureFlags & CAPTURE_WAVEFORM) {
- wavePtr = waveform;
- waveSize = mCaptureSize;
- }
- if (mCaptureFlags & CAPTURE_FFT) {
- fftPtr = fft;
- fftSize = mCaptureSize;
- }
- mCaptureCallBack(mCaptureCbkUser, waveSize, wavePtr, fftSize, fftPtr, mSampleRate);
- }
-}
-
-uint32_t Visualizer::initCaptureSize()
-{
- uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + 2];
- effect_param_t *p = (effect_param_t *)buf32;
-
- p->psize = sizeof(uint32_t);
- p->vsize = sizeof(uint32_t);
- *(int32_t *)p->data = VISUALIZER_PARAM_CAPTURE_SIZE;
- status_t status = getParameter(p);
-
- if (status == NO_ERROR) {
- status = p->status;
- }
-
- uint32_t size = 0;
- if (status == NO_ERROR) {
- size = *((int32_t *)p->data + 1);
- }
- mCaptureSize = size;
-
- ALOGV("initCaptureSize size %d status %d", mCaptureSize, status);
-
- return size;
-}
-
-void Visualizer::controlStatusChanged(bool controlGranted) {
- if (controlGranted) {
- // this Visualizer instance regained control of the effect, reset the scaling mode
- // and capture size as has been cached through it.
- ALOGV("controlStatusChanged(true) causes effect parameter reset:");
- ALOGV(" scaling mode reset to %d", mScalingMode);
- setScalingMode(mScalingMode);
- ALOGV(" capture size reset to %d", mCaptureSize);
- setCaptureSize(mCaptureSize);
- }
- AudioEffect::controlStatusChanged(controlGranted);
-}
-
-//-------------------------------------------------------------------------
-
-Visualizer::CaptureThread::CaptureThread(Visualizer* receiver, uint32_t captureRate,
- bool bCanCallJava)
- : Thread(bCanCallJava), mReceiver(receiver)
-{
- mSleepTimeUs = 1000000000 / captureRate;
- ALOGV("CaptureThread cstor %p captureRate %d mSleepTimeUs %d", this, captureRate, mSleepTimeUs);
-}
-
-bool Visualizer::CaptureThread::threadLoop()
-{
- ALOGV("CaptureThread %p enter", this);
- sp<Visualizer> receiver = mReceiver.promote();
- if (receiver == NULL) {
- return false;
- }
- while (!exitPending())
- {
- usleep(mSleepTimeUs);
- receiver->periodicCapture();
- }
- ALOGV("CaptureThread %p exiting", this);
- return false;
-}
-
-} // namespace android
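
For reference, the class deleted above delivered captures through the C-style callback invoked in periodicCapture(). A hedged sketch of client registration, assuming the pre-removal <media/Visualizer.h> header; the capture size, rate, and flag choices are illustrative.

    #include <media/Visualizer.h>

    using android::Visualizer;

    // Callback shape mirrors the invocation in periodicCapture() above.
    static void onCapture(void* /*user*/, uint32_t waveSize, uint8_t* wave,
                          uint32_t fftSize, uint8_t* fft, uint32_t sampleRate) {
        // wave/fft each hold mCaptureSize bytes when the matching flag was requested.
        (void)waveSize; (void)wave; (void)fftSize; (void)fft; (void)sampleRate;
    }

    void startCapture(const android::sp<Visualizer>& v) {
        v->setCaptureSize(1024);  // must be a power of two within the supported range
        // The rate is in millihertz, as implied by CaptureThread's mSleepTimeUs computation.
        v->setCaptureCallBack(onCapture, nullptr,
                              Visualizer::CAPTURE_WAVEFORM | Visualizer::CAPTURE_FFT,
                              10000 /* ~10 captures per second */);
        v->setEnabled(true);      // spawns the capture thread
    }
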
diff --git a/media/libmedia/aidl/android/media/IResourceManagerClient.aidl b/media/libmedia/aidl/android/media/IResourceManagerClient.aidl
new file mode 100644
index 0000000..4c3ef47
--- /dev/null
+++ b/media/libmedia/aidl/android/media/IResourceManagerClient.aidl
@@ -0,0 +1,39 @@
+/**
+ * Copyright (c) 2019, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * IResourceManagerClient interface for the ResourceManagerService to
+ * call the client.
+ *
+ * {@hide}
+ */
+interface IResourceManagerClient {
+ /**
+ * Instruct the client to reclaim its resources.
+ *
+ * @return true if the reclaim was successful and false otherwise.
+ */
+ boolean reclaimResource();
+
+ /**
+ * Retrieve the name of the client.
+ *
+ * @return name of the client.
+ */
+ @utf8InCpp String getName();
+}
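
With the AIDL cpp backend, native code implements this interface by subclassing the generated Bn class. A hedged sketch follows; the header path, the ::android::binder::Status/out-parameter signatures, and the helper names are assumptions based on the standard generated code, not part of this change.

    #include <string>
    #include <android/media/BnResourceManagerClient.h>

    // Hedged sketch: a codec-side client the service can call back into.
    struct CodecClient : public ::android::media::BnResourceManagerClient {
        ::android::binder::Status reclaimResource(bool* _aidl_return) override {
            *_aidl_return = releaseCodecIfIdle();          // illustrative helper
            return ::android::binder::Status::ok();
        }
        ::android::binder::Status getName(std::string* _aidl_return) override {
            *_aidl_return = "codec:example.decoder";       // placeholder name
            return ::android::binder::Status::ok();
        }
      private:
        bool releaseCodecIfIdle() { return true; }         // placeholder
    };
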
diff --git a/media/libmedia/aidl/android/media/IResourceManagerService.aidl b/media/libmedia/aidl/android/media/IResourceManagerService.aidl
new file mode 100644
index 0000000..3e6f8db
--- /dev/null
+++ b/media/libmedia/aidl/android/media/IResourceManagerService.aidl
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2019, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.IResourceManagerClient;
+import android.media.MediaResourceParcel;
+import android.media.MediaResourcePolicyParcel;
+
+/**
+ * ResourceManagerService interface that keeps track of media resources
+ * owned by clients and reclaims resources based on configured policies
+ * when necessary.
+ *
+ * {@hide}
+ */
+interface IResourceManagerService {
+ const @utf8InCpp String kPolicySupportsMultipleSecureCodecs
+ = "supports-multiple-secure-codecs";
+ const @utf8InCpp String kPolicySupportsSecureWithNonSecureCodec
+ = "supports-secure-with-non-secure-codec";
+
+ /**
+ * Configure the ResourceManagerService to adopt particular policies when
+ * managing the resources.
+ *
+ * @param policies an array of policies to be adopted.
+ */
+ void config(in MediaResourcePolicyParcel[] policies);
+
+ /**
+ * Add a client to a process with a list of resources.
+ *
+ * @param pid pid of the client.
+ * @param uid uid of the client.
+ * @param clientId an identifier that uniquely identifies the client within the pid.
+ * @param client interface for the ResourceManagerService to call the client.
+ * @param resources an array of resources to be added.
+ */
+ void addResource(
+ int pid,
+ int uid,
+ long clientId,
+ IResourceManagerClient client,
+ in MediaResourceParcel[] resources);
+
+ /**
+ * Remove the listed resources from a client.
+ *
+ * @param pid pid from which the list of resources will be removed.
+ * @param clientId clientId within the pid from which the list of resources will be removed.
+ * @param resources an array of resources to be removed from the client.
+ */
+ void removeResource(int pid, long clientId, in MediaResourceParcel[] resources);
+
+ /**
+ * Remove all resources from a client.
+ *
+ * @param pid pid from which the client's resources will be removed.
+ * @param clientId clientId within the pid that will be removed.
+ */
+ void removeClient(int pid, long clientId);
+
+ /**
+ * Tries to reclaim resources from processes with lower priority than the
+ * calling process according to the requested resources.
+ *
+ * @param callingPid pid of the calling process.
+ * @param resources an array of resources to be reclaimed.
+ *
+ * @return true if the reclaim was successful and false otherwise.
+ */
+ boolean reclaimResource(int callingPid, in MediaResourceParcel[] resources);
+}
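
A hedged sketch of how a media process might call this service through the generated cpp proxy, combined with the MediaResource helpers introduced further down in this change (MediaResource.h). The "media.resource_manager" service name and the service-manager lookup are assumptions.

    #include <vector>
    #include <android/media/IResourceManagerService.h>
    #include <binder/IServiceManager.h>
    #include <media/MediaResource.h>

    // Hedged sketch: register a secure video codec plus graphic memory for one client.
    void registerCodecResources(const android::sp<android::media::IResourceManagerClient>& client,
                                int pid, int uid, int64_t clientId) {
        using android::media::IResourceManagerService;
        android::sp<IResourceManagerService> service =
                android::interface_cast<IResourceManagerService>(
                        android::defaultServiceManager()->getService(
                                android::String16("media.resource_manager")));
        if (service == nullptr) return;

        std::vector<android::media::MediaResourceParcel> resources;
        resources.push_back(android::MediaResource::CodecResource(true /*secure*/, true /*video*/));
        resources.push_back(android::MediaResource::GraphicMemoryResource(1));
        service->addResource(pid, uid, clientId, client, resources);
    }
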
diff --git a/media/libmedia/aidl/android/media/MediaResourceParcel.aidl b/media/libmedia/aidl/android/media/MediaResourceParcel.aidl
new file mode 100644
index 0000000..b0f2b71
--- /dev/null
+++ b/media/libmedia/aidl/android/media/MediaResourceParcel.aidl
@@ -0,0 +1,50 @@
+/**
+ * Copyright (c) 2019, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.MediaResourceType;
+import android.media.MediaResourceSubType;
+
+/**
+ * Description of a media resource to be tracked by MediaResourceManager.
+ *
+ * {@hide}
+ */
+parcelable MediaResourceParcel {
+ // TODO: default enum value is not supported yet.
+ // Set default enum value when b/142739329 is fixed.
+
+ /**
+ * Type of the media resource.
+ */
+ MediaResourceType type;// = MediaResourceTypeEnum::kUnspecified;
+
+ /**
+ * Sub-type of the media resource.
+ */
+ MediaResourceSubType subType;// = MediaResourceSubTypeEnum::kUnspecifiedSubType;
+
+ /**
+ * Identifier of the media resource (e.g. DRM session id).
+ */
+ byte[] id;
+
+ /**
+ * Number of units of the media resource (bytes of graphic memory, number of codecs, etc.).
+ */
+ long value = 0;
+}
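
With the cpp backend this parcelable becomes a plain struct whose public fields match the declarations above; a hedged sketch of filling one in for a DRM session (header path and field access assumed from the standard generated code):

    #include <vector>
    #include <android/media/MediaResourceParcel.h>

    // Hedged sketch: describe one DRM session as a resource entry.
    android::media::MediaResourceParcel describeDrmSession(const std::vector<uint8_t>& sessionId) {
        android::media::MediaResourceParcel res;
        res.type = android::media::MediaResourceType::kDrmSession;
        res.subType = android::media::MediaResourceSubType::kUnspecifiedSubType;
        res.id = sessionId;   // the DRM session id bytes
        res.value = 1;        // one session
        return res;
    }
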
diff --git a/media/libmedia/aidl/android/media/MediaResourcePolicyParcel.aidl b/media/libmedia/aidl/android/media/MediaResourcePolicyParcel.aidl
new file mode 100644
index 0000000..4ea859a
--- /dev/null
+++ b/media/libmedia/aidl/android/media/MediaResourcePolicyParcel.aidl
@@ -0,0 +1,33 @@
+/**
+ * Copyright (c) 2019, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Description of a policy to be adopted by ResourceManagerService.
+ * {@hide}
+ */
+parcelable MediaResourcePolicyParcel {
+ /**
+ * Name of the policy to be adopted.
+ */
+ @utf8InCpp String type;
+
+ /**
+ * Value of the policy to be adopted.
+ */
+ @utf8InCpp String value;
+}
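
A hedged sketch of building the array handed to IResourceManagerService.config(). The literal strings mirror the kPolicy* constants declared on the interface above; the "true"/"false" value encoding is an assumption.

    #include <vector>
    #include <android/media/MediaResourcePolicyParcel.h>

    // Hedged sketch: advertise codec-concurrency policy to the resource manager.
    std::vector<android::media::MediaResourcePolicyParcel> makePolicies(
            bool multipleSecureCodecs, bool secureWithNonSecure) {
        std::vector<android::media::MediaResourcePolicyParcel> policies(2);
        policies[0].type  = "supports-multiple-secure-codecs";
        policies[0].value = multipleSecureCodecs ? "true" : "false";
        policies[1].type  = "supports-secure-with-non-secure-codec";
        policies[1].value = secureWithNonSecure ? "true" : "false";
        return policies;
    }
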
diff --git a/media/libmedia/aidl/android/media/MediaResourceSubType.aidl b/media/libmedia/aidl/android/media/MediaResourceSubType.aidl
new file mode 100644
index 0000000..af2ba68
--- /dev/null
+++ b/media/libmedia/aidl/android/media/MediaResourceSubType.aidl
@@ -0,0 +1,29 @@
+/**
+ * Copyright (c) 2019, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Sub-type enums of media resources.
+ *
+ * {@hide}
+ */
+@Backing(type="int")
+enum MediaResourceSubType {
+ kUnspecifiedSubType = 0,
+ kAudioCodec = 1,
+ kVideoCodec = 2,
+}
diff --git a/media/libmedia/aidl/android/media/MediaResourceType.aidl b/media/libmedia/aidl/android/media/MediaResourceType.aidl
new file mode 100644
index 0000000..b2bb71b
--- /dev/null
+++ b/media/libmedia/aidl/android/media/MediaResourceType.aidl
@@ -0,0 +1,33 @@
+/**
+ * Copyright (c) 2019, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Type enums of media resources.
+ *
+ * {@hide}
+ */
+@Backing(type="int")
+enum MediaResourceType {
+ kUnspecified = 0,
+ kSecureCodec = 1,
+ kNonSecureCodec = 2,
+ kGraphicMemory = 3,
+ kCpuBoost = 4,
+ kBattery = 5,
+ kDrmSession = 6,
+}
diff --git a/media/libmedia/api/resourcemanager/1/.hash b/media/libmedia/api/resourcemanager/1/.hash
new file mode 100644
index 0000000..e56d56b
--- /dev/null
+++ b/media/libmedia/api/resourcemanager/1/.hash
@@ -0,0 +1,18 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a frozen snapshot of an AIDL interface (or parcelable). Do not
+// try to edit this file. It looks like you are doing that because you have
+// modified an AIDL interface in a backward-incompatible way, e.g., deleting a
+// function from an interface or a field from a parcelable and it broke the
+// build. That breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+58fe4b26909c9c4f17b1803baa4005c10ee40750 -
diff --git a/media/libmedia/api/resourcemanager/1/android/media/IResourceManagerClient.aidl b/media/libmedia/api/resourcemanager/1/android/media/IResourceManagerClient.aidl
new file mode 100644
index 0000000..20bfe72
--- /dev/null
+++ b/media/libmedia/api/resourcemanager/1/android/media/IResourceManagerClient.aidl
@@ -0,0 +1,22 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a frozen snapshot of an AIDL interface (or parcelable). Do not
+// try to edit this file. It looks like you are doing that because you have
+// modified an AIDL interface in a backward-incompatible way, e.g., deleting a
+// function from an interface or a field from a parcelable and it broke the
+// build. That breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.media;
+interface IResourceManagerClient {
+ boolean reclaimResource();
+ @utf8InCpp String getName();
+}
diff --git a/media/libmedia/api/resourcemanager/1/android/media/IResourceManagerService.aidl b/media/libmedia/api/resourcemanager/1/android/media/IResourceManagerService.aidl
new file mode 100644
index 0000000..53cf036
--- /dev/null
+++ b/media/libmedia/api/resourcemanager/1/android/media/IResourceManagerService.aidl
@@ -0,0 +1,27 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a frozen snapshot of an AIDL interface (or parcelable). Do not
+// try to edit this file. It looks like you are doing that because you have
+// modified an AIDL interface in a backward-incompatible way, e.g., deleting a
+// function from an interface or a field from a parcelable and it broke the
+// build. That breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.media;
+interface IResourceManagerService {
+ void config(in android.media.MediaResourcePolicyParcel[] policies);
+ void addResource(int pid, int uid, long clientId, android.media.IResourceManagerClient client, in android.media.MediaResourceParcel[] resources);
+ void removeResource(int pid, long clientId, in android.media.MediaResourceParcel[] resources);
+ void removeClient(int pid, long clientId);
+ boolean reclaimResource(int pid, in android.media.MediaResourceParcel[] resources);
+ const String kPolicySupportsMultipleSecureCodecs = "supports-multiple-secure-codecs";
+ const String kPolicySupportsSecureWithNonSecureCodec = "supports-secure-with-non-secure-codec";
+}
diff --git a/media/libmedia/api/resourcemanager/1/android/media/MediaResourceParcel.aidl b/media/libmedia/api/resourcemanager/1/android/media/MediaResourceParcel.aidl
new file mode 100644
index 0000000..47ea9bc
--- /dev/null
+++ b/media/libmedia/api/resourcemanager/1/android/media/MediaResourceParcel.aidl
@@ -0,0 +1,24 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a frozen snapshot of an AIDL interface (or parcelable). Do not
+// try to edit this file. It looks like you are doing that because you have
+// modified an AIDL interface in a backward-incompatible way, e.g., deleting a
+// function from an interface or a field from a parcelable and it broke the
+// build. That breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.media;
+parcelable MediaResourceParcel {
+ android.media.MediaResourceType type;
+ android.media.MediaResourceSubType subType;
+ byte[] id;
+ long value = 0;
+}
diff --git a/media/libmedia/api/resourcemanager/1/android/media/MediaResourcePolicyParcel.aidl b/media/libmedia/api/resourcemanager/1/android/media/MediaResourcePolicyParcel.aidl
new file mode 100644
index 0000000..85d2588
--- /dev/null
+++ b/media/libmedia/api/resourcemanager/1/android/media/MediaResourcePolicyParcel.aidl
@@ -0,0 +1,22 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a frozen snapshot of an AIDL interface (or parcelable). Do not
+// try to edit this file. It looks like you are doing that because you have
+// modified an AIDL interface in a backward-incompatible way, e.g., deleting a
+// function from an interface or a field from a parcelable and it broke the
+// build. That breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.media;
+parcelable MediaResourcePolicyParcel {
+ @utf8InCpp String type;
+ @utf8InCpp String value;
+}
diff --git a/media/libmedia/api/resourcemanager/1/android/media/MediaResourceSubType.aidl b/media/libmedia/api/resourcemanager/1/android/media/MediaResourceSubType.aidl
new file mode 100644
index 0000000..19b68af
--- /dev/null
+++ b/media/libmedia/api/resourcemanager/1/android/media/MediaResourceSubType.aidl
@@ -0,0 +1,24 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a frozen snapshot of an AIDL interface (or parcelable). Do not
+// try to edit this file. It looks like you are doing that because you have
+// modified an AIDL interface in a backward-incompatible way, e.g., deleting a
+// function from an interface or a field from a parcelable and it broke the
+// build. That breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.media;
+@Backing(type="int")
+enum MediaResourceSubType {
+ kUnspecifiedSubType = 0,
+ kAudioCodec = 1,
+ kVideoCodec = 2,
+}
diff --git a/media/libmedia/api/resourcemanager/1/android/media/MediaResourceType.aidl b/media/libmedia/api/resourcemanager/1/android/media/MediaResourceType.aidl
new file mode 100644
index 0000000..6a123fc
--- /dev/null
+++ b/media/libmedia/api/resourcemanager/1/android/media/MediaResourceType.aidl
@@ -0,0 +1,28 @@
+///////////////////////////////////////////////////////////////////////////////
+// THIS FILE IS IMMUTABLE. DO NOT EDIT IN ANY CASE. //
+///////////////////////////////////////////////////////////////////////////////
+
+// This file is a frozen snapshot of an AIDL interface (or parcelable). Do not
+// try to edit this file. It looks like you are doing that because you have
+// modified an AIDL interface in a backward-incompatible way, e.g., deleting a
+// function from an interface or a field from a parcelable and it broke the
+// build. That breakage is intended.
+//
+// You must not make a backward incompatible changes to the AIDL files built
+// with the aidl_interface module type with versions property set. The module
+// type is used to build AIDL files in a way that they can be used across
+// independently updatable components of the system. If a device is shipped
+// with such a backward incompatible change, it has a high risk of breaking
+// later when a module using the interface is updated, e.g., Mainline modules.
+
+package android.media;
+@Backing(type="int")
+enum MediaResourceType {
+ kUnspecified = 0,
+ kSecureCodec = 1,
+ kNonSecureCodec = 2,
+ kGraphicMemory = 3,
+ kCpuBoost = 4,
+ kBattery = 5,
+ kDrmSession = 6,
+}
diff --git a/media/libmedia/include/media/DataSourceDesc.h b/media/libmedia/include/media/DataSourceDesc.h
deleted file mode 100644
index 4336767..0000000
--- a/media/libmedia/include/media/DataSourceDesc.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_DATASOURCEDESC_H
-#define ANDROID_DATASOURCEDESC_H
-
-#include <media/stagefright/foundation/ABase.h>
-#include <utils/RefBase.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class DataSource;
-struct MediaHTTPService;
-
-// A binder interface for implementing a stagefright DataSource remotely.
-struct DataSourceDesc : public RefBase {
-public:
- // intentionally less than INT64_MAX
- // keep consistent with JAVA code
- static const int64_t kMaxTimeMs = 0x7ffffffffffffffll / 1000;
- static const int64_t kMaxTimeUs = kMaxTimeMs * 1000;
-
- enum {
- /* No data source has been set yet */
- TYPE_NONE = 0,
- /* data source is type of MediaDataSource */
- TYPE_CALLBACK = 1,
- /* data source is type of FileDescriptor */
- TYPE_FD = 2,
- /* data source is type of Url */
- TYPE_URL = 3,
- };
-
- DataSourceDesc();
-
- int mType;
-
- sp<MediaHTTPService> mHttpService;
- String8 mUrl;
- KeyedVector<String8, String8> mHeaders;
-
- int mFD;
- int64_t mFDOffset;
- int64_t mFDLength;
-
- sp<DataSource> mCallbackSource;
-
- int64_t mId;
- int64_t mStartPositionMs;
- int64_t mEndPositionMs;
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(DataSourceDesc);
-};
-
-}; // namespace android
-
-#endif // ANDROID_DATASOURCEDESC_H
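
For reference, the descriptor removed above was a plain field bag; a hedged sketch of how a file-descriptor source was described with it (the pre-removal header is assumed, and the values are illustrative):

    #include <media/DataSourceDesc.h>

    // Hedged sketch: wrap an already-open fd as a TYPE_FD source.
    android::sp<android::DataSourceDesc> makeFdSource(int fd, int64_t offset, int64_t length) {
        android::sp<android::DataSourceDesc> dsd = new android::DataSourceDesc();
        dsd->mType = android::DataSourceDesc::TYPE_FD;
        dsd->mFD = fd;
        dsd->mFDOffset = offset;
        dsd->mFDLength = length;
        dsd->mStartPositionMs = 0;
        dsd->mEndPositionMs = android::DataSourceDesc::kMaxTimeMs;
        return dsd;
    }
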
diff --git a/media/libmedia/include/media/IOMX.h b/media/libmedia/include/media/IOMX.h
index 7e7c2d2..70c8a74 100644
--- a/media/libmedia/include/media/IOMX.h
+++ b/media/libmedia/include/media/IOMX.h
@@ -34,9 +34,17 @@
#include <media/openmax/OMX_VideoExt.h>
namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+struct IGraphicBufferSource;
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
class IGraphicBufferProducer;
-class IGraphicBufferSource;
class IMemory;
class IOMXBufferSource;
class IOMXNode;
@@ -82,7 +90,7 @@
virtual status_t createInputSurface(
sp<IGraphicBufferProducer> *bufferProducer,
- sp<IGraphicBufferSource> *bufferSource) = 0;
+ sp<hardware::media::omx::V1_0::IGraphicBufferSource> *bufferSource) = 0;
};
class IOMXNode : public IInterface {
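
After this change, call sites spell out the HIDL IGraphicBufferSource type when creating an input surface. A hedged one-function sketch, assuming createInputSurface() is reached through an existing sp<IOMX> handle as in this header:

    #include <media/IOMX.h>

    // Hedged sketch: create an input surface against the updated signature.
    android::status_t makeInputSurface(const android::sp<android::IOMX>& omx) {
        android::sp<android::IGraphicBufferProducer> producer;
        android::sp<android::hardware::media::omx::V1_0::IGraphicBufferSource> source;
        return omx->createInputSurface(&producer, &source);
    }
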
diff --git a/media/libmedia/include/media/IResourceManagerClient.h b/media/libmedia/include/media/IResourceManagerClient.h
deleted file mode 100644
index aa0cd88..0000000
--- a/media/libmedia/include/media/IResourceManagerClient.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IRESOURCEMANAGERCLIENT_H
-#define ANDROID_IRESOURCEMANAGERCLIENT_H
-
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-
-namespace android {
-
-class IResourceManagerClient: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(ResourceManagerClient);
-
- virtual bool reclaimResource() = 0;
- virtual String8 getName() = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnResourceManagerClient: public BnInterface<IResourceManagerClient>
-{
-public:
- virtual status_t onTransact(uint32_t code,
- const Parcel &data,
- Parcel *reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif // ANDROID_IRESOURCEMANAGERCLIENT_H
diff --git a/media/libmedia/include/media/IResourceManagerService.h b/media/libmedia/include/media/IResourceManagerService.h
deleted file mode 100644
index 8992f8b..0000000
--- a/media/libmedia/include/media/IResourceManagerService.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_IRESOURCEMANAGERSERVICE_H
-#define ANDROID_IRESOURCEMANAGERSERVICE_H
-
-#include <utils/Errors.h> // for status_t
-#include <utils/KeyedVector.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-
-#include <media/IResourceManagerClient.h>
-#include <media/MediaResource.h>
-#include <media/MediaResourcePolicy.h>
-
-namespace android {
-
-class IResourceManagerService: public IInterface
-{
-public:
- DECLARE_META_INTERFACE(ResourceManagerService);
-
- virtual void config(const Vector<MediaResourcePolicy> &policies) = 0;
-
- virtual void addResource(
- int pid,
- int uid,
- int64_t clientId,
- const sp<IResourceManagerClient> client,
- const Vector<MediaResource> &resources) = 0;
-
- virtual void removeResource(int pid, int64_t clientId,
- const Vector<MediaResource> &resources) = 0;
-
- virtual void removeClient(int pid, int64_t clientId) = 0;
-
- virtual bool reclaimResource(
- int callingPid,
- const Vector<MediaResource> &resources) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnResourceManagerService: public BnInterface<IResourceManagerService>
-{
-public:
- virtual status_t onTransact(uint32_t code,
- const Parcel &data,
- Parcel *reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif // ANDROID_IRESOURCEMANAGERSERVICE_H
diff --git a/media/libmedia/include/media/JetPlayer.h b/media/libmedia/include/media/JetPlayer.h
deleted file mode 100644
index bb569bc..0000000
--- a/media/libmedia/include/media/JetPlayer.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef JETPLAYER_H_
-#define JETPLAYER_H_
-
-#include <utils/threads.h>
-
-#include <libsonivox/jet.h>
-#include <libsonivox/eas_types.h>
-#include <media/AudioTrack.h>
-#include <media/MidiIoWrapper.h>
-
-
-namespace android {
-
-typedef void (*jetevent_callback)(int eventType, int val1, int val2, void *cookie);
-
-class JetPlayer {
-
-public:
-
- // to keep in sync with the JetPlayer class constants
- // defined in frameworks/base/media/java/android/media/JetPlayer.java
- static const int JET_EVENT = 1;
- static const int JET_USERID_UPDATE = 2;
- static const int JET_NUMQUEUEDSEGMENT_UPDATE = 3;
- static const int JET_PAUSE_UPDATE = 4;
-
- JetPlayer(void *javaJetPlayer,
- int maxTracks = 32,
- int trackBufferSize = 1200);
- ~JetPlayer();
- int init();
- int release();
-
- int loadFromFile(const char* url);
- int loadFromFD(const int fd, const long long offset, const long long length);
- int closeFile();
- int play();
- int pause();
- int queueSegment(int segmentNum, int libNum, int repeatCount, int transpose,
- EAS_U32 muteFlags, EAS_U8 userID);
- int setMuteFlags(EAS_U32 muteFlags, bool sync);
- int setMuteFlag(int trackNum, bool muteFlag, bool sync);
- int triggerClip(int clipId);
- int clearQueue();
-
- void setEventCallback(jetevent_callback callback);
-
- int getMaxTracks() { return mMaxTracks; };
-
-
-private:
- int render();
- void fireUpdateOnStatusChange();
- void fireEventsFromJetQueue();
-
- JetPlayer() {} // no default constructor
- void dump();
- void dumpJetStatus(S_JET_STATUS* pJetStatus);
-
- jetevent_callback mEventCallback;
-
- void* mJavaJetPlayerRef;
- Mutex mMutex; // mutex to sync the render and playback thread with the JET calls
- pid_t mTid;
- Condition mCondition;
- volatile bool mRender;
- bool mPaused;
-
- EAS_STATE mState;
- int* mMemFailedVar;
-
- int mMaxTracks; // max number of MIDI tracks, usually 32
- EAS_DATA_HANDLE mEasData;
- MidiIoWrapper* mIoWrapper;
- EAS_PCM* mAudioBuffer;// EAS renders the MIDI data into this buffer,
- sp<AudioTrack> mAudioTrack; // and we play it in this audio track
- int mTrackBufferSize;
- S_JET_STATUS mJetStatus;
- S_JET_STATUS mPreviousJetStatus;
-
- class JetPlayerThread : public Thread {
- public:
- JetPlayerThread(JetPlayer *player) : mPlayer(player) {
- }
-
- protected:
- virtual ~JetPlayerThread() {}
-
- private:
- JetPlayer *mPlayer;
-
- bool threadLoop() {
- int result;
- result = mPlayer->render();
- return false;
- }
-
- JetPlayerThread(const JetPlayerThread &);
- JetPlayerThread &operator=(const JetPlayerThread &);
- };
-
- sp<JetPlayerThread> mThread;
-
-}; // end class JetPlayer
-
-} // end namespace android
-
-
-
-#endif /*JETPLAYER_H_*/
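
For reference, a hedged sketch of how the JET wrapper removed above was typically driven; the callback, file path, and segment parameters are illustrative, and 0 is assumed to be the EAS success code.

    #include <media/JetPlayer.h>

    static void onJetEvent(int eventType, int val1, int val2, void* /*cookie*/) {
        // JET_EVENT, JET_USERID_UPDATE, ... as defined in the removed header.
        (void)eventType; (void)val1; (void)val2;
    }

    // Hedged sketch: load a JET file, queue its first segment and start playback.
    void playJetFile(const char* path) {
        android::JetPlayer player(nullptr /*javaJetPlayerRef*/);
        player.init();
        player.setEventCallback(onJetEvent);
        if (player.loadFromFile(path) == 0) {
            player.queueSegment(0 /*segmentNum*/, 0 /*libNum*/, 1 /*repeatCount*/,
                                0 /*transpose*/, 0 /*muteFlags*/, 0 /*userID*/);
            player.play();
        }
        player.release();
    }
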
diff --git a/media/libmedia/include/media/LinearMap.h b/media/libmedia/include/media/LinearMap.h
deleted file mode 100644
index 2220a0c..0000000
--- a/media/libmedia/include/media/LinearMap.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Copyright 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_LINEAR_MAP_H
-#define ANDROID_LINEAR_MAP_H
-
-#include <stdint.h>
-
-namespace android {
-
-/*
-A general purpose lookup utility that defines a mapping between X and Y as a
-continuous set of line segments with shared (x, y) end-points.
-The (x, y) points must be added in order, monotonically increasing in both x and y;
-a log warning is emitted if this does not happen (See general usage notes below).
-
-A limited history of (x, y) points is kept for space reasons (See general usage notes).
-
-In AudioFlinger, we use the LinearMap to associate track frames to
-sink frames. When we want to obtain a client track timestamp, we first
-get a timestamp from the sink. The sink timestamp's position (mPosition)
-corresponds to the sink frames written. We use LinearMap to figure out which track frame
-the sink frame corresponds to. This allows us to substitute a track frame for the
-the sink frame (keeping the mTime identical) and return that timestamp back to the client.
-
-The method findX() can be used to retrieve an x value from a given y value and is
-used for timestamps, similarly for findY() which is provided for completeness.
-
-We update the (track frame, sink frame) points in the LinearMap each time we write data
-to the sink by the AudioFlinger PlaybackThread (MixerThread).
-
-
-AudioFlinger Timestamp Notes:
-
-1) Example: Obtaining a track timestamp during playback. In this case, the LinearMap
-looks something like this:
-
-Track Frame Sink Frame
-(track start)
-0 50000 (track starts here, the sink may already be running)
-1000 51000
-2000 52000
-
-When we request a track timestamp, we call the sink getTimestamp() and get for example
-mPosition = 51020. Using the LinearMap, we find we have played to track frame 1020.
-We substitute the sink mPosition of 51020 with the track position 1020,
-and return that timestamp to the app.
-
-2) Example: Obtaining a track timestamp duing pause. In this case, the LinearMap
-looks something like this:
-
-Track Frame Sink Frame
-... (some time has gone by)
-15000 30000
-16000 31000
-17000 32000
-(pause here)
-(suppose we call sink getTimestamp() here and get sink mPosition = 31100; that means
- we have played to track frame 16100. The track timestamp mPosition will
- continue to advance until the sink timestamp returns a value of mPosition
- greater than 32000, corresponding to track frame 17000 when the pause was called).
-17000 33000
-17000 34000
-...
-
-3) If the track underruns, it appears as if a pause was called on that track.
-
-4) If there is an underrun in the HAL layer, then it may be possible that
-the sink getTimestamp() will return a value greater than the number of frames written
-(it should always be less). This should be rare, if not impossible by some
-HAL implementations of the sink getTimestamp. In that case, timing is lost
-and we will return the most recent track frame written.
-
-5) When called with no points in the map, findX() returns the start value (default 0).
-This is consistent with starting after a stop() or flush().
-
-6) Resuming after Track standby will be similar to coming out of pause, as the HAL ensures
-framesWritten() and getTimestamp() are contiguous for non-offloaded/direct tracks.
-
-7) LinearMap works for different speeds and sample rates as it uses
-linear interpolation. Since AudioFlinger only updates speed and sample rate
-exactly at the sample points pushed into the LinearMap, the returned values
-from findX() and findY() are accurate regardless of how many speed or sample
-rate changes are made, so long as the coordinate looked up is within the
-sample history.
-
-General usage notes:
-
-1) In order for the LinearMap to work reliably, you cannot look backwards more
-than the size of its circular buffer history, set upon creation (typically 16).
-If you look back further, the position is extrapolated either from a passed in
-extrapolation parameter or from the oldest line segment.
-
-2) Points must monotonically increase in x and y. The increment between adjacent
-points cannot be greater than signed 32 bits. Wrap in the x, y coordinates are supported,
-since we use differences in our computation.
-
-3) If the frame data is discontinuous (due to stop or flush) call reset() to clear
-the sample counter.
-
-4) If (x, y) are not strictly monotonic increasing, i.e. (x2 > x1) and (y2 > y1),
-then one or both of the inverses y = f(x) or x = g(y) may have multiple solutions.
-In that case, the most recent solution is returned by findX() or findY(). We
-do not warn if (x2 == x1) or (y2 == y1), but we do logcat warn if (x2 < x1) or
-(y2 < y1).
-
-5) Due to rounding it is possible x != findX(findY(x)) or y != findY(findX(y))
-even when the inverse exists. Nevertheless, the values should be close.
-
-*/
-
-template <typename T>
-class LinearMap {
-public:
- // This enumeration describes the reliability of the findX() or findY() estimation
- // in descending order.
- enum FindMethod {
- FIND_METHOD_INTERPOLATION, // High reliability (errors due to rounding)
- FIND_METHOD_FORWARD_EXTRAPOLATION, // Reliability based on no future speed changes
- FIND_METHOD_BACKWARD_EXTRAPOLATION, // Reliability based on prior estimated speed
- FIND_METHOD_START_VALUE, // No samples in history, using start value
- };
-
- explicit LinearMap(size_t size)
- : mSize(size),
- mPos(0), // a circular buffer, so could start anywhere. the first sample is at 1.
- mSamples(0),
- // mStepValid(false), // only valid if mSamples > 1
- // mExtrapolateTail(false), // only valid if mSamples > 0
- mX(new T[size]),
- mY(new T[size]) { }
-
- ~LinearMap() {
- delete[] mX;
- delete[] mY;
- }
-
- // Add a new sample point to the linear map.
- //
- // The difference between the new sample and the previous sample
- // in the x or y coordinate must be less than INT32_MAX for purposes
- // of the linear interpolation or extrapolation.
- //
- // The value should be monotonic increasing (e.g. diff >= 0);
- // logcat warnings are issued if they are not.
- __attribute__((no_sanitize("integer")))
- void push(T x, T y) {
- // Assumption: we assume x, y are monotonic increasing values,
- // which (can) wrap in precision no less than 32 bits and have
- // "step" or differences between adjacent points less than 32 bits.
-
- if (mSamples > 0) {
- const bool lastStepValid = mStepValid;
- int32_t xdiff;
- int32_t ydiff;
- // check difference assumption here
- mStepValid = checkedDiff(&xdiff, x, mX[mPos], "x")
- & /* bitwise AND to always warn for ydiff, though logical AND is also OK */
- checkedDiff(&ydiff, y, mY[mPos], "y");
-
- // Optimization: do not add a new sample if the line segment would
- // simply extend the previous line segment. This extends the useful
- // history by removing redundant points.
- if (mSamples > 1 && mStepValid && lastStepValid) {
- const size_t prev = previousPosition();
- const int32_t xdiff2 = x - mX[prev];
- const int32_t ydiff2 = y - mY[prev];
-
- // if both current step and previous step are valid (non-negative and
- // less than INT32_MAX for precision greater than 4 bytes)
- // then the sum of the two steps is valid when the
- // int32_t difference is non-negative.
- if (xdiff2 >= 0 && ydiff2 >= 0
- && (int64_t)xdiff2 * ydiff == (int64_t)ydiff2 * xdiff) {
- // ALOGD("reusing sample! (%u, %u) sample depth %zd", x, y, mSamples);
- mX[mPos] = x;
- mY[mPos] = y;
- return;
- }
- }
- }
- if (++mPos >= mSize) {
- mPos = 0;
- }
- if (mSamples < mSize) {
- mExtrapolateTail = false;
- ++mSamples;
- } else {
- // we enable extrapolation beyond the oldest sample
- // if the sample buffers are completely full and we
- // no longer know the full history.
- mExtrapolateTail = true;
- }
- mX[mPos] = x;
- mY[mPos] = y;
- }
-
- // clear all samples from the circular array
- void reset() {
- // no need to reset mPos, we use a circular buffer.
- // computed values such as mStepValid are set after a subsequent push().
- mSamples = 0;
- }
-
- // returns true if LinearMap contains at least one sample.
- bool hasData() const {
- return mSamples != 0;
- }
-
- // find the corresponding X point from a Y point.
- // See findU for details.
- __attribute__((no_sanitize("integer")))
- T findX(T y, FindMethod *method = NULL, double extrapolation = 0.0, T startValue = 0) const {
- return findU(y, mX, mY, method, extrapolation, startValue);
- }
-
- // find the corresponding Y point from a X point.
- // See findU for details.
- __attribute__((no_sanitize("integer")))
- T findY(T x, FindMethod *method = NULL, double extrapolation = 0.0, T startValue = 0) const {
- return findU(x, mY, mX, method, extrapolation, startValue);
- }
-
-protected:
-
- // returns false if the diff is out of int32_t bounds or negative.
- __attribute__((no_sanitize("integer")))
- static inline bool checkedDiff(int32_t *diff, T x2, T x1, const char *coord) {
- if (sizeof(T) >= 8) {
- const int64_t diff64 = x2 - x1;
- *diff = (int32_t)diff64; // intentionally lose precision
- if (diff64 > INT32_MAX) {
- ALOGW("LinearMap: %s overflow diff(%lld) from %llu - %llu exceeds INT32_MAX",
- coord, (long long)diff64,
- (unsigned long long)x2, (unsigned long long)x1);
- return false;
- } else if (diff64 < 0) {
- ALOGW("LinearMap: %s negative diff(%lld) from %llu - %llu",
- coord, (long long)diff64,
- (unsigned long long)x2, (unsigned long long)x1);
- return false;
- }
- return true;
- }
- // for 32 bit integers we cannot detect overflow (it
- // shows up as a negative difference).
- *diff = x2 - x1;
- if (*diff < 0) {
- ALOGW("LinearMap: %s negative diff(%d) from %u - %u",
- coord, *diff, (unsigned)x2, (unsigned)x1);
- return false;
- }
- return true;
- }
-
- // Returns the previous position in the mSamples array
- // going backwards back steps.
- //
- // Parameters:
- // back: number of backward steps, cannot be less than zero or greater than mSamples.
- //
- __attribute__((no_sanitize("integer")))
- size_t previousPosition(ssize_t back = 1) const {
- LOG_ALWAYS_FATAL_IF(back < 0 || (size_t)back > mSamples, "Invalid back(%zd)", back);
- ssize_t position = mPos - back;
- if (position < 0) position += mSize;
- return (size_t)position;
- }
-
- // A generic implementation of finding the "other coordinate" with coordinates
- // (u, v) = (x, y) or (u, v) = (y, x).
- //
- // Parameters:
- // uArray: the u axis samples.
- // vArray: the v axis samples.
- // method: [out] how the returned value was computed.
- // extrapolation: the slope used when extrapolating from the
- // first sample value or the last sample value in the history.
- // If mExtrapolateTail is set, the slope of the last line segment
- // is used if the extrapolation parameter is zero to continue the tail of history.
- // At this time, we do not use a different value for forward extrapolation from the
- // head of history from backward extrapolation from the tail of history.
- // TODO: back extrapolation value could be stored along with mX, mY in history.
- // startValue: used only when there are no samples in history. One can detect
- // whether there are samples in history by the method hasData().
- //
- __attribute__((no_sanitize("integer")))
- T findU(T v, T *uArray, T *vArray, FindMethod *method,
- double extrapolation, T startValue) const {
- if (mSamples == 0) {
- if (method != NULL) {
- *method = FIND_METHOD_START_VALUE;
- }
- return startValue; // nothing yet
- }
- ssize_t previous = 0;
- int32_t diff = 0;
- for (ssize_t i = 0; i < (ssize_t)mSamples; ++i) {
- size_t current = previousPosition(i);
-
- // Assumption: even though the type "T" may have precision greater
- // than 32 bits, the difference between adjacent points is limited to 32 bits.
- diff = v - vArray[current];
- if (diff >= 0 ||
- (i == (ssize_t)mSamples - 1 && mExtrapolateTail && extrapolation == 0.0)) {
- // ALOGD("depth = %zd out of %zd", i, limit);
- if (i == 0) {
- if (method != NULL) {
- *method = FIND_METHOD_FORWARD_EXTRAPOLATION;
- }
- return uArray[current] + diff * extrapolation;
- }
- // interpolate / extrapolate: For this computation, we
- // must use differentials here otherwise we have inconsistent
- // values on modulo wrap. previous is always valid here since
- // i > 0. we also perform rounding with the assumption
- // that uStep, vStep, and diff are non-negative.
- int32_t uStep = uArray[previous] - uArray[current]; // non-negative
- int32_t vStep = vArray[previous] - vArray[current]; // positive
- T u = uStep <= 0 || vStep <= 0 ? // we do not permit negative ustep or vstep
- uArray[current]
- : ((int64_t)diff * uStep + (vStep >> 1)) / vStep + uArray[current];
- // ALOGD("u:%u diff:%d uStep:%d vStep:%d u_current:%d",
- // u, diff, uStep, vStep, uArray[current]);
- if (method != NULL) {
- *method = (diff >= 0) ?
- FIND_METHOD_INTERPOLATION : FIND_METHOD_BACKWARD_EXTRAPOLATION;
- }
- return u;
- }
- previous = current;
- }
- // previous is always valid here.
- if (method != NULL) {
- *method = FIND_METHOD_BACKWARD_EXTRAPOLATION;
- }
- return uArray[previous] + diff * extrapolation;
- }
-
-private:
- const size_t mSize; // Size of mX and mY arrays (history).
- size_t mPos; // Index in mX and mY of last pushed data;
- // (incremented after push) [0, mSize - 1].
- size_t mSamples; // Number of valid samples in the array [0, mSize].
- bool mStepValid; // Last sample step was valid (non-negative)
- bool mExtrapolateTail; // extrapolate tail using oldest line segment
- T * const mX; // History of X values as a circular array.
- T * const mY; // History of Y values as a circular array.
-};
-
-} // namespace android
-
-#endif // ANDROID_LINEAR_MAP_H
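The interpolation step that findU() applies between adjacent history samples is worth seeing in isolation. Below is a minimal standalone sketch of the same rounded integer arithmetic; the function and variable names are illustrative and not part of the removed header.

#include <cstdint>
#include <cstdio>

// Rounded linear interpolation between history points (u1, v1) and (u2, v2),
// mirroring the arithmetic in LinearMap::findU(): adding half of the (positive)
// vStep before the division makes the integer division round to nearest.
static int64_t interpolateU(int64_t v, int64_t u1, int64_t v1, int64_t u2, int64_t v2) {
    const int64_t uStep = u2 - u1;            // expected non-negative
    const int64_t vStep = v2 - v1;            // expected positive
    if (uStep <= 0 || vStep <= 0) return u1;  // degenerate step: fall back to the known point
    const int64_t diff = v - v1;
    return (diff * uStep + (vStep >> 1)) / vStep + u1;
}

int main() {
    // Frames (u) against time in microseconds (v): 480 frames per 10 ms.
    printf("%lld\n", (long long)interpolateU(5000 /* v */, 0, 0, 480, 10000)); // prints 240
    return 0;
}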
diff --git a/media/libmedia/include/media/MediaResource.h b/media/libmedia/include/media/MediaResource.h
index 10a07bb..caf03b1 100644
--- a/media/libmedia/include/media/MediaResource.h
+++ b/media/libmedia/include/media/MediaResource.h
@@ -18,66 +18,55 @@
#ifndef ANDROID_MEDIA_RESOURCE_H
#define ANDROID_MEDIA_RESOURCE_H
-#include <binder/Parcel.h>
-#include <utils/String8.h>
+#include <android/media/MediaResourceParcel.h>
namespace android {
-class MediaResource {
+using android::media::MediaResourceParcel;
+using android::media::MediaResourceSubType;
+using android::media::MediaResourceType;
+
+class MediaResource : public MediaResourceParcel {
public:
- enum Type {
- kUnspecified = 0,
- kSecureCodec,
- kNonSecureCodec,
- kGraphicMemory,
- kCpuBoost,
- kBattery,
- };
+ using Type = MediaResourceType;
+ using SubType = MediaResourceSubType;
- enum SubType {
- kUnspecifiedSubType = 0,
- kAudioCodec,
- kVideoCodec,
- };
+ MediaResource() = delete;
+ MediaResource(Type type, int64_t value);
+ MediaResource(Type type, SubType subType, int64_t value);
+ MediaResource(Type type, const std::vector<uint8_t> &id, int64_t value);
- MediaResource();
- MediaResource(Type type, uint64_t value);
- MediaResource(Type type, SubType subType, uint64_t value);
-
- void readFromParcel(const Parcel &parcel);
- void writeToParcel(Parcel *parcel) const;
-
- String8 toString() const;
-
- bool operator==(const MediaResource &other) const;
- bool operator!=(const MediaResource &other) const;
-
- Type mType;
- SubType mSubType;
- uint64_t mValue;
+ static MediaResource CodecResource(bool secure, bool video);
+ static MediaResource GraphicMemoryResource(int64_t value);
+ static MediaResource CpuBoostResource();
+ static MediaResource VideoBatteryResource();
+ static MediaResource DrmSessionResource(const std::vector<uint8_t> &id, int64_t value);
};
inline static const char *asString(MediaResource::Type i, const char *def = "??") {
switch (i) {
- case MediaResource::kUnspecified: return "unspecified";
- case MediaResource::kSecureCodec: return "secure-codec";
- case MediaResource::kNonSecureCodec: return "non-secure-codec";
- case MediaResource::kGraphicMemory: return "graphic-memory";
- case MediaResource::kCpuBoost: return "cpu-boost";
- case MediaResource::kBattery: return "battery";
- default: return def;
+ case MediaResource::Type::kUnspecified: return "unspecified";
+ case MediaResource::Type::kSecureCodec: return "secure-codec";
+ case MediaResource::Type::kNonSecureCodec: return "non-secure-codec";
+ case MediaResource::Type::kGraphicMemory: return "graphic-memory";
+ case MediaResource::Type::kCpuBoost: return "cpu-boost";
+ case MediaResource::Type::kBattery: return "battery";
+ case MediaResource::Type::kDrmSession: return "drm-session";
+ default: return def;
}
}
inline static const char *asString(MediaResource::SubType i, const char *def = "??") {
switch (i) {
- case MediaResource::kUnspecifiedSubType: return "unspecified";
- case MediaResource::kAudioCodec: return "audio-codec";
- case MediaResource::kVideoCodec: return "video-codec";
+ case MediaResource::SubType::kUnspecifiedSubType: return "unspecified";
+ case MediaResource::SubType::kAudioCodec: return "audio-codec";
+ case MediaResource::SubType::kVideoCodec: return "video-codec";
default: return def;
}
}
+String8 toString(const MediaResourceParcel& resource);
+
}; // namespace android
#endif // ANDROID_MEDIA_RESOURCE_H
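A short usage sketch of the factory helpers introduced above; only the MediaResource factory signatures come from this change, and the surrounding helper is hypothetical.

#include <vector>
#include <media/MediaResource.h>

using android::MediaResource;

// Hypothetical helper assembling the resource list a codec client might report:
// one codec slot plus its graphic memory budget.
static std::vector<MediaResource> makeCodecResources(bool secure, bool video,
                                                     int64_t graphicBytes) {
    std::vector<MediaResource> resources;
    resources.push_back(MediaResource::CodecResource(secure, video));
    resources.push_back(MediaResource::GraphicMemoryResource(graphicBytes));
    return resources;
}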
diff --git a/media/libmedia/include/media/MediaResourcePolicy.h b/media/libmedia/include/media/MediaResourcePolicy.h
index 9bc2eec..7ae1a73 100644
--- a/media/libmedia/include/media/MediaResourcePolicy.h
+++ b/media/libmedia/include/media/MediaResourcePolicy.h
@@ -18,28 +18,23 @@
#ifndef ANDROID_MEDIA_RESOURCE_POLICY_H
#define ANDROID_MEDIA_RESOURCE_POLICY_H
-#include <binder/Parcel.h>
-#include <utils/String8.h>
+#include <android/media/MediaResourcePolicyParcel.h>
namespace android {
-extern const char kPolicySupportsMultipleSecureCodecs[];
-extern const char kPolicySupportsSecureWithNonSecureCodec[];
+using media::MediaResourcePolicyParcel;
-class MediaResourcePolicy {
+class MediaResourcePolicy : public MediaResourcePolicyParcel {
public:
- MediaResourcePolicy();
- MediaResourcePolicy(String8 type, String8 value);
+ MediaResourcePolicy() = delete;
+ MediaResourcePolicy(const std::string& type, const std::string& value);
- void readFromParcel(const Parcel &parcel);
- void writeToParcel(Parcel *parcel) const;
-
- String8 toString() const;
-
- String8 mType;
- String8 mValue;
+ static const ::std::string& kPolicySupportsMultipleSecureCodecs();
+ static const ::std::string& kPolicySupportsSecureWithNonSecureCodec();
};
+String8 toString(const MediaResourcePolicyParcel &policy);
+
}; // namespace android
#endif // ANDROID_MEDIA_RESOURCE_POLICY_H
diff --git a/media/libmedia/include/media/MidiDeviceInfo.h b/media/libmedia/include/media/MidiDeviceInfo.h
deleted file mode 100644
index 5b4a241..0000000
--- a/media/libmedia/include/media/MidiDeviceInfo.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_MIDI_DEVICE_INFO_H
-#define ANDROID_MEDIA_MIDI_DEVICE_INFO_H
-
-#include <binder/Parcelable.h>
-#include <binder/PersistableBundle.h>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-
-namespace android {
-namespace media {
-namespace midi {
-
-class MidiDeviceInfo : public Parcelable {
-public:
- MidiDeviceInfo() = default;
- virtual ~MidiDeviceInfo() = default;
- MidiDeviceInfo(const MidiDeviceInfo& midiDeviceInfo) = default;
-
- status_t writeToParcel(Parcel* parcel) const override;
- status_t readFromParcel(const Parcel* parcel) override;
-
- int getType() const { return mType; }
- int getUid() const { return mId; }
- bool isPrivate() const { return mIsPrivate; }
- const Vector<String16>& getInputPortNames() const { return mInputPortNames; }
- const Vector<String16>& getOutputPortNames() const { return mOutputPortNames; }
- String16 getProperty(const char* propertyName);
-
- // The constants need to be kept in sync with MidiDeviceInfo.java
- enum {
- TYPE_USB = 1,
- TYPE_VIRTUAL = 2,
- TYPE_BLUETOOTH = 3,
- };
- static const char* const PROPERTY_NAME;
- static const char* const PROPERTY_MANUFACTURER;
- static const char* const PROPERTY_PRODUCT;
- static const char* const PROPERTY_VERSION;
- static const char* const PROPERTY_SERIAL_NUMBER;
- static const char* const PROPERTY_ALSA_CARD;
- static const char* const PROPERTY_ALSA_DEVICE;
-
- friend bool operator==(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs);
- friend bool operator!=(const MidiDeviceInfo& lhs, const MidiDeviceInfo& rhs) {
- return !(lhs == rhs);
- }
-
-private:
- status_t readStringVector(
- const Parcel* parcel, Vector<String16> *vectorPtr, size_t defaultLength);
- status_t writeStringVector(Parcel* parcel, const Vector<String16>& vector) const;
-
- int32_t mType;
- int32_t mId;
- Vector<String16> mInputPortNames;
- Vector<String16> mOutputPortNames;
- os::PersistableBundle mProperties;
- bool mIsPrivate;
-};
-
-} // namespace midi
-} // namespace media
-} // namespace android
-
-#endif // ANDROID_MEDIA_MIDI_DEVICE_INFO_H
diff --git a/media/libmedia/include/media/Visualizer.h b/media/libmedia/include/media/Visualizer.h
deleted file mode 100644
index 8078e36..0000000
--- a/media/libmedia/include/media/Visualizer.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_VISUALIZER_H
-#define ANDROID_MEDIA_VISUALIZER_H
-
-#include <media/AudioEffect.h>
-#include <system/audio_effects/effect_visualizer.h>
-#include <utils/Thread.h>
-
-/**
- * The Visualizer class enables an application to retrieve part of the currently playing audio for
- * visualization purposes. It is not an audio recording interface and only returns partial and low
- * quality audio content. However, to protect privacy of certain audio data (e.g. voice mail) the use
- * of the visualizer requires the permission android.permission.RECORD_AUDIO.
- * The audio session ID passed to the constructor indicates which audio content should be
- * visualized:
- * - If the session is 0, the audio output mix is visualized
- * - If the session is not 0, the audio from a particular MediaPlayer or AudioTrack
- * using this audio session is visualized
- * Two types of representation of audio content can be captured:
- * - Waveform data: consecutive 8-bit (unsigned) mono samples by using the getWaveForm() method
- * - Frequency data: 8-bit magnitude FFT by using the getFft() method
- *
- * The length of the capture can be retrieved or specified by calling respectively
- * getCaptureSize() and setCaptureSize() methods. Note that the size of the FFT
- * is half of the specified capture size but both sides of the spectrum are returned, yielding a
- * number of bytes equal to the capture size. The capture size must be a power of 2 in the range
- * returned by getMinCaptureSize() and getMaxCaptureSize().
- * In addition to the polling capture mode, a callback mode is also available by installing a
- * callback function by use of the setCaptureCallBack() method. The rate at which the callback
- * is called as well as the type of data returned is specified.
- * Before capturing data, the Visualizer must be enabled by calling the setEnabled() method.
- * When data capture is not needed any more, the Visualizer should be disabled.
- */
-
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-class Visualizer: public AudioEffect {
-public:
-
- enum callback_flags {
- CAPTURE_WAVEFORM = 0x00000001, // capture callback returns a PCM wave form
- CAPTURE_FFT = 0x00000002, // capture callback returns a frequency representation
- CAPTURE_CALL_JAVA = 0x00000004 // the callback thread can call java
- };
-
-
- /* Constructor.
- * See AudioEffect constructor for details on parameters.
- */
- Visualizer(const String16& opPackageName,
- int32_t priority = 0,
- effect_callback_t cbf = NULL,
- void* user = NULL,
- audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX);
-
- ~Visualizer();
-
- virtual status_t setEnabled(bool enabled);
-
- // maximum capture size in samples
- static uint32_t getMaxCaptureSize() { return VISUALIZER_CAPTURE_SIZE_MAX; }
- // minimum capture size in samples
- static uint32_t getMinCaptureSize() { return VISUALIZER_CAPTURE_SIZE_MIN; }
- // maximum capture rate in millihertz
- static uint32_t getMaxCaptureRate() { return CAPTURE_RATE_MAX; }
-
- // callback used to return periodic PCM or FFT captures to the application. Either one or both
- // types of data are returned (PCM and FFT) according to flags indicated when installing the
- // callback. When a type of data is not present, the corresponding size (waveformSize or
- // fftSize) is 0.
- typedef void (*capture_cbk_t)(void* user,
- uint32_t waveformSize,
- uint8_t *waveform,
- uint32_t fftSize,
- uint8_t *fft,
- uint32_t samplingrate);
-
- // install a callback to receive periodic captures. The capture rate is specified in milliHertz
- // and the capture format is according to flags (see callback_flags).
- status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate);
-
- // set the capture size; the capture size must be a power of two in the range
- // [VISUALIZER_CAPTURE_SIZE_MIN, VISUALIZER_CAPTURE_SIZE_MAX]
- // must be called when the visualizer is not enabled
- status_t setCaptureSize(uint32_t size);
- uint32_t getCaptureSize() { return mCaptureSize; }
-
- // returns the capture rate indicated when installing the callback
- uint32_t getCaptureRate() { return mCaptureRate; }
-
- // returns the sampling rate of the audio being captured
- uint32_t getSamplingRate() { return mSampleRate; }
-
- // set the way volume affects the captured data
- // mode must one of VISUALIZER_SCALING_MODE_NORMALIZED,
- // VISUALIZER_SCALING_MODE_AS_PLAYED
- status_t setScalingMode(uint32_t mode);
- uint32_t getScalingMode() { return mScalingMode; }
-
- // set which measurements are done on the audio buffers processed by the effect.
- // valid measurements (mask): MEASUREMENT_MODE_PEAK_RMS
- status_t setMeasurementMode(uint32_t mode);
- uint32_t getMeasurementMode() { return mMeasurementMode; }
-
- // return a set of int32_t measurements
- status_t getIntMeasurements(uint32_t type, uint32_t number, int32_t *measurements);
-
- // return a capture in PCM 8 bit unsigned format. The size of the capture is equal to
- // getCaptureSize()
- status_t getWaveForm(uint8_t *waveform);
-
- // return a capture in FFT 8 bit signed format. The size of the capture is equal to
- // getCaptureSize() but the length of the FFT is half of the size (both parts of the spectrum
- // are returned)
- status_t getFft(uint8_t *fft);
- void release();
-
-protected:
- // from IEffectClient
- virtual void controlStatusChanged(bool controlGranted);
-
-private:
-
- static const uint32_t CAPTURE_RATE_MAX = 20000;
- static const uint32_t CAPTURE_RATE_DEF = 10000;
- static const uint32_t CAPTURE_SIZE_DEF = VISUALIZER_CAPTURE_SIZE_MAX;
-
- /* internal class to handle the callback */
- class CaptureThread : public Thread
- {
- public:
- CaptureThread(Visualizer* visualizer, uint32_t captureRate, bool bCanCallJava = false);
-
- private:
- friend class Visualizer;
- virtual bool threadLoop();
- wp<Visualizer> mReceiver;
- Mutex mLock;
- uint32_t mSleepTimeUs;
- };
-
- status_t doFft(uint8_t *fft, uint8_t *waveform);
- void periodicCapture();
- uint32_t initCaptureSize();
-
- Mutex mCaptureLock;
- uint32_t mCaptureRate;
- uint32_t mCaptureSize;
- uint32_t mSampleRate;
- uint32_t mScalingMode;
- uint32_t mMeasurementMode;
- capture_cbk_t mCaptureCallBack;
- void *mCaptureCbkUser;
- sp<CaptureThread> mCaptureThread;
- uint32_t mCaptureFlags;
-};
-
-
-}; // namespace android
-
-#endif // ANDROID_MEDIA_VISUALIZER_H
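For reference, a capture callback compatible with the capture_cbk_t typedef that the removed header defined would look like the sketch below; the function name and the printed text are illustrative only.

#include <cstdint>
#include <cstdio>

// Matches capture_cbk_t: when a representation was not requested via the
// callback flags, its size argument (waveformSize or fftSize) is 0.
static void onCapture(void* /* user */,
                      uint32_t waveformSize, uint8_t* /* waveform */,
                      uint32_t fftSize, uint8_t* /* fft */,
                      uint32_t samplingRate) {
    printf("capture: %u waveform bytes, %u fft bytes, sampling rate %u\n",
           waveformSize, fftSize, samplingRate);
}

// Installed (per the removed header) with, for example:
//   visualizer->setCaptureCallBack(onCapture, nullptr, CAPTURE_WAVEFORM | CAPTURE_FFT, rate);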
diff --git a/media/libmedia/include/media/omx/1.0/Conversion.h b/media/libmedia/include/media/omx/1.0/Conversion.h
index 6dc46b7..811936b 100644
--- a/media/libmedia/include/media/omx/1.0/Conversion.h
+++ b/media/libmedia/include/media/omx/1.0/Conversion.h
@@ -45,7 +45,6 @@
#include <android/hardware/media/omx/1.0/IOmxObserver.h>
#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
-#include <android/IGraphicBufferSource.h>
#include <android/IOMXBufferSource.h>
namespace android {
diff --git a/media/libmedia/include/media/omx/1.0/WOmx.h b/media/libmedia/include/media/omx/1.0/WOmx.h
index 0680eec..46ada9b 100644
--- a/media/libmedia/include/media/omx/1.0/WOmx.h
+++ b/media/libmedia/include/media/omx/1.0/WOmx.h
@@ -67,7 +67,7 @@
sp<IOMXNode>* omxNode) override;
status_t createInputSurface(
sp<::android::IGraphicBufferProducer>* bufferProducer,
- sp<::android::IGraphicBufferSource>* bufferSource) override;
+ sp<::android::hardware::media::omx::V1_0::IGraphicBufferSource>* bufferSource) override;
};
} // namespace utils
diff --git a/media/libmedia/omx/1.0/WOmx.cpp b/media/libmedia/omx/1.0/WOmx.cpp
index ce624fa..4bacdda 100644
--- a/media/libmedia/omx/1.0/WOmx.cpp
+++ b/media/libmedia/omx/1.0/WOmx.cpp
@@ -18,7 +18,6 @@
#include <media/omx/1.0/WOmx.h>
#include <media/omx/1.0/WOmxNode.h>
#include <media/omx/1.0/WOmxObserver.h>
-#include <media/omx/1.0/WGraphicBufferSource.h>
#include <media/omx/1.0/Conversion.h>
namespace android {
@@ -70,7 +69,7 @@
status_t LWOmx::createInputSurface(
sp<::android::IGraphicBufferProducer>* bufferProducer,
- sp<::android::IGraphicBufferSource>* bufferSource) {
+ sp<::android::hardware::media::omx::V1_0::IGraphicBufferSource>* bufferSource) {
status_t fnStatus;
status_t transStatus = toStatusT(mBase->createInputSurface(
[&fnStatus, bufferProducer, bufferSource] (
@@ -79,7 +78,7 @@
sp<IGraphicBufferSource> const& tSource) {
fnStatus = toStatusT(status);
*bufferProducer = new H2BGraphicBufferProducer(tProducer);
- *bufferSource = new LWGraphicBufferSource(tSource);
+ *bufferSource = tSource;
}));
return transStatus == NO_ERROR ? fnStatus : transStatus;
}
diff --git a/media/libmediahelper/Android.bp b/media/libmediahelper/Android.bp
new file mode 100644
index 0000000..72edeec
--- /dev/null
+++ b/media/libmediahelper/Android.bp
@@ -0,0 +1,29 @@
+cc_library_headers {
+ name: "libmedia_helper_headers",
+ vendor_available: true,
+ export_include_dirs: ["include"],
+}
+
+cc_library {
+ name: "libmedia_helper",
+ vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
+ double_loadable: true,
+ srcs: ["AudioParameter.cpp", "TypeConverter.cpp"],
+ cflags: [
+ "-Werror",
+ "-Wextra",
+ "-Wall",
+ ],
+ shared_libs: ["libutils", "liblog"],
+ header_libs: [
+ "libmedia_helper_headers",
+ "libaudio_system_headers",
+ ],
+ export_header_lib_headers: [
+ "libmedia_helper_headers",
+ ],
+ clang: true,
+}
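libmedia_helper carries AudioParameter and TypeConverter, which this change only moves. As a rough sketch of what dependents use it for, assuming the long-standing AudioParameter key/value API (add, addInt, getInt, toString):

#include <media/AudioParameter.h>
#include <utils/String8.h>

using android::AudioParameter;
using android::String8;

// Build a "key1=value1;key2=value2" string and parse it back (illustrative keys).
static String8 buildAndParseParameters() {
    AudioParameter params;
    params.add(String8("routing_hint"), String8("speaker"));
    params.addInt(String8("sample_rate"), 48000);

    AudioParameter parsed(params.toString());
    int rate = 0;
    if (parsed.getInt(String8("sample_rate"), rate) == android::NO_ERROR) {
        // rate == 48000 here
    }
    return params.toString();
}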
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmediahelper/AudioParameter.cpp
similarity index 100%
rename from media/libmedia/AudioParameter.cpp
rename to media/libmediahelper/AudioParameter.cpp
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
similarity index 100%
rename from media/libmedia/TypeConverter.cpp
rename to media/libmediahelper/TypeConverter.cpp
diff --git a/media/libaudioclient/include/media/AudioParameter.h b/media/libmediahelper/include/media/AudioParameter.h
similarity index 100%
rename from media/libaudioclient/include/media/AudioParameter.h
rename to media/libmediahelper/include/media/AudioParameter.h
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmediahelper/include/media/TypeConverter.h
similarity index 100%
rename from media/libmedia/include/media/TypeConverter.h
rename to media/libmediahelper/include/media/TypeConverter.h
diff --git a/media/libmedia/include/media/convert.h b/media/libmediahelper/include/media/convert.h
similarity index 100%
rename from media/libmedia/include/media/convert.h
rename to media/libmediahelper/include/media/convert.h
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 9d348ec..f599190 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -44,6 +44,7 @@
},
visibility: [
+ "//cts/tests/tests/nativemedia/mediametrics",
"//frameworks/av:__subpackages__",
"//frameworks/base/core/jni",
"//frameworks/base/media/jni",
diff --git a/media/libmediametrics/IMediaAnalyticsService.cpp b/media/libmediametrics/IMediaAnalyticsService.cpp
index 9114927..1ab6653 100644
--- a/media/libmediametrics/IMediaAnalyticsService.cpp
+++ b/media/libmediametrics/IMediaAnalyticsService.cpp
@@ -32,15 +32,10 @@
#include <media/MediaAnalyticsItem.h>
#include <media/IMediaAnalyticsService.h>
-#define DEBUGGING 0
-#define DEBUGGING_FLOW 0
-#define DEBUGGING_RETURNS 0
-
namespace android {
enum {
- GENERATE_UNIQUE_SESSIONID = IBinder::FIRST_CALL_TRANSACTION,
- SUBMIT_ITEM,
+ SUBMIT_ITEM_ONEWAY = IBinder::FIRST_CALL_TRANSACTION,
};
class BpMediaAnalyticsService: public BpInterface<IMediaAnalyticsService>
@@ -51,61 +46,23 @@
{
}
- virtual MediaAnalyticsItem::SessionID_t generateUniqueSessionID() {
- Parcel data, reply;
- status_t err;
- MediaAnalyticsItem::SessionID_t sessionid =
- MediaAnalyticsItem::SessionIDInvalid;
-
- data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
- err = remote()->transact(GENERATE_UNIQUE_SESSIONID, data, &reply);
- if (err != NO_ERROR) {
- ALOGW("bad response from service for generateSessionId, err=%d", err);
- return MediaAnalyticsItem::SessionIDInvalid;
- }
- sessionid = reply.readInt64();
- if (DEBUGGING_RETURNS) {
- ALOGD("the caller gets a sessionid of %" PRId64 " back", sessionid);
- }
- return sessionid;
- }
-
- virtual MediaAnalyticsItem::SessionID_t submit(MediaAnalyticsItem *item, bool forcenew)
+ status_t submit(MediaAnalyticsItem *item) override
{
- // have this record submit itself
- // this will be a binder call with appropriate timing
- // return value is the uuid that the system generated for it.
- // the return value 0 and -1 are reserved.
- // -1 to indicate that there was a problem recording...
-
- Parcel data, reply;
- status_t err;
-
- if (item == NULL) {
- return MediaAnalyticsItem::SessionIDInvalid;
+ if (item == nullptr) {
+ return BAD_VALUE;
}
+ ALOGV("%s: (ONEWAY) item=%s", __func__, item->toString().c_str());
+ Parcel data;
data.writeInterfaceToken(IMediaAnalyticsService::getInterfaceDescriptor());
- if(DEBUGGING_FLOW) {
- ALOGD("client offers record: %s", item->toString().c_str());
- }
- data.writeBool(forcenew);
item->writeToParcel(&data);
- err = remote()->transact(SUBMIT_ITEM, data, &reply);
- if (err != NO_ERROR) {
- ALOGW("bad response from service for submit, err=%d", err);
- return MediaAnalyticsItem::SessionIDInvalid;
- }
-
- // get an answer out of 'reply'
- int64_t sessionid = reply.readInt64();
- if (DEBUGGING_RETURNS) {
- ALOGD("the caller gets sessionid=%" PRId64 "", sessionid);
- }
- return sessionid;
+ status_t err = remote()->transact(
+ SUBMIT_ITEM_ONEWAY, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
+ ALOGW_IF(err != NO_ERROR, "%s: bad response from service for submit, err=%d",
+ __func__, err);
+ return err;
}
-
};
IMPLEMENT_META_INTERFACE(MediaAnalyticsService, "android.media.IMediaAnalyticsService");
@@ -115,49 +72,23 @@
status_t BnMediaAnalyticsService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
-
-
- // get calling pid/tid
- IPCThreadState *ipc = IPCThreadState::self();
- int clientPid = ipc->getCallingPid();
- // permission checking
-
- if(DEBUGGING_FLOW) {
- ALOGD("running in service, code %d, pid %d; called from pid %d",
- code, getpid(), clientPid);
- }
+ const int clientPid = IPCThreadState::self()->getCallingPid();
switch (code) {
+ case SUBMIT_ITEM_ONEWAY: {
+ CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
- case GENERATE_UNIQUE_SESSIONID: {
- CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
+ MediaAnalyticsItem * const item = MediaAnalyticsItem::create();
+ if (item->readFromParcel(data) < 0) {
+ return BAD_VALUE;
+ }
+ item->setPid(clientPid);
+ const status_t status __unused = submitInternal(item, true /* release */);
+ return NO_ERROR;
+ } break;
- MediaAnalyticsItem::SessionID_t sessionid = generateUniqueSessionID();
- reply->writeInt64(sessionid);
-
- return NO_ERROR;
- } break;
-
- case SUBMIT_ITEM: {
- CHECK_INTERFACE(IMediaAnalyticsService, data, reply);
-
- bool forcenew;
- MediaAnalyticsItem *item = MediaAnalyticsItem::create();
-
- data.readBool(&forcenew);
- item->readFromParcel(data);
-
- item->setPid(clientPid);
-
- // submit() takes over ownership of 'item'
- MediaAnalyticsItem::SessionID_t sessionid = submit(item, forcenew);
- reply->writeInt64(sessionid);
-
- return NO_ERROR;
- } break;
-
- default:
- return BBinder::onTransact(code, data, reply, flags);
+ default:
+ return BBinder::onTransact(code, data, reply, flags);
}
}
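The proxy above now submits with FLAG_ONEWAY and no reply parcel, so the client never blocks on the metrics service. The same pattern, reduced to a hypothetical standalone helper (the names and the generic Parcelable payload are assumptions, not part of this change):

#include <binder/IBinder.h>
#include <binder/Parcel.h>
#include <binder/Parcelable.h>
#include <utils/String16.h>

using namespace android;

// Serialize a payload and fire a one-way transaction: a null reply parcel plus
// IBinder::FLAG_ONEWAY means transact() returns as soon as the call is queued.
static status_t submitOneway(const sp<IBinder>& remote,
                             const String16& descriptor,
                             uint32_t code,
                             const Parcelable& payload) {
    Parcel data;
    data.writeInterfaceToken(descriptor);
    payload.writeToParcel(&data);
    return remote->transact(code, data, nullptr /* reply */, IBinder::FLAG_ONEWAY);
}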
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index 02c23b1..a4efa49 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -22,10 +22,11 @@
#include <string.h>
#include <sys/types.h>
+#include <mutex>
+
#include <binder/Parcel.h>
#include <utils/Errors.h>
#include <utils/Log.h>
-#include <utils/Mutex.h>
#include <utils/SortedVector.h>
#include <utils/threads.h>
@@ -44,14 +45,6 @@
// the service is off.
#define SVC_TRIES 2
-// the few universal keys we have
-const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyAny = "any";
-const MediaAnalyticsItem::Key MediaAnalyticsItem::kKeyNone = "none";
-
-const char * const MediaAnalyticsItem::EnabledProperty = "media.metrics.enabled";
-const char * const MediaAnalyticsItem::EnabledPropertyPersist = "persist.media.metrics.enabled";
-const int MediaAnalyticsItem::EnabledProperty_default = 1;
-
// So caller doesn't need to know size of allocated space
MediaAnalyticsItem *MediaAnalyticsItem::create()
{
@@ -64,32 +57,14 @@
return item;
}
-// access functions for the class
-MediaAnalyticsItem::MediaAnalyticsItem()
- : mPid(-1),
- mUid(-1),
- mPkgVersionCode(0),
- mSessionID(MediaAnalyticsItem::SessionIDNone),
- mTimestamp(0),
- mFinalized(1),
- mPropCount(0), mPropSize(0), mProps(NULL)
-{
- mKey = MediaAnalyticsItem::kKeyNone;
+MediaAnalyticsItem* MediaAnalyticsItem::convert(mediametrics_handle_t handle) {
+ MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ return item;
}
-MediaAnalyticsItem::MediaAnalyticsItem(MediaAnalyticsItem::Key key)
- : mPid(-1),
- mUid(-1),
- mPkgVersionCode(0),
- mSessionID(MediaAnalyticsItem::SessionIDNone),
- mTimestamp(0),
- mFinalized(1),
- mPropCount(0), mPropSize(0), mProps(NULL)
-{
- if (DEBUG_ALLOCATIONS) {
- ALOGD("Allocate MediaAnalyticsItem @ %p", this);
- }
- mKey = key;
+mediametrics_handle_t MediaAnalyticsItem::convert(MediaAnalyticsItem *item ) {
+ mediametrics_handle_t handle = (mediametrics_handle_t) item;
+ return handle;
}
MediaAnalyticsItem::~MediaAnalyticsItem() {
@@ -104,13 +79,10 @@
// clean allocated storage from key
mKey.clear();
- // clean various major parameters
- mSessionID = MediaAnalyticsItem::SessionIDNone;
-
// clean attributes
// contents of the attributes
for (size_t i = 0 ; i < mPropCount; i++ ) {
- clearProp(&mProps[i]);
+ mProps[i].clear();
}
// the attribute records themselves
if (mProps != NULL) {
@@ -133,14 +105,12 @@
dst->mUid = this->mUid;
dst->mPkgName = this->mPkgName;
dst->mPkgVersionCode = this->mPkgVersionCode;
- dst->mSessionID = this->mSessionID;
dst->mTimestamp = this->mTimestamp;
- dst->mFinalized = this->mFinalized;
// properties aka attributes
dst->growProps(this->mPropCount);
for(size_t i=0;i<mPropCount;i++) {
- copyProp(&dst->mProps[i], &this->mProps[i]);
+ dst->mProps[i] = this->mProps[i];
}
dst->mPropCount = this->mPropCount;
}
@@ -148,35 +118,6 @@
return dst;
}
-MediaAnalyticsItem &MediaAnalyticsItem::setSessionID(MediaAnalyticsItem::SessionID_t id) {
- mSessionID = id;
- return *this;
-}
-
-MediaAnalyticsItem::SessionID_t MediaAnalyticsItem::getSessionID() const {
- return mSessionID;
-}
-
-MediaAnalyticsItem::SessionID_t MediaAnalyticsItem::generateSessionID() {
-
- if (mSessionID == SessionIDNone) {
- // get one from the server
- MediaAnalyticsItem::SessionID_t newid = SessionIDNone;
- sp<IMediaAnalyticsService> svc = getInstance();
- if (svc != NULL) {
- newid = svc->generateUniqueSessionID();
- }
- mSessionID = newid;
- }
-
- return mSessionID;
-}
-
-MediaAnalyticsItem &MediaAnalyticsItem::clearSessionID() {
- mSessionID = MediaAnalyticsItem::SessionIDNone;
- return *this;
-}
-
MediaAnalyticsItem &MediaAnalyticsItem::setTimestamp(nsecs_t ts) {
mTimestamp = ts;
return *this;
@@ -224,32 +165,22 @@
return *this;
}
-MediaAnalyticsItem::Key MediaAnalyticsItem::getKey() {
- return mKey;
-}
-
// number of attributes we have in this record
int32_t MediaAnalyticsItem::count() const {
return mPropCount;
}
// find the proper entry in the list
-size_t MediaAnalyticsItem::findPropIndex(const char *name, size_t len)
+size_t MediaAnalyticsItem::findPropIndex(const char *name, size_t len) const
{
size_t i = 0;
for (; i < mPropCount; i++) {
- Prop *prop = &mProps[i];
- if (prop->mNameLen != len) {
- continue;
- }
- if (memcmp(name, prop->mName, len) == 0) {
- break;
- }
+ if (mProps[i].isNamed(name, len)) break;
}
return i;
}
-MediaAnalyticsItem::Prop *MediaAnalyticsItem::findProp(const char *name) {
+MediaAnalyticsItem::Prop *MediaAnalyticsItem::findProp(const char *name) const {
size_t len = strlen(name);
size_t i = findPropIndex(name, len);
if (i < mPropCount) {
@@ -258,16 +189,6 @@
return NULL;
}
-void MediaAnalyticsItem::Prop::setName(const char *name, size_t len) {
- free((void *)mName);
- mName = (const char *) malloc(len+1);
- LOG_ALWAYS_FATAL_IF(mName == NULL,
- "failed malloc() for property '%s' (len %zu)",
- name, len);
- memcpy ((void *)mName, name, len+1);
- mNameLen = len;
-}
-
// consider this "find-or-allocate".
// caller validates type and uses clearPropValue() accordingly
MediaAnalyticsItem::Prop *MediaAnalyticsItem::allocateProp(const char *name) {
@@ -297,12 +218,10 @@
size_t len = strlen(name);
size_t i = findPropIndex(name, len);
if (i < mPropCount) {
- Prop *prop = &mProps[i];
- clearProp(prop);
+ mProps[i].clear();
if (i != mPropCount-1) {
// in the middle, bring last one down to fill gap
- copyProp(prop, &mProps[mPropCount-1]);
- clearProp(&mProps[mPropCount-1]);
+ mProps[i].swap(mProps[mPropCount-1]);
}
mPropCount--;
return true;
@@ -310,206 +229,6 @@
return false;
}
-// set the values
-void MediaAnalyticsItem::setInt32(MediaAnalyticsItem::Attr name, int32_t value) {
- Prop *prop = allocateProp(name);
- if (prop != NULL) {
- clearPropValue(prop);
- prop->mType = kTypeInt32;
- prop->u.int32Value = value;
- }
-}
-
-void MediaAnalyticsItem::setInt64(MediaAnalyticsItem::Attr name, int64_t value) {
- Prop *prop = allocateProp(name);
- if (prop != NULL) {
- clearPropValue(prop);
- prop->mType = kTypeInt64;
- prop->u.int64Value = value;
- }
-}
-
-void MediaAnalyticsItem::setDouble(MediaAnalyticsItem::Attr name, double value) {
- Prop *prop = allocateProp(name);
- if (prop != NULL) {
- clearPropValue(prop);
- prop->mType = kTypeDouble;
- prop->u.doubleValue = value;
- }
-}
-
-void MediaAnalyticsItem::setCString(MediaAnalyticsItem::Attr name, const char *value) {
-
- Prop *prop = allocateProp(name);
- // any old value will be gone
- if (prop != NULL) {
- clearPropValue(prop);
- prop->mType = kTypeCString;
- prop->u.CStringValue = strdup(value);
- }
-}
-
-void MediaAnalyticsItem::setRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
- Prop *prop = allocateProp(name);
- if (prop != NULL) {
- clearPropValue(prop);
- prop->mType = kTypeRate;
- prop->u.rate.count = count;
- prop->u.rate.duration = duration;
- }
-}
-
-
-// find/add/set fused into a single operation
-void MediaAnalyticsItem::addInt32(MediaAnalyticsItem::Attr name, int32_t value) {
- Prop *prop = allocateProp(name);
- if (prop == NULL) {
- return;
- }
- switch (prop->mType) {
- case kTypeInt32:
- prop->u.int32Value += value;
- break;
- default:
- clearPropValue(prop);
- prop->mType = kTypeInt32;
- prop->u.int32Value = value;
- break;
- }
-}
-
-void MediaAnalyticsItem::addInt64(MediaAnalyticsItem::Attr name, int64_t value) {
- Prop *prop = allocateProp(name);
- if (prop == NULL) {
- return;
- }
- switch (prop->mType) {
- case kTypeInt64:
- prop->u.int64Value += value;
- break;
- default:
- clearPropValue(prop);
- prop->mType = kTypeInt64;
- prop->u.int64Value = value;
- break;
- }
-}
-
-void MediaAnalyticsItem::addRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
- Prop *prop = allocateProp(name);
- if (prop == NULL) {
- return;
- }
- switch (prop->mType) {
- case kTypeRate:
- prop->u.rate.count += count;
- prop->u.rate.duration += duration;
- break;
- default:
- clearPropValue(prop);
- prop->mType = kTypeRate;
- prop->u.rate.count = count;
- prop->u.rate.duration = duration;
- break;
- }
-}
-
-void MediaAnalyticsItem::addDouble(MediaAnalyticsItem::Attr name, double value) {
- Prop *prop = allocateProp(name);
- if (prop == NULL) {
- return;
- }
- switch (prop->mType) {
- case kTypeDouble:
- prop->u.doubleValue += value;
- break;
- default:
- clearPropValue(prop);
- prop->mType = kTypeDouble;
- prop->u.doubleValue = value;
- break;
- }
-}
-
-// find & extract values
-bool MediaAnalyticsItem::getInt32(MediaAnalyticsItem::Attr name, int32_t *value) {
- Prop *prop = findProp(name);
- if (prop == NULL || prop->mType != kTypeInt32) {
- return false;
- }
- if (value != NULL) {
- *value = prop->u.int32Value;
- }
- return true;
-}
-
-bool MediaAnalyticsItem::getInt64(MediaAnalyticsItem::Attr name, int64_t *value) {
- Prop *prop = findProp(name);
- if (prop == NULL || prop->mType != kTypeInt64) {
- return false;
- }
- if (value != NULL) {
- *value = prop->u.int64Value;
- }
- return true;
-}
-
-bool MediaAnalyticsItem::getRate(MediaAnalyticsItem::Attr name, int64_t *count, int64_t *duration, double *rate) {
- Prop *prop = findProp(name);
- if (prop == NULL || prop->mType != kTypeRate) {
- return false;
- }
- if (count != NULL) {
- *count = prop->u.rate.count;
- }
- if (duration != NULL) {
- *duration = prop->u.rate.duration;
- }
- if (rate != NULL) {
- double r = 0.0;
- if (prop->u.rate.duration != 0) {
- r = prop->u.rate.count / (double) prop->u.rate.duration;
- }
- *rate = r;
- }
- return true;
-}
-
-bool MediaAnalyticsItem::getDouble(MediaAnalyticsItem::Attr name, double *value) {
- Prop *prop = findProp(name);
- if (prop == NULL || prop->mType != kTypeDouble) {
- return false;
- }
- if (value != NULL) {
- *value = prop->u.doubleValue;
- }
- return true;
-}
-
-// caller responsible for the returned string
-bool MediaAnalyticsItem::getCString(MediaAnalyticsItem::Attr name, char **value) {
- Prop *prop = findProp(name);
- if (prop == NULL || prop->mType != kTypeCString) {
- return false;
- }
- if (value != NULL) {
- *value = strdup(prop->u.CStringValue);
- }
- return true;
-}
-
-bool MediaAnalyticsItem::getString(MediaAnalyticsItem::Attr name, std::string *value) {
- Prop *prop = findProp(name);
- if (prop == NULL || prop->mType != kTypeCString) {
- return false;
- }
- if (value != NULL) {
- // std::string makes a copy for us
- *value = prop->u.CStringValue;
- }
- return true;
-}
-
// remove indicated keys and their values
// return value is # keys removed
int32_t MediaAnalyticsItem::filter(int n, MediaAnalyticsItem::Attr attrs[]) {
@@ -527,12 +246,12 @@
} else if (j+1 == mPropCount) {
// last one, shorten
zapped++;
- clearProp(&mProps[j]);
+ mProps[j].clear();
mPropCount--;
} else {
// in the middle, bring last one down and shorten
zapped++;
- clearProp(&mProps[j]);
+ mProps[j].clear();
mProps[j] = mProps[mPropCount-1];
mPropCount--;
}
@@ -550,13 +269,13 @@
for (ssize_t i = mPropCount-1 ; i >=0 ; i--) {
Prop *prop = &mProps[i];
for (ssize_t j = 0; j < n ; j++) {
- if (strcmp(prop->mName, attrs[j]) == 0) {
- clearProp(prop);
+ if (prop->isNamed(attrs[j])) {
+ prop->clear();
zapped++;
if (i != (ssize_t)(mPropCount-1)) {
*prop = mProps[mPropCount-1];
}
- initProp(&mProps[mPropCount-1]);
+ mProps[mPropCount-1].clear();
mPropCount--;
break;
}
@@ -571,63 +290,6 @@
return filter(1, &name);
}
-// handle individual items/properties stored within the class
-//
-
-void MediaAnalyticsItem::initProp(Prop *prop) {
- if (prop != NULL) {
- prop->mName = NULL;
- prop->mNameLen = 0;
-
- prop->mType = kTypeNone;
- }
-}
-
-void MediaAnalyticsItem::clearProp(Prop *prop)
-{
- if (prop != NULL) {
- if (prop->mName != NULL) {
- free((void *)prop->mName);
- prop->mName = NULL;
- prop->mNameLen = 0;
- }
-
- clearPropValue(prop);
- }
-}
-
-void MediaAnalyticsItem::clearPropValue(Prop *prop)
-{
- if (prop != NULL) {
- if (prop->mType == kTypeCString && prop->u.CStringValue != NULL) {
- free(prop->u.CStringValue);
- prop->u.CStringValue = NULL;
- }
- prop->mType = kTypeNone;
- }
-}
-
-void MediaAnalyticsItem::copyProp(Prop *dst, const Prop *src)
-{
- // get rid of any pointers in the dst
- clearProp(dst);
-
- *dst = *src;
-
- // fix any pointers that we blindly copied, so we have our own copies
- if (dst->mName) {
- void *p = malloc(dst->mNameLen + 1);
- LOG_ALWAYS_FATAL_IF(p == NULL,
- "failed malloc() duping property '%s' (len %zu)",
- dst->mName, dst->mNameLen);
- memcpy (p, src->mName, dst->mNameLen + 1);
- dst->mName = (const char *) p;
- }
- if (dst->mType == kTypeCString) {
- dst->u.CStringValue = strdup(src->u.CStringValue);
- }
-}
-
bool MediaAnalyticsItem::growProps(int increment)
{
if (increment <= 0) {
@@ -638,7 +300,7 @@
if (ni != NULL) {
for (int i = mPropSize; i < nsize; i++) {
- initProp(&ni[i]);
+ new (&ni[i]) Prop(); // placement new
}
mProps = ni;
mPropSize = nsize;
@@ -673,11 +335,8 @@
mUid = data.readInt32();
mPkgName = data.readCString();
mPkgVersionCode = data.readInt64();
- mSessionID = data.readInt64();
// We no longer pay attention to user setting of finalized, BUT it's
// still part of the wire packet -- so read & discard.
- mFinalized = data.readInt32();
- mFinalized = 1;
mTimestamp = data.readInt64();
int count = data.readInt32();
@@ -738,41 +397,14 @@
data->writeInt32(mUid);
data->writeCString(mPkgName.c_str());
data->writeInt64(mPkgVersionCode);
- data->writeInt64(mSessionID);
- data->writeInt32(mFinalized);
data->writeInt64(mTimestamp);
// set of items
- int count = mPropCount;
+ const size_t count = mPropCount;
data->writeInt32(count);
- for (int i = 0 ; i < count; i++ ) {
- Prop *prop = &mProps[i];
- data->writeCString(prop->mName);
- data->writeInt32(prop->mType);
- switch (prop->mType) {
- case MediaAnalyticsItem::kTypeInt32:
- data->writeInt32(prop->u.int32Value);
- break;
- case MediaAnalyticsItem::kTypeInt64:
- data->writeInt64(prop->u.int64Value);
- break;
- case MediaAnalyticsItem::kTypeDouble:
- data->writeDouble(prop->u.doubleValue);
- break;
- case MediaAnalyticsItem::kTypeRate:
- data->writeInt64(prop->u.rate.count);
- data->writeInt64(prop->u.rate.duration);
- break;
- case MediaAnalyticsItem::kTypeCString:
- data->writeCString(prop->u.CStringValue);
- break;
- default:
- ALOGE("found bad Prop type: %d, idx %d, name %s",
- prop->mType, i, prop->mName);
- break;
- }
+ for (size_t i = 0 ; i < count; i++ ) {
+ mProps[i].writeToParcel(data);
}
-
return 0;
}
@@ -785,11 +417,11 @@
return strdup(val.c_str());
}
-std::string MediaAnalyticsItem::toString() {
+std::string MediaAnalyticsItem::toString() const {
return toString(PROTO_LAST);
}
-std::string MediaAnalyticsItem::toString(int version) {
+std::string MediaAnalyticsItem::toString(int version) const {
// v0 : released with 'o'
// v1 : bug fix (missing pid/finalized separator),
@@ -815,9 +447,7 @@
// same order as we spill into the parcel, although not required
// key+session are our primary matching criteria
result.append(mKey.c_str());
- result.append(":");
- snprintf(buffer, sizeof(buffer), "%" PRId64 ":", mSessionID);
- result.append(buffer);
+ result.append(":0:"); // sessionID
snprintf(buffer, sizeof(buffer), "%d:", mUid);
result.append(buffer);
@@ -836,7 +466,7 @@
}
result.append(buffer);
- snprintf(buffer, sizeof(buffer), "%d:", mFinalized);
+ snprintf(buffer, sizeof(buffer), "%d:", 0 /* finalized */); // TODO: remove this.
result.append(buffer);
snprintf(buffer, sizeof(buffer), "%" PRId64 ":", mTimestamp);
result.append(buffer);
@@ -846,39 +476,8 @@
snprintf(buffer, sizeof(buffer), "%d:", count);
result.append(buffer);
for (int i = 0 ; i < count; i++ ) {
- Prop *prop = &mProps[i];
- switch (prop->mType) {
- case MediaAnalyticsItem::kTypeInt32:
- snprintf(buffer,sizeof(buffer),
- "%s=%d:", prop->mName, prop->u.int32Value);
- break;
- case MediaAnalyticsItem::kTypeInt64:
- snprintf(buffer,sizeof(buffer),
- "%s=%" PRId64 ":", prop->mName, prop->u.int64Value);
- break;
- case MediaAnalyticsItem::kTypeDouble:
- snprintf(buffer,sizeof(buffer),
- "%s=%e:", prop->mName, prop->u.doubleValue);
- break;
- case MediaAnalyticsItem::kTypeRate:
- snprintf(buffer,sizeof(buffer),
- "%s=%" PRId64 "/%" PRId64 ":", prop->mName,
- prop->u.rate.count, prop->u.rate.duration);
- break;
- case MediaAnalyticsItem::kTypeCString:
- snprintf(buffer,sizeof(buffer), "%s=", prop->mName);
- result.append(buffer);
- // XXX: sanitize string for ':' '='
- result.append(prop->u.CStringValue);
- buffer[0] = ':';
- buffer[1] = '\0';
- break;
- default:
- ALOGE("to_String bad item type: %d for %s",
- prop->mType, prop->mName);
- break;
- }
- result.append(buffer);
+ mProps[i].toString(buffer, sizeof(buffer));
+ result.append(buffer);
}
if (version == PROTO_V0) {
@@ -893,23 +492,12 @@
// for the lazy, we offer methods that finds the service and
// calls the appropriate daemon
bool MediaAnalyticsItem::selfrecord() {
- return selfrecord(false);
-}
-
-bool MediaAnalyticsItem::selfrecord(bool forcenew) {
-
- if (DEBUG_API) {
- std::string p = this->toString();
- ALOGD("selfrecord of: %s [forcenew=%d]", p.c_str(), forcenew);
- }
-
+ ALOGD_IF(DEBUG_API, "%s: delivering %s", __func__, this->toString().c_str());
sp<IMediaAnalyticsService> svc = getInstance();
-
if (svc != NULL) {
- MediaAnalyticsItem::SessionID_t newid = svc->submit(this, forcenew);
- if (newid == SessionIDInvalid) {
- std::string p = this->toString();
- ALOGW("Failed to record: %s [forcenew=%d]", p.c_str(), forcenew);
+ status_t status = svc->submit(this);
+ if (status != NO_ERROR) {
+ ALOGW("%s: failed to record: %s", __func__, this->toString().c_str());
return false;
}
return true;
@@ -918,29 +506,33 @@
}
}
-// get a connection we can reuse for most of our lifetime
-// static
-sp<IMediaAnalyticsService> MediaAnalyticsItem::sAnalyticsService;
-static Mutex sInitMutex;
-static int remainingBindAttempts = SVC_TRIES;
//static
bool MediaAnalyticsItem::isEnabled() {
- int enabled = property_get_int32(MediaAnalyticsItem::EnabledProperty, -1);
+ // completely skip logging from certain UIDs. We do this here
+ // to avoid the multi-second timeouts while we learn that
+ // sepolicy will not let us find the service.
+ // We do this only for a select set of UIDs.
+ // The sepolicy protection is still in place; we just want a faster
+ // response from this specific, small set of UIDs.
+ // This is checked only once in the lifetime of the process.
+ const uid_t uid = getuid();
+ switch (uid) {
+ case AID_RADIO: // telephony subsystem, RIL
+ return false;
+ }
+
+ int enabled = property_get_int32(MediaAnalyticsItem::EnabledProperty, -1);
if (enabled == -1) {
enabled = property_get_int32(MediaAnalyticsItem::EnabledPropertyPersist, -1);
}
if (enabled == -1) {
enabled = MediaAnalyticsItem::EnabledProperty_default;
}
- if (enabled <= 0) {
- return false;
- }
- return true;
+ return enabled > 0;
}
-
// monitor health of our connection to the metrics service
class MediaMetricsDeathNotifier : public IBinder::DeathRecipient {
virtual void binderDied(const wp<IBinder> &) {
@@ -949,83 +541,56 @@
}
};
-static sp<MediaMetricsDeathNotifier> sNotifier = NULL;
+static sp<MediaMetricsDeathNotifier> sNotifier;
+// static
+sp<IMediaAnalyticsService> MediaAnalyticsItem::sAnalyticsService;
+static std::mutex sServiceMutex;
+static int sRemainingBindAttempts = SVC_TRIES;
// static
void MediaAnalyticsItem::dropInstance() {
- Mutex::Autolock _l(sInitMutex);
- remainingBindAttempts = SVC_TRIES;
- sAnalyticsService = NULL;
+ std::lock_guard _l(sServiceMutex);
+ sRemainingBindAttempts = SVC_TRIES;
+ sAnalyticsService = nullptr;
}
//static
sp<IMediaAnalyticsService> MediaAnalyticsItem::getInstance() {
-
static const char *servicename = "media.metrics";
- int enabled = isEnabled();
+ static const bool enabled = isEnabled(); // singleton initialized
if (enabled == false) {
- if (DEBUG_SERVICEACCESS) {
- ALOGD("disabled");
- }
- return NULL;
+ ALOGD_IF(DEBUG_SERVICEACCESS, "disabled");
+ return nullptr;
}
-
- // completely skip logging from certain UIDs. We do this here
- // to avoid the multi-second timeouts while we learn that
- // sepolicy will not let us find the service.
- // We do this only for a select set of UIDs
- // The sepolicy protection is still in place, we just want a faster
- // response from this specific, small set of uids.
- {
- uid_t uid = getuid();
- switch (uid) {
- case AID_RADIO: // telephony subsystem, RIL
- return NULL;
- break;
- default:
- // let sepolicy deny access if appropriate
- break;
- }
- }
-
- {
- Mutex::Autolock _l(sInitMutex);
+ std::lock_guard _l(sServiceMutex);
+ // think of sRemainingBindAttempts as telling us whether service == nullptr because
+ // (1) we haven't tried to initialize it yet
+ // (2) we've tried to initialize it, but failed.
+ if (sAnalyticsService == nullptr && sRemainingBindAttempts > 0) {
const char *badness = "";
-
- // think of remainingBindAttempts as telling us whether service==NULL because
- // (1) we haven't tried to initialize it yet
- // (2) we've tried to initialize it, but failed.
- if (sAnalyticsService == NULL && remainingBindAttempts > 0) {
- sp<IServiceManager> sm = defaultServiceManager();
- if (sm != NULL) {
- sp<IBinder> binder = sm->getService(String16(servicename));
- if (binder != NULL) {
- sAnalyticsService = interface_cast<IMediaAnalyticsService>(binder);
- if (sNotifier != NULL) {
- sNotifier = NULL;
- }
- sNotifier = new MediaMetricsDeathNotifier();
- binder->linkToDeath(sNotifier);
- } else {
- badness = "did not find service";
- }
+ sp<IServiceManager> sm = defaultServiceManager();
+ if (sm != nullptr) {
+ sp<IBinder> binder = sm->getService(String16(servicename));
+ if (binder != nullptr) {
+ sAnalyticsService = interface_cast<IMediaAnalyticsService>(binder);
+ sNotifier = new MediaMetricsDeathNotifier();
+ binder->linkToDeath(sNotifier);
} else {
- badness = "No Service Manager access";
+ badness = "did not find service";
}
-
- if (sAnalyticsService == NULL) {
- if (remainingBindAttempts > 0) {
- remainingBindAttempts--;
- }
- if (DEBUG_SERVICEACCESS) {
- ALOGD("Unable to bind to service %s: %s", servicename, badness);
- }
- }
+ } else {
+ badness = "No Service Manager access";
}
-
- return sAnalyticsService;
+ if (sAnalyticsService == nullptr) {
+ if (sRemainingBindAttempts > 0) {
+ sRemainingBindAttempts--;
+ }
+ ALOGD_IF(DEBUG_SERVICEACCESS, "%s: unable to bind to service %s: %s",
+ __func__, servicename, badness);
+ }
}
+ return sAnalyticsService;
}
// merge the info from 'incoming' into this record.
@@ -1036,8 +601,6 @@
// 'this' should never be missing both of them...
if (mKey.empty()) {
mKey = incoming->mKey;
- } else if (mSessionID == 0) {
- mSessionID = incoming->mSessionID;
}
// for each attribute from 'incoming', resolve appropriately
@@ -1058,12 +621,12 @@
// no oprop, so we insert the new one
oprop = allocateProp(p);
if (oprop != NULL) {
- copyProp(oprop, iprop);
+ *oprop = *iprop;
} else {
ALOGW("dropped property '%s'", iprop->mName);
}
} else {
- copyProp(oprop, iprop);
+ *oprop = *iprop;
}
}
@@ -1236,5 +799,58 @@
return false;
}
+void MediaAnalyticsItem::Prop::writeToParcel(Parcel *data) const
+{
+ data->writeCString(mName);
+ data->writeInt32(mType);
+ switch (mType) {
+ case kTypeInt32:
+ data->writeInt32(u.int32Value);
+ break;
+ case kTypeInt64:
+ data->writeInt64(u.int64Value);
+ break;
+ case kTypeDouble:
+ data->writeDouble(u.doubleValue);
+ break;
+ case kTypeRate:
+ data->writeInt64(u.rate.count);
+ data->writeInt64(u.rate.duration);
+ break;
+ case kTypeCString:
+ data->writeCString(u.CStringValue);
+ break;
+ default:
+ ALOGE("%s: found bad type: %d, name %s", __func__, mType, mName);
+ break;
+ }
+}
+
+void MediaAnalyticsItem::Prop::toString(char *buffer, size_t length) const {
+ switch (mType) {
+ case kTypeInt32:
+ snprintf(buffer, length, "%s=%d:", mName, u.int32Value);
+ break;
+ case MediaAnalyticsItem::kTypeInt64:
+ snprintf(buffer, length, "%s=%lld:", mName, (long long)u.int64Value);
+ break;
+ case MediaAnalyticsItem::kTypeDouble:
+ snprintf(buffer, length, "%s=%e:", mName, u.doubleValue);
+ break;
+ case MediaAnalyticsItem::kTypeRate:
+ snprintf(buffer, length, "%s=%lld/%lld:",
+ mName, (long long)u.rate.count, (long long)u.rate.duration);
+ break;
+ case MediaAnalyticsItem::kTypeCString:
+ // TODO sanitize string for ':' '='
+ snprintf(buffer, length, "%s=%s:", mName, u.CStringValue);
+ break;
+ default:
+ ALOGE("%s: bad item type: %d for %s", __func__, mType, mName);
+ if (length > 0) buffer[0] = 0;
+ break;
+ }
+}
+
} // namespace android
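Taken together, the client-side flow that survives these changes is: create an item, set or add attributes, and hand it off with selfrecord(), which now performs the one-way submit shown above. A brief sketch with illustrative key and attribute names:

#include <media/MediaAnalyticsItem.h>

using android::MediaAnalyticsItem;

static void reportIllustrativeMetrics() {
    // create() allocates; the caller keeps ownership and must delete the item.
    MediaAnalyticsItem *item = MediaAnalyticsItem::create("audiotrack" /* illustrative key */);
    item->setInt32("samplerate", 48000)
        .setCString("encoding", "pcm16")
        .addInt64("frames", 4800);
    item->selfrecord();  // one-way delivery to media.metrics; no session ID round trip
    delete item;
}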
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
index 6109190..360ae0c 100644
--- a/media/libmediametrics/MediaMetrics.cpp
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -169,6 +169,11 @@
return item->selfrecord();
}
+mediametrics_handle_t mediametrics_dup(mediametrics_handle_t handle) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return android::MediaAnalyticsItem::convert(item);
+ return android::MediaAnalyticsItem::convert(item->dup());
+}
const char *mediametrics_readable(mediametrics_handle_t handle) {
android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
diff --git a/media/libmediametrics/include/IMediaAnalyticsService.h b/media/libmediametrics/include/IMediaAnalyticsService.h
index f635e94..1453b5f 100644
--- a/media/libmediametrics/include/IMediaAnalyticsService.h
+++ b/media/libmediametrics/include/IMediaAnalyticsService.h
@@ -39,19 +39,16 @@
public:
DECLARE_META_INTERFACE(MediaAnalyticsService);
- // generate a unique sessionID to use across multiple requests
- // 'unique' is within this device, since last reboot
- virtual MediaAnalyticsItem::SessionID_t generateUniqueSessionID() = 0;
-
- // submit the indicated record to the mediaanalytics service, where
- // it will be merged (if appropriate) with incomplete records that
- // share the same key and sessionid.
- // 'forcenew' marks any matching incomplete record as complete before
- // inserting this new record.
- // returns the sessionID associated with that item.
- // caller continues to own the passed item
- virtual MediaAnalyticsItem::SessionID_t submit(MediaAnalyticsItem *item, bool forcenew) = 0;
-
+ /**
+ * Submits the indicated record to the media analytics service, where
+ * it will be merged (if appropriate) with incomplete records that
+ * share the same key and sessionID.
+ *
+ * \param item the item to submit.
+ * \return status which is negative if an error is detected (some errors
+ * may be silent and return 0 - success).
+ */
+ virtual status_t submit(MediaAnalyticsItem *item) = 0;
};
// ----------------------------------------------------------------------------
@@ -59,10 +56,15 @@
class BnMediaAnalyticsService: public BnInterface<IMediaAnalyticsService>
{
public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
+ status_t onTransact(uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0) override;
+
+protected:
+ // Internal call where release is true if the service is to delete the item.
+ virtual status_t submitInternal(
+ MediaAnalyticsItem *item, bool release) = 0;
};
}; // namespace android
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index 4a36f6a..f0deaaf 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_MEDIA_MEDIAANALYTICSITEM_H
#define ANDROID_MEDIA_MEDIAANALYTICSITEM_H
+#include "MediaMetrics.h"
+
#include <string>
#include <sys/types.h>
@@ -36,14 +38,10 @@
//
class MediaAnalyticsItem {
+ friend class MediaMetricsJNI; // TODO: remove this access
+ friend class MediaMetricsDeathNotifier; // for dropInstance
- friend class MediaAnalyticsService;
- friend class IMediaAnalyticsService;
- friend class MediaMetricsJNI;
- friend class MetricsSummarizer;
- friend class MediaMetricsDeathNotifier;
-
- public:
+public:
enum Type {
kTypeNone = 0,
@@ -54,20 +52,14 @@
kTypeRate = 5,
};
- // sessionid
- // unique within device, within boot,
- typedef int64_t SessionID_t;
- static constexpr SessionID_t SessionIDInvalid = -1;
- static constexpr SessionID_t SessionIDNone = 0;
-
- // Key: the record descriminator
- // values for the record discriminator
- // values can be "component/component"
- // basic values: "video", "audio", "drm"
- // XXX: need to better define the format
- typedef std::string Key;
- static const Key kKeyNone; // ""
- static const Key kKeyAny; // "*"
+ // Key: the record discriminator
+ // values for the record discriminator
+ // values can be "component/component"
+ // basic values: "video", "audio", "drm"
+ // XXX: need to better define the format
+ using Key = std::string;
+ static constexpr const char * const kKeyNone = "none";
+ static constexpr const char * const kKeyAny = "any";
// Attr: names for attributes within a record
// format "prop1" or "prop/subprop"
@@ -82,31 +74,22 @@
PROTO_LAST = PROTO_V1,
};
- private:
- // use the ::create() method instead
- MediaAnalyticsItem();
- MediaAnalyticsItem(Key);
- MediaAnalyticsItem(const MediaAnalyticsItem&);
- MediaAnalyticsItem &operator=(const MediaAnalyticsItem&);
-
- public:
+ // T must be convertible to mKey
+ template <typename T>
+ explicit MediaAnalyticsItem(T key)
+ : mKey(key) { }
+ MediaAnalyticsItem(const MediaAnalyticsItem&) = delete;
+ MediaAnalyticsItem &operator=(const MediaAnalyticsItem&) = delete;
static MediaAnalyticsItem* create(Key key);
static MediaAnalyticsItem* create();
+ static MediaAnalyticsItem* convert(mediametrics_handle_t);
+ static mediametrics_handle_t convert(MediaAnalyticsItem *);
+
// access functions for the class
~MediaAnalyticsItem();
- // SessionID ties multiple submissions for same key together
- // so that if video "height" and "width" are known at one point
- // and "framerate" is only known later, they can be be brought
- // together.
- MediaAnalyticsItem &setSessionID(SessionID_t);
- MediaAnalyticsItem &clearSessionID();
- SessionID_t getSessionID() const;
- // generates and stores a new ID iff mSessionID == SessionIDNone
- SessionID_t generateSessionID();
-
// reset all contents, discarding any extra data
void clear();
MediaAnalyticsItem *dup();
@@ -114,40 +97,96 @@
// set the key discriminator for the record.
// most often initialized as part of the constructor
MediaAnalyticsItem &setKey(MediaAnalyticsItem::Key);
- MediaAnalyticsItem::Key getKey();
+ const MediaAnalyticsItem::Key& getKey() const { return mKey; }
// # of attributes in the record
int32_t count() const;
- // set values appropriately
- void setInt32(Attr, int32_t value);
- void setInt64(Attr, int64_t value);
- void setDouble(Attr, double value);
- void setRate(Attr, int64_t count, int64_t duration);
- void setCString(Attr, const char *value);
+ template<typename S, typename T>
+ MediaAnalyticsItem &set(S key, T value) {
+ allocateProp(key)->set(value);
+ return *this;
+ }
- // fused get/add/set; if attr wasn't there, it's a simple set.
- // type-mismatch counts as "wasn't there".
- void addInt32(Attr, int32_t value);
- void addInt64(Attr, int64_t value);
- void addDouble(Attr, double value);
- void addRate(Attr, int64_t count, int64_t duration);
+ // set values appropriately
+ MediaAnalyticsItem &setInt32(Attr key, int32_t value) {
+ return set(key, value);
+ }
+ MediaAnalyticsItem &setInt64(Attr key, int64_t value) {
+ return set(key, value);
+ }
+ MediaAnalyticsItem &setDouble(Attr key, double value) {
+ return set(key, value);
+ }
+ MediaAnalyticsItem &setRate(Attr key, int64_t count, int64_t duration) {
+ return set(key, std::make_pair(count, duration));
+ }
+ MediaAnalyticsItem &setCString(Attr key, const char *value) {
+ return set(key, value);
+ }
- // find & extract values
- // return indicates whether attr exists (and thus value filled in)
- // NULL parameter value suppresses storage of value.
- bool getInt32(Attr, int32_t *value);
- bool getInt64(Attr, int64_t *value);
- bool getDouble(Attr, double *value);
- bool getRate(Attr, int64_t *count, int64_t *duration, double *rate);
- // Caller owns the returned string
- bool getCString(Attr, char **value);
- bool getString(Attr, std::string *value);
+ // fused get/add/set; if attr wasn't there, it's a simple set.
+ // type-mismatch counts as "wasn't there".
+ template<typename S, typename T>
+ MediaAnalyticsItem &add(S key, T value) {
+ allocateProp(key)->add(value);
+ return *this;
+ }
- // parameter indicates whether to close any existing open
- // record with same key before establishing a new record
- // caller retains ownership of 'this'.
- bool selfrecord(bool);
+ MediaAnalyticsItem &addInt32(Attr key, int32_t value) {
+ return add(key, value);
+ }
+ MediaAnalyticsItem &addInt64(Attr key, int64_t value) {
+ return add(key, value);
+ }
+ MediaAnalyticsItem &addDouble(Attr key, double value) {
+ return add(key, value);
+ }
+ MediaAnalyticsItem &addRate(Attr key, int64_t count, int64_t duration) {
+ return add(key, std::make_pair(count, duration));
+ }
+
+ // find & extract values
+ // return indicates whether attr exists (and thus value filled in)
+ // NULL parameter value suppresses storage of value.
+ template<typename S, typename T>
+ bool get(S key, T *value) const {
+ Prop *prop = findProp(key);
+ return prop != nullptr && prop->get(value);
+ }
+
+ bool getInt32(Attr key, int32_t *value) const {
+ return get(key, value);
+ }
+ bool getInt64(Attr key, int64_t *value) const {
+ return get(key, value);
+ }
+ bool getDouble(Attr key, double *value) const {
+ return get(key, value);
+ }
+ bool getRate(Attr key, int64_t *count, int64_t *duration, double *rate) const {
+ std::pair<int64_t, int64_t> value;
+ if (!get(key, &value)) return false;
+ if (count != nullptr) *count = value.first;
+ if (duration != nullptr) *duration = value.second;
+ if (rate != nullptr) {
+ if (value.second != 0) {
+ *rate = (double)value.first / value.second; // TODO: isn't INF OK?
+ } else {
+ *rate = 0.;
+ }
+ }
+ return true;
+ }
+ // Caller owns the returned string
+ bool getCString(Attr key, char **value) const {
+ return get(key, value);
+ }
+ bool getString(Attr key, std::string *value) const {
+ return get(key, value);
+ }
+
+ // Deliver the item to MediaMetrics
bool selfrecord();
// remove indicated attributes and their values
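For context, the hunk above replaces the type-specific setters/getters with templated set()/add()/get() helpers that the legacy names now forward to. A minimal, hypothetical usage sketch follows; the item key "audiotrack" and the attribute names are illustrative placeholders, not part of this change:

    MediaAnalyticsItem *item = MediaAnalyticsItem::create("audiotrack");
    item->setInt32("channels", 2)        // forwards to set<>(key, int32_t)
        .setCString("codec", "aac")      // Prop stores its own strdup() copy
        .addInt64("frames", 1024)        // no prior value: add() behaves like set()
        .addInt64("frames", 1024);       // same type: accumulates to 2048
    int64_t frames = 0;
    if (item->getInt64("frames", &frames)) {  // false if the attr is missing or has another type
        // frames == 2048 here
    }
    item->selfrecord();                  // deliver the item to MediaMetrics
    delete item;                         // caller retains ownership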
@@ -186,8 +225,8 @@
// supports the stable interface
bool dumpAttributes(char **pbuffer, size_t *plength);
- std::string toString();
- std::string toString(int version);
+ std::string toString() const;
+ std::string toString(int version) const;
const char *toCString();
const char *toCString(int version);
@@ -207,64 +246,261 @@
// caller continues to own 'incoming'
bool merge(MediaAnalyticsItem *incoming);
- // enabled 1, disabled 0
- static const char * const EnabledProperty;
- static const char * const EnabledPropertyPersist;
- static const int EnabledProperty_default;
+ // enabled 1, disabled 0
+ static constexpr const char * const EnabledProperty = "media.metrics.enabled";
+ static constexpr const char * const EnabledPropertyPersist = "persist.media.metrics.enabled";
+ static const int EnabledProperty_default = 1;
private:
- // to help validate that A doesn't mess with B's records
- pid_t mPid;
- uid_t mUid;
- std::string mPkgName;
- int64_t mPkgVersionCode;
+ // let's reuse a binder connection
+ static sp<IMediaAnalyticsService> sAnalyticsService;
+ static sp<IMediaAnalyticsService> getInstance();
+ static void dropInstance();
- // let's reuse a binder connection
- static sp<IMediaAnalyticsService> sAnalyticsService;
- static sp<IMediaAnalyticsService> getInstance();
- static void dropInstance();
+ class Prop {
+ friend class MediaMetricsJNI; // TODO: remove this access
+ public:
+ Prop() = default;
+ Prop(const Prop& other) {
+ *this = other;
+ }
+ Prop& operator=(const Prop& other) {
+ if (other.mName != nullptr) {
+ mName = strdup(other.mName);
+ } else {
+ mName = nullptr;
+ }
+ mNameLen = other.mNameLen;
+ mType = other.mType;
+ switch (mType) {
+ case kTypeInt32:
+ u.int32Value = other.u.int32Value;
+ break;
+ case kTypeInt64:
+ u.int64Value = other.u.int64Value;
+ break;
+ case kTypeDouble:
+ u.doubleValue = other.u.doubleValue;
+ break;
+ case kTypeCString:
+ u.CStringValue = strdup(other.u.CStringValue);
+ break;
+ case kTypeRate:
+ u.rate = {other.u.rate.count, other.u.rate.duration};
+ break;
+ case kTypeNone:
+ break;
+ default:
+ // abort?
+ break;
+ }
+ return *this;
+ }
- // tracking information
- SessionID_t mSessionID; // grouping similar records
- nsecs_t mTimestamp; // ns, system_time_monotonic
+ void clear() {
+ free(mName);
+ mName = nullptr;
+ mNameLen = 0;
+ clearValue();
+ }
+ void clearValue() {
+ if (mType == kTypeCString) {
+ free(u.CStringValue);
+ u.CStringValue = nullptr;
+ }
+ mType = kTypeNone;
+ }
- // will this record accept further updates
- bool mFinalized;
+ Type getType() const {
+ return mType;
+ }
- Key mKey;
+ const char *getName() const {
+ return mName;
+ }
- struct Prop {
+ void swap(Prop& other) {
+ std::swap(mName, other.mName);
+ std::swap(mNameLen, other.mNameLen);
+ std::swap(mType, other.mType);
+ std::swap(u, other.u);
+ }
- Type mType;
- const char *mName;
- size_t mNameLen; // the strlen(), doesn't include the null
- union {
- int32_t int32Value;
- int64_t int64Value;
- double doubleValue;
- char *CStringValue;
- struct { int64_t count, duration; } rate;
- } u;
- void setName(const char *name, size_t len);
- };
+ void setName(const char *name, size_t len) {
+ free(mName);
+ if (name != nullptr) {
+ mName = (char *)malloc(len + 1);
+ mNameLen = len;
+ strncpy(mName, name, len);
+ mName[len] = 0;
+ } else {
+ mName = nullptr;
+ mNameLen = 0;
+ }
+ }
- void initProp(Prop *item);
- void clearProp(Prop *item);
- void clearPropValue(Prop *item);
- void copyProp(Prop *dst, const Prop *src);
+ bool isNamed(const char *name, size_t len) const {
+ return len == mNameLen && memcmp(name, mName, len) == 0;
+ }
+
+ // TODO: remove this near-duplicate of isNamed(name, len) above
+ bool isNamed(const char *name) const {
+ return strcmp(name, mName) == 0;
+ }
+
+ template <typename T> bool get(T *value) const = delete;
+ template <>
+ bool get(int32_t *value) const {
+ if (mType != kTypeInt32) return false;
+ if (value != nullptr) *value = u.int32Value;
+ return true;
+ }
+ template <>
+ bool get(int64_t *value) const {
+ if (mType != kTypeInt64) return false;
+ if (value != nullptr) *value = u.int64Value;
+ return true;
+ }
+ template <>
+ bool get(double *value) const {
+ if (mType != kTypeDouble) return false;
+ if (value != nullptr) *value = u.doubleValue;
+ return true;
+ }
+ template <>
+ bool get(char** value) const {
+ if (mType != kTypeCString) return false;
+ if (value != nullptr) *value = strdup(u.CStringValue);
+ return true;
+ }
+ template <>
+ bool get(std::string* value) const {
+ if (mType != kTypeCString) return false;
+ if (value != nullptr) *value = u.CStringValue;
+ return true;
+ }
+ template <>
+ bool get(std::pair<int64_t, int64_t> *value) const {
+ if (mType != kTypeRate) return false;
+ if (value != nullptr) {
+ value->first = u.rate.count;
+ value->second = u.rate.duration;
+ }
+ return true;
+ }
+
+ template <typename T> void set(const T& value) = delete;
+ template <>
+ void set(const int32_t& value) {
+ mType = kTypeInt32;
+ u.int32Value = value;
+ }
+ template <>
+ void set(const int64_t& value) {
+ mType = kTypeInt64;
+ u.int64Value = value;
+ }
+ template <>
+ void set(const double& value) {
+ mType = kTypeDouble;
+ u.doubleValue = value;
+ }
+ template <>
+ void set(const char* const& value) {
+ if (mType == kTypeCString) {
+ free(u.CStringValue);
+ } else {
+ mType = kTypeCString;
+ }
+ if (value == nullptr) {
+ u.CStringValue = nullptr;
+ } else {
+ u.CStringValue = strdup(value);
+ }
+ }
+ template <>
+ void set(const std::pair<int64_t, int64_t> &value) {
+ mType = kTypeRate;
+ u.rate = {value.first, value.second};
+ }
+
+ template <typename T> void add(const T& value) = delete;
+ template <>
+ void add(const int32_t& value) {
+ if (mType == kTypeInt32) {
+ u.int32Value += value;
+ } else {
+ mType = kTypeInt32;
+ u.int32Value = value;
+ }
+ }
+ template <>
+ void add(const int64_t& value) {
+ if (mType == kTypeInt64) {
+ u.int64Value += value;
+ } else {
+ mType = kTypeInt64;
+ u.int64Value = value;
+ }
+ }
+ template <>
+ void add(const double& value) {
+ if (mType == kTypeDouble) {
+ u.doubleValue += value;
+ } else {
+ mType = kTypeDouble;
+ u.doubleValue = value;
+ }
+ }
+ template <>
+ void add(const std::pair<int64_t, int64_t>& value) {
+ if (mType == kTypeRate) {
+ u.rate.count += value.first;
+ u.rate.duration += value.second;
+ } else {
+ mType = kTypeRate;
+ u.rate = {value.first, value.second};
+ }
+ }
+
+ void writeToParcel(Parcel *data) const;
+ void toString(char *buffer, size_t length) const;
+
+ // TODO: make private
+ // private:
+ char *mName = nullptr;
+ size_t mNameLen = 0; // the strlen(), doesn't include the null
+ Type mType = kTypeNone;
+ union {
+ int32_t int32Value;
+ int64_t int64Value;
+ double doubleValue;
+ char *CStringValue;
+ struct { int64_t count, duration; } rate;
+ } u;
+ };
+
+ size_t findPropIndex(const char *name, size_t len) const;
+ Prop *findProp(const char *name) const;
+
enum {
kGrowProps = 10
};
bool growProps(int increment = kGrowProps);
- size_t findPropIndex(const char *name, size_t len);
- Prop *findProp(const char *name);
Prop *allocateProp(const char *name);
bool removeProp(const char *name);
- size_t mPropCount;
- size_t mPropSize;
- Prop *mProps;
+ size_t mPropCount = 0;
+ size_t mPropSize = 0;
+ Prop *mProps = nullptr;
+
+ pid_t mPid = -1;
+ uid_t mUid = -1;
+ std::string mPkgName;
+ int64_t mPkgVersionCode = 0;
+ Key mKey{kKeyNone};
+ nsecs_t mTimestamp = 0;
};
} // namespace android
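One subtlety of the new templated Prop storage above, per the "type-mismatch counts as wasn't there" comment: add() on an attribute of a different type replaces the value instead of accumulating. A small hypothetical illustration (the key and attribute names are placeholders):

    MediaAnalyticsItem *item = MediaAnalyticsItem::create("example");
    item->setInt32("count", 5);              // Prop holds kTypeInt32 == 5
    item->addInt64("count", 7);              // type differs: Prop is reset to kTypeInt64 == 7
    int64_t v = 0;
    item->getInt64("count", &v);             // v == 7, not 12
    int32_t w = 0;
    bool found = item->getInt32("count", &w);  // false: the attr is no longer int32
    delete item;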
diff --git a/media/libmediametrics/include/MediaMetrics.h b/media/libmediametrics/include/MediaMetrics.h
index a4e1ed2..29fb241 100644
--- a/media/libmediametrics/include/MediaMetrics.h
+++ b/media/libmediametrics/include/MediaMetrics.h
@@ -79,6 +79,7 @@
// # of attributes set within this record.
int32_t mediametrics_count(mediametrics_handle_t handle);
+mediametrics_handle_t mediametrics_dup(mediametrics_handle_t handle);
bool mediametrics_selfRecord(mediametrics_handle_t handle);
const char *mediametrics_readable(mediametrics_handle_t handle);
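The added mediametrics_dup() presumably hands C callers an independent copy of a record handle, mirroring MediaAnalyticsItem::dup(). A hedged sketch of intended use, assuming the create/set/delete entry points already declared in this header (mediametrics_create(), mediametrics_setInt32(), mediametrics_delete()); the key and attribute names are placeholders:

    mediametrics_handle_t item = mediametrics_create("examplekey");
    mediametrics_setInt32(item, "exampleattr", 1);
    mediametrics_handle_t copy = mediametrics_dup(item);  // assumed: independent copy of the record
    mediametrics_selfRecord(item);                        // submit the original
    mediametrics_delete(item);                            // each handle is owned by the caller
    mediametrics_delete(copy);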
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
deleted file mode 100644
index dca6bb6..0000000
--- a/media/libmediaplayer2/Android.bp
+++ /dev/null
@@ -1,129 +0,0 @@
-cc_library_headers {
- name: "libmediaplayer2_headers",
- vendor_available: true,
- export_include_dirs: ["include"],
-}
-
-cc_library_static {
- name: "libmediaplayer2",
-
- srcs: [
- "MediaPlayer2AudioOutput.cpp",
- "mediaplayer2.cpp",
- ],
-
- shared_libs: [
- "libandroid_runtime",
- "libaudioclient",
- "libbinder",
- "libbinder_ndk",
- "libcutils",
- "libgui",
- "liblog",
- "libmedia_omx",
- "libui",
- "libutils",
-
- "libcrypto",
- "libmediametrics",
- "libmediandk",
- "libmediandk_utils",
- "libmediautils",
- "libmemunreachable",
- "libnativewindow",
- "libpowermanager",
- "libstagefright_httplive",
- ],
-
- export_shared_lib_headers: [
- "libaudioclient",
- "libbinder",
- "libgui",
- "libmedia_omx",
- ],
-
- header_libs: [
- "media_plugin_headers",
- ],
-
- include_dirs: [
- "frameworks/base/core/jni",
- ],
-
- static_libs: [
- "libmedia_helper",
- "libmediaplayer2-protos",
- "libmedia_player2_util",
- "libprotobuf-cpp-lite",
- "libstagefright_foundation_without_imemory",
- "libstagefright_nuplayer2",
- "libstagefright_player2",
- "libstagefright_rtsp",
- "libstagefright_timedtext2",
- "libmedia2_jni_core",
- ],
-
- export_include_dirs: [
- "include",
- ],
-
- cflags: [
- "-Werror",
- "-Wno-error=deprecated-declarations",
- "-Wall",
- ],
-
- sanitize: {
- misc_undefined: [
- "unsigned-integer-overflow",
- "signed-integer-overflow",
- ],
- cfi: true,
- },
-}
-
-cc_library {
- name: "libmedia2_jni_core",
-
- srcs: [
- "JavaVMHelper.cpp",
- "JAudioTrack.cpp",
- "JMedia2HTTPService.cpp",
- "JMedia2HTTPConnection.cpp",
- ],
-
- header_libs: [
- "libbinder_headers",
- "libnativehelper_header_only",
- ],
-
- shared_libs: [
- "liblog",
- "libutils",
- "libdl",
- ],
-
- include_dirs: [
- "frameworks/av/media/libmedia/include",
- "frameworks/base/core/jni",
- ],
-
- export_include_dirs: [
- "include",
- ],
-
- cflags: [
- "-Werror",
- "-Wno-error=deprecated-declarations",
- "-Wall",
- ],
-
- sanitize: {
- misc_undefined: [
- "unsigned-integer-overflow",
- "signed-integer-overflow",
- ],
- cfi: true,
- },
-
-}
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
deleted file mode 100644
index fab6c64..0000000
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ /dev/null
@@ -1,768 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "JAudioTrack"
-
-#include "media/JAudioAttributes.h"
-#include "media/JAudioFormat.h"
-#include "mediaplayer2/JAudioTrack.h"
-
-#include <android_media_AudioErrors.h>
-#include <mediaplayer2/JavaVMHelper.h>
-
-namespace android {
-
-// TODO: Store Java class/methodID as a member variable in the class.
-// TODO: Add NULL && Exception checks after every JNI call.
-JAudioTrack::JAudioTrack( // < Usages of the arguments are below >
- uint32_t sampleRate, // AudioFormat && bufferSizeInBytes
- audio_format_t format, // AudioFormat && bufferSizeInBytes
- audio_channel_mask_t channelMask, // AudioFormat && bufferSizeInBytes
- callback_t cbf, // Offload
- void* user, // Offload
- size_t frameCount, // bufferSizeInBytes
- int32_t sessionId, // AudioTrack
- const jobject attributes, // AudioAttributes
- float maxRequiredSpeed) { // bufferSizeInBytes
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
-
- jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
- mAudioTrackCls = reinterpret_cast<jclass>(env->NewGlobalRef(jAudioTrackCls));
- env->DeleteLocalRef(jAudioTrackCls);
-
- maxRequiredSpeed = std::min(std::max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
-
- int bufferSizeInBytes = 0;
- if (sampleRate == 0 || frameCount > 0) {
- // Manually calculate buffer size.
- bufferSizeInBytes = audio_channel_count_from_out_mask(channelMask)
- * audio_bytes_per_sample(format) * (frameCount > 0 ? frameCount : 1);
- } else if (sampleRate > 0) {
- // Call Java AudioTrack::getMinBufferSize().
- jmethodID jGetMinBufferSize =
- env->GetStaticMethodID(mAudioTrackCls, "getMinBufferSize", "(III)I");
- bufferSizeInBytes = env->CallStaticIntMethod(mAudioTrackCls, jGetMinBufferSize,
- sampleRate, outChannelMaskFromNative(channelMask), audioFormatFromNative(format));
- }
- bufferSizeInBytes = (int) (bufferSizeInBytes * maxRequiredSpeed);
-
- // Create a Java AudioTrack object through its Builder.
- jclass jBuilderCls = env->FindClass("android/media/AudioTrack$Builder");
- jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
- jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
-
- {
- sp<JObjectHolder> audioAttributesObj;
- if (attributes != NULL) {
- audioAttributesObj = new JObjectHolder(attributes);
- } else {
- audioAttributesObj = new JObjectHolder(
- JAudioAttributes::createAudioAttributesObj(env, NULL));
- }
- jmethodID jSetAudioAttributes = env->GetMethodID(jBuilderCls, "setAudioAttributes",
- "(Landroid/media/AudioAttributes;)Landroid/media/AudioTrack$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj,
- jSetAudioAttributes, audioAttributesObj->getJObject());
- }
-
- jmethodID jSetAudioFormat = env->GetMethodID(jBuilderCls, "setAudioFormat",
- "(Landroid/media/AudioFormat;)Landroid/media/AudioTrack$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetAudioFormat,
- JAudioFormat::createAudioFormatObj(env, sampleRate, format, channelMask));
-
- jmethodID jSetBufferSizeInBytes = env->GetMethodID(jBuilderCls, "setBufferSizeInBytes",
- "(I)Landroid/media/AudioTrack$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetBufferSizeInBytes, bufferSizeInBytes);
-
- // We only use streaming mode of Java AudioTrack.
- jfieldID jModeStream = env->GetStaticFieldID(mAudioTrackCls, "MODE_STREAM", "I");
- jint transferMode = env->GetStaticIntField(mAudioTrackCls, jModeStream);
- jmethodID jSetTransferMode = env->GetMethodID(jBuilderCls, "setTransferMode",
- "(I)Landroid/media/AudioTrack$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetTransferMode,
- transferMode /* Java AudioTrack::MODE_STREAM */);
-
- if (sessionId != 0) {
- jmethodID jSetSessionId = env->GetMethodID(jBuilderCls, "setSessionId",
- "(I)Landroid/media/AudioTrack$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetSessionId, sessionId);
- }
-
- mFlags = AUDIO_OUTPUT_FLAG_NONE;
- if (cbf != NULL) {
- jmethodID jSetOffloadedPlayback = env->GetMethodID(jBuilderCls, "setOffloadedPlayback",
- "(Z)Landroid/media/AudioTrack$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetOffloadedPlayback, true);
- mFlags = AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
- }
-
- jmethodID jBuild = env->GetMethodID(jBuilderCls, "build", "()Landroid/media/AudioTrack;");
- jobject jAudioTrackObj = env->CallObjectMethod(jBuilderObj, jBuild);
- mAudioTrackObj = reinterpret_cast<jobject>(env->NewGlobalRef(jAudioTrackObj));
- env->DeleteLocalRef(jBuilderObj);
-
- if (cbf != NULL) {
- // Set offload mode callback
- jobject jStreamEventCallbackObj = createStreamEventCallback(cbf, user);
- jobject jExecutorObj = createCallbackExecutor();
- jmethodID jSetStreamEventCallback = env->GetMethodID(
- jAudioTrackCls,
- "setStreamEventCallback",
- "(Ljava/util/concurrent/Executor;Landroid/media/AudioTrack$StreamEventCallback;)V");
- env->CallVoidMethod(
- mAudioTrackObj, jSetStreamEventCallback, jExecutorObj, jStreamEventCallbackObj);
- }
-}
-
-JAudioTrack::~JAudioTrack() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- env->DeleteGlobalRef(mAudioTrackCls);
- env->DeleteGlobalRef(mAudioTrackObj);
-}
-
-size_t JAudioTrack::frameCount() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetBufferSizeInFrames = env->GetMethodID(
- mAudioTrackCls, "getBufferSizeInFrames", "()I");
- return env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
-}
-
-size_t JAudioTrack::channelCount() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
- return env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
-}
-
-uint32_t JAudioTrack::latency() {
- // TODO: Currently hard-coded as returning zero.
- return 0;
-}
-
-status_t JAudioTrack::getPosition(uint32_t *position) {
- if (position == NULL) {
- return BAD_VALUE;
- }
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetPlaybackHeadPosition = env->GetMethodID(
- mAudioTrackCls, "getPlaybackHeadPosition", "()I");
- *position = env->CallIntMethod(mAudioTrackObj, jGetPlaybackHeadPosition);
-
- return NO_ERROR;
-}
-
-status_t JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
-
- jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
- jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
-
- jfieldID jFramePosition = env->GetFieldID(jAudioTimeStampCls, "framePosition", "J");
- jfieldID jNanoTime = env->GetFieldID(jAudioTimeStampCls, "nanoTime", "J");
-
- jmethodID jGetTimestamp = env->GetMethodID(mAudioTrackCls,
- "getTimestamp", "(Landroid/media/AudioTimestamp;)Z");
- bool success = env->CallBooleanMethod(mAudioTrackObj, jGetTimestamp, jAudioTimeStampObj);
-
- if (!success) {
- return NO_INIT;
- }
-
- long long framePosition = env->GetLongField(jAudioTimeStampObj, jFramePosition);
- long long nanoTime = env->GetLongField(jAudioTimeStampObj, jNanoTime);
-
- struct timespec ts;
- const long long secondToNano = 1000000000LL; // 1E9
- ts.tv_sec = nanoTime / secondToNano;
- ts.tv_nsec = nanoTime % secondToNano;
- timestamp.mTime = ts;
- timestamp.mPosition = (uint32_t) framePosition;
-
- return NO_ERROR;
-}
-
-status_t JAudioTrack::getTimestamp(ExtendedTimestamp *timestamp __unused) {
- // TODO: Implement this after appropriate Java AudioTrack method is available.
- return NO_ERROR;
-}
-
-status_t JAudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate) {
- // TODO: existing native AudioTrack returns INVALID_OPERATION on offload/direct/fast tracks.
- // Should we do the same thing?
- JNIEnv *env = JavaVMHelper::getJNIEnv();
-
- jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
- jmethodID jPlaybackParamsCtor = env->GetMethodID(jPlaybackParamsCls, "<init>", "()V");
- jobject jPlaybackParamsObj = env->NewObject(jPlaybackParamsCls, jPlaybackParamsCtor);
-
- jmethodID jSetAudioFallbackMode = env->GetMethodID(
- jPlaybackParamsCls, "setAudioFallbackMode", "(I)Landroid/media/PlaybackParams;");
- jPlaybackParamsObj = env->CallObjectMethod(
- jPlaybackParamsObj, jSetAudioFallbackMode, playbackRate.mFallbackMode);
-
- jmethodID jSetAudioStretchMode = env->GetMethodID(
- jPlaybackParamsCls, "setAudioStretchMode", "(I)Landroid/media/PlaybackParams;");
- jPlaybackParamsObj = env->CallObjectMethod(
- jPlaybackParamsObj, jSetAudioStretchMode, playbackRate.mStretchMode);
-
- jmethodID jSetPitch = env->GetMethodID(
- jPlaybackParamsCls, "setPitch", "(F)Landroid/media/PlaybackParams;");
- jPlaybackParamsObj = env->CallObjectMethod(jPlaybackParamsObj, jSetPitch, playbackRate.mPitch);
-
- jmethodID jSetSpeed = env->GetMethodID(
- jPlaybackParamsCls, "setSpeed", "(F)Landroid/media/PlaybackParams;");
- jPlaybackParamsObj = env->CallObjectMethod(jPlaybackParamsObj, jSetSpeed, playbackRate.mSpeed);
-
-
- // Set this Java PlaybackParams object into Java AudioTrack.
- jmethodID jSetPlaybackParams = env->GetMethodID(
- mAudioTrackCls, "setPlaybackParams", "(Landroid/media/PlaybackParams;)V");
- env->CallVoidMethod(mAudioTrackObj, jSetPlaybackParams, jPlaybackParamsObj);
- // TODO: Should we catch the Java IllegalArgumentException?
-
- return NO_ERROR;
-}
-
-const AudioPlaybackRate JAudioTrack::getPlaybackRate() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
-
- jmethodID jGetPlaybackParams = env->GetMethodID(
- mAudioTrackCls, "getPlaybackParams", "()Landroid/media/PlaybackParams;");
- jobject jPlaybackParamsObj = env->CallObjectMethod(mAudioTrackObj, jGetPlaybackParams);
-
- AudioPlaybackRate playbackRate;
- jclass jPlaybackParamsCls = env->FindClass("android/media/PlaybackParams");
-
- jmethodID jGetAudioFallbackMode = env->GetMethodID(
- jPlaybackParamsCls, "getAudioFallbackMode", "()I");
- // TODO: Should we enable passing AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT?
- // The enum is internal only, so it is not defined in PlaybackParmas.java.
- // TODO: Is this right way to convert an int to an enum?
- playbackRate.mFallbackMode = static_cast<AudioTimestretchFallbackMode>(
- env->CallIntMethod(jPlaybackParamsObj, jGetAudioFallbackMode));
-
- jmethodID jGetAudioStretchMode = env->GetMethodID(
- jPlaybackParamsCls, "getAudioStretchMode", "()I");
- playbackRate.mStretchMode = static_cast<AudioTimestretchStretchMode>(
- env->CallIntMethod(jPlaybackParamsObj, jGetAudioStretchMode));
-
- jmethodID jGetPitch = env->GetMethodID(jPlaybackParamsCls, "getPitch", "()F");
- playbackRate.mPitch = env->CallFloatMethod(jPlaybackParamsObj, jGetPitch);
-
- jmethodID jGetSpeed = env->GetMethodID(jPlaybackParamsCls, "getSpeed", "()F");
- playbackRate.mSpeed = env->CallFloatMethod(jPlaybackParamsObj, jGetSpeed);
-
- return playbackRate;
-}
-
-media::VolumeShaper::Status JAudioTrack::applyVolumeShaper(
- const sp<media::VolumeShaper::Configuration>& configuration,
- const sp<media::VolumeShaper::Operation>& operation) {
-
- jobject jConfigurationObj = createVolumeShaperConfigurationObj(configuration);
- jobject jOperationObj = createVolumeShaperOperationObj(operation);
-
- if (jConfigurationObj == NULL || jOperationObj == NULL) {
- return media::VolumeShaper::Status(BAD_VALUE);
- }
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
-
- jmethodID jCreateVolumeShaper = env->GetMethodID(mAudioTrackCls, "createVolumeShaper",
- "(Landroid/media/VolumeShaper$Configuration;)Landroid/media/VolumeShaper;");
- jobject jVolumeShaperObj = env->CallObjectMethod(
- mAudioTrackObj, jCreateVolumeShaper, jConfigurationObj);
-
- jclass jVolumeShaperCls = env->FindClass("android/media/VolumeShaper");
- jmethodID jApply = env->GetMethodID(jVolumeShaperCls, "apply",
- "(Landroid/media/VolumeShaper$Operation;)V");
- env->CallVoidMethod(jVolumeShaperObj, jApply, jOperationObj);
-
- return media::VolumeShaper::Status(NO_ERROR);
-}
-
-status_t JAudioTrack::setAuxEffectSendLevel(float level) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jSetAuxEffectSendLevel = env->GetMethodID(
- mAudioTrackCls, "setAuxEffectSendLevel", "(F)I");
- int result = env->CallIntMethod(mAudioTrackObj, jSetAuxEffectSendLevel, level);
- return javaToNativeStatus(result);
-}
-
-status_t JAudioTrack::attachAuxEffect(int effectId) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jAttachAuxEffect = env->GetMethodID(mAudioTrackCls, "attachAuxEffect", "(I)I");
- int result = env->CallIntMethod(mAudioTrackObj, jAttachAuxEffect, effectId);
- return javaToNativeStatus(result);
-}
-
-status_t JAudioTrack::setVolume(float left, float right) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- // TODO: Java setStereoVolume is deprecated. Do we really need this method?
- jmethodID jSetStereoVolume = env->GetMethodID(mAudioTrackCls, "setStereoVolume", "(FF)I");
- int result = env->CallIntMethod(mAudioTrackObj, jSetStereoVolume, left, right);
- return javaToNativeStatus(result);
-}
-
-status_t JAudioTrack::setVolume(float volume) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jSetVolume = env->GetMethodID(mAudioTrackCls, "setVolume", "(F)I");
- int result = env->CallIntMethod(mAudioTrackObj, jSetVolume, volume);
- return javaToNativeStatus(result);
-}
-
-status_t JAudioTrack::start() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jPlay = env->GetMethodID(mAudioTrackCls, "play", "()V");
- // TODO: Should we catch the Java IllegalStateException from play()?
- env->CallVoidMethod(mAudioTrackObj, jPlay);
- return NO_ERROR;
-}
-
-ssize_t JAudioTrack::write(const void* buffer, size_t size, bool blocking) {
- if (buffer == NULL) {
- return BAD_VALUE;
- }
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jbyteArray jAudioData = env->NewByteArray(size);
- env->SetByteArrayRegion(jAudioData, 0, size, (jbyte *) buffer);
-
- jclass jByteBufferCls = env->FindClass("java/nio/ByteBuffer");
- jmethodID jWrap = env->GetStaticMethodID(jByteBufferCls, "wrap", "([B)Ljava/nio/ByteBuffer;");
- jobject jByteBufferObj = env->CallStaticObjectMethod(jByteBufferCls, jWrap, jAudioData);
-
- int writeMode = 0;
- if (blocking) {
- jfieldID jWriteBlocking = env->GetStaticFieldID(mAudioTrackCls, "WRITE_BLOCKING", "I");
- writeMode = env->GetStaticIntField(mAudioTrackCls, jWriteBlocking);
- } else {
- jfieldID jWriteNonBlocking = env->GetStaticFieldID(
- mAudioTrackCls, "WRITE_NON_BLOCKING", "I");
- writeMode = env->GetStaticIntField(mAudioTrackCls, jWriteNonBlocking);
- }
-
- jmethodID jWrite = env->GetMethodID(mAudioTrackCls, "write", "(Ljava/nio/ByteBuffer;II)I");
- int result = env->CallIntMethod(mAudioTrackObj, jWrite, jByteBufferObj, size, writeMode);
-
- if (result >= 0) {
- return result;
- } else {
- return javaToNativeStatus(result);
- }
-}
-
-void JAudioTrack::stop() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jStop = env->GetMethodID(mAudioTrackCls, "stop", "()V");
- env->CallVoidMethod(mAudioTrackObj, jStop);
- // TODO: Should we catch IllegalStateException?
-}
-
-// TODO: Is the right implementation?
-bool JAudioTrack::stopped() const {
- return !isPlaying();
-}
-
-void JAudioTrack::flush() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jFlush = env->GetMethodID(mAudioTrackCls, "flush", "()V");
- env->CallVoidMethod(mAudioTrackObj, jFlush);
-}
-
-void JAudioTrack::pause() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jPause = env->GetMethodID(mAudioTrackCls, "pause", "()V");
- env->CallVoidMethod(mAudioTrackObj, jPause);
- // TODO: Should we catch IllegalStateException?
-}
-
-bool JAudioTrack::isPlaying() const {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetPlayState = env->GetMethodID(mAudioTrackCls, "getPlayState", "()I");
- int currentPlayState = env->CallIntMethod(mAudioTrackObj, jGetPlayState);
-
- // TODO: In Java AudioTrack, there is no STOPPING state.
- // This means while stopping, isPlaying() will return different value in two class.
- // - in existing native AudioTrack: true
- // - in JAudioTrack: false
- // If not okay, also modify the implementation of stopped().
- jfieldID jPlayStatePlaying = env->GetStaticFieldID(mAudioTrackCls, "PLAYSTATE_PLAYING", "I");
- int statePlaying = env->GetStaticIntField(mAudioTrackCls, jPlayStatePlaying);
- return currentPlayState == statePlaying;
-}
-
-uint32_t JAudioTrack::getSampleRate() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetSampleRate = env->GetMethodID(mAudioTrackCls, "getSampleRate", "()I");
- return env->CallIntMethod(mAudioTrackObj, jGetSampleRate);
-}
-
-status_t JAudioTrack::getBufferDurationInUs(int64_t *duration) {
- if (duration == nullptr) {
- return BAD_VALUE;
- }
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetBufferSizeInFrames = env->GetMethodID(
- mAudioTrackCls, "getBufferSizeInFrames", "()I");
- int bufferSizeInFrames = env->CallIntMethod(mAudioTrackObj, jGetBufferSizeInFrames);
-
- const double secondToMicro = 1000000LL; // 1E6
- int sampleRate = JAudioTrack::getSampleRate();
- float speed = JAudioTrack::getPlaybackRate().mSpeed;
-
- *duration = (int64_t) (bufferSizeInFrames * secondToMicro / (sampleRate * speed));
- return NO_ERROR;
-}
-
-audio_format_t JAudioTrack::format() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
- int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
- return audioFormatToNative(javaFormat);
-}
-
-size_t JAudioTrack::frameSize() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetFormat = env->GetMethodID(mAudioTrackCls,
- "getFormat", "()Landroid/media/AudioFormat;");
- jobject jAudioFormatObj = env->CallObjectMethod(mAudioTrackObj, jGetFormat);
-
- jclass jAudioFormatCls = env->FindClass("android/media/AudioFormat");
- jmethodID jGetFrameSizeInBytes = env->GetMethodID(
- jAudioFormatCls, "getFrameSizeInBytes", "()I");
- jint javaFrameSizeInBytes = env->CallIntMethod(jAudioFormatObj, jGetFrameSizeInBytes);
-
- return (size_t)javaFrameSizeInBytes;
-}
-
-status_t JAudioTrack::dump(int fd, const Vector<String16>& args __unused) const
-{
- String8 result;
-
- result.append(" JAudioTrack::dump\n");
-
- // TODO: Remove logs that includes unavailable information from below.
-// result.appendFormat(" status(%d), state(%d), session Id(%d), flags(%#x)\n",
-// mStatus, mState, mSessionId, mFlags);
-// result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
-// format(), mChannelMask, channelCount());
-// result.appendFormat(" sample rate(%u), original sample rate(%u), speed(%f)\n",
-// getSampleRate(), mOriginalSampleRate, mPlaybackRate.mSpeed);
-// result.appendFormat(" frame count(%zu), req. frame count(%zu)\n",
-// frameCount(), mReqFrameCount);
-// result.appendFormat(" notif. frame count(%u), req. notif. frame count(%u),"
-// " req. notif. per buff(%u)\n",
-// mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
-// result.appendFormat(" latency (%d), selected device Id(%d), routed device Id(%d)\n",
-// latency(), mSelectedDeviceId, getRoutedDeviceId());
-// result.appendFormat(" output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
-// mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
- ::write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-jobject JAudioTrack::getRoutedDevice() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
- "()Landroid/media/AudioDeviceInfo;");
- return env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
-}
-
-int32_t JAudioTrack::getAudioSessionId() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetAudioSessionId = env->GetMethodID(mAudioTrackCls, "getAudioSessionId", "()I");
- jint sessionId = env->CallIntMethod(mAudioTrackObj, jGetAudioSessionId);
- return sessionId;
-}
-
-status_t JAudioTrack::setPreferredDevice(jobject device) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jSetPreferredDeviceId = env->GetMethodID(mAudioTrackCls, "setPreferredDevice",
- "(Landroid/media/AudioDeviceInfo;)Z");
- jboolean result = env->CallBooleanMethod(mAudioTrackObj, jSetPreferredDeviceId, device);
- return result == true ? NO_ERROR : BAD_VALUE;
-}
-
-audio_stream_type_t JAudioTrack::getAudioStreamType() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jGetAudioAttributes = env->GetMethodID(mAudioTrackCls, "getAudioAttributes",
- "()Landroid/media/AudioAttributes;");
- jobject jAudioAttributes = env->CallObjectMethod(mAudioTrackObj, jGetAudioAttributes);
- jclass jAudioAttributesCls = env->FindClass("android/media/AudioAttributes");
- jmethodID jGetVolumeControlStream = env->GetMethodID(jAudioAttributesCls,
- "getVolumeControlStream", "()I");
- int javaAudioStreamType = env->CallIntMethod(jAudioAttributes, jGetVolumeControlStream);
- return (audio_stream_type_t)javaAudioStreamType;
-}
-
-status_t JAudioTrack::pendingDuration(int32_t *msec) {
- if (msec == nullptr) {
- return BAD_VALUE;
- }
-
- bool isPurePcmData = audio_is_linear_pcm(format()) && (getFlags() & AUDIO_FLAG_HW_AV_SYNC) == 0;
- if (!isPurePcmData) {
- return INVALID_OPERATION;
- }
-
- // TODO: Need to know the difference btw. client and server time.
- // If getTimestamp(ExtendedTimestamp) is ready, and un-comment below and modify appropriately.
- // (copied from AudioTrack.cpp)
-
-// ExtendedTimestamp ets;
-// ExtendedTimestamp::LOCATION location = ExtendedTimestamp::LOCATION_SERVER;
-// if (getTimestamp_l(&ets) == OK && ets.mTimeNs[location] > 0) {
-// int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
-// - ets.mPosition[location];
-// if (diff < 0) {
-// *msec = 0;
-// } else {
-// // ms is the playback time by frames
-// int64_t ms = (int64_t)((double)diff * 1000 /
-// ((double)mSampleRate * mPlaybackRate.mSpeed));
-// // clockdiff is the timestamp age (negative)
-// int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
-// ets.mTimeNs[location]
-// + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
-// - systemTime(SYSTEM_TIME_MONOTONIC);
-//
-// //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff);
-// static const int NANOS_PER_MILLIS = 1000000;
-// *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
-// }
-// return NO_ERROR;
-// }
-
- return NO_ERROR;
-}
-
-status_t JAudioTrack::addAudioDeviceCallback(jobject listener, jobject handler) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jAddOnRoutingChangedListener = env->GetMethodID(mAudioTrackCls,
- "addOnRoutingChangedListener",
- "(Landroid/media/AudioRouting$OnRoutingChangedListener;Landroid/os/Handler;)V");
- env->CallVoidMethod(mAudioTrackObj, jAddOnRoutingChangedListener, listener, handler);
- return NO_ERROR;
-}
-
-status_t JAudioTrack::removeAudioDeviceCallback(jobject listener) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jmethodID jRemoveOnRoutingChangedListener = env->GetMethodID(mAudioTrackCls,
- "removeOnRoutingChangedListener",
- "(Landroid/media/AudioRouting$OnRoutingChangedListener;)V");
- env->CallVoidMethod(mAudioTrackObj, jRemoveOnRoutingChangedListener, listener);
- return NO_ERROR;
-}
-
-void JAudioTrack::registerRoutingDelegates(
- Vector<std::pair<sp<JObjectHolder>, sp<JObjectHolder>>>& routingDelegates) {
- for (auto it = routingDelegates.begin(); it != routingDelegates.end(); it++) {
- addAudioDeviceCallback(it->second->getJObject(), getHandler(it->second->getJObject()));
- }
-}
-
-/////////////////////////////////////////////////////////////
-/// Static methods begin ///
-/////////////////////////////////////////////////////////////
-jobject JAudioTrack::getListener(const jobject routingDelegateObj) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass jRoutingDelegateCls = env->FindClass("android/media/RoutingDelegate");
- jmethodID jGetListener = env->GetMethodID(jRoutingDelegateCls,
- "getListener", "()Landroid/media/AudioRouting$OnRoutingChangedListener;");
- return env->CallObjectMethod(routingDelegateObj, jGetListener);
-}
-
-jobject JAudioTrack::getHandler(const jobject routingDelegateObj) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass jRoutingDelegateCls = env->FindClass("android/media/RoutingDelegate");
- jmethodID jGetHandler = env->GetMethodID(jRoutingDelegateCls,
- "getHandler", "()Landroid/os/Handler;");
- return env->CallObjectMethod(routingDelegateObj, jGetHandler);
-}
-
-jobject JAudioTrack::findByKey(
- Vector<std::pair<sp<JObjectHolder>, sp<JObjectHolder>>>& mp, const jobject key) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- for (auto it = mp.begin(); it != mp.end(); it++) {
- if (env->IsSameObject(it->first->getJObject(), key)) {
- return it->second->getJObject();
- }
- }
- return nullptr;
-}
-
-void JAudioTrack::eraseByKey(
- Vector<std::pair<sp<JObjectHolder>, sp<JObjectHolder>>>& mp, const jobject key) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- for (auto it = mp.begin(); it != mp.end(); it++) {
- if (env->IsSameObject(it->first->getJObject(), key)) {
- mp.erase(it);
- return;
- }
- }
-}
-
-/////////////////////////////////////////////////////////////
-/// Private method begins ///
-/////////////////////////////////////////////////////////////
-
-jobject JAudioTrack::createVolumeShaperConfigurationObj(
- const sp<media::VolumeShaper::Configuration>& config) {
-
- // TODO: Java VolumeShaper's setId() / setOptionFlags() are hidden.
- if (config == NULL || config->getType() == media::VolumeShaper::Configuration::TYPE_ID) {
- return NULL;
- }
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
-
- // Referenced "android_media_VolumeShaper.h".
- jfloatArray xarray = nullptr;
- jfloatArray yarray = nullptr;
- if (config->getType() == media::VolumeShaper::Configuration::TYPE_SCALE) {
- // convert curve arrays
- xarray = env->NewFloatArray(config->size());
- yarray = env->NewFloatArray(config->size());
- float * const x = env->GetFloatArrayElements(xarray, nullptr /* isCopy */);
- float * const y = env->GetFloatArrayElements(yarray, nullptr /* isCopy */);
- float *xptr = x, *yptr = y;
- for (const auto &pt : *config.get()) {
- *xptr++ = pt.first;
- *yptr++ = pt.second;
- }
- env->ReleaseFloatArrayElements(xarray, x, 0 /* mode */);
- env->ReleaseFloatArrayElements(yarray, y, 0 /* mode */);
- }
-
- jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Configuration$Builder");
- jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
- jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
-
- jmethodID jSetDuration = env->GetMethodID(jBuilderCls, "setDuration",
- "(L)Landroid/media/VolumeShaper$Configuration$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jSetDuration, (jlong) config->getDurationMs());
-
- jmethodID jSetInterpolatorType = env->GetMethodID(jBuilderCls, "setInterpolatorType",
- "(I)Landroid/media/VolumeShaper$Configuration$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jSetInterpolatorType,
- config->getInterpolatorType());
-
- jmethodID jSetCurve = env->GetMethodID(jBuilderCls, "setCurve",
- "([F[F)Landroid/media/VolumeShaper$Configuration$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jSetCurve, xarray, yarray);
-
- jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
- "()Landroid/media/VolumeShaper$Configuration;");
- return env->CallObjectMethod(jBuilderObj, jBuild);
-}
-
-jobject JAudioTrack::createVolumeShaperOperationObj(
- const sp<media::VolumeShaper::Operation>& operation) {
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
-
- jclass jBuilderCls = env->FindClass("android/media/VolumeShaper$Operation$Builder");
- jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
- jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
-
- // Set XOffset
- jmethodID jSetXOffset = env->GetMethodID(jBuilderCls, "setXOffset",
- "(F)Landroid/media/VolumeShaper$Operation$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jSetXOffset, operation->getXOffset());
-
- int32_t flags = operation->getFlags();
-
- if (operation->getReplaceId() >= 0) {
- jmethodID jReplace = env->GetMethodID(jBuilderCls, "replace",
- "(IB)Landroid/media/VolumeShaper$Operation$Builder;");
- bool join = (flags | media::VolumeShaper::Operation::FLAG_JOIN) != 0;
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jReplace, operation->getReplaceId(), join);
- }
-
- if (flags | media::VolumeShaper::Operation::FLAG_REVERSE) {
- jmethodID jReverse = env->GetMethodID(jBuilderCls, "reverse",
- "()Landroid/media/VolumeShaper$Operation$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jReverse);
- }
-
- // TODO: VolumeShaper Javadoc says "Do not call terminate() directly". Can we call this?
- if (flags | media::VolumeShaper::Operation::FLAG_TERMINATE) {
- jmethodID jTerminate = env->GetMethodID(jBuilderCls, "terminate",
- "()Landroid/media/VolumeShaper$Operation$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jTerminate);
- }
-
- if (flags | media::VolumeShaper::Operation::FLAG_DELAY) {
- jmethodID jDefer = env->GetMethodID(jBuilderCls, "defer",
- "()Landroid/media/VolumeShaper$Operation$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jDefer);
- }
-
- if (flags | media::VolumeShaper::Operation::FLAG_CREATE_IF_NECESSARY) {
- jmethodID jCreateIfNeeded = env->GetMethodID(jBuilderCls, "createIfNeeded",
- "()Landroid/media/VolumeShaper$Operation$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderCls, jCreateIfNeeded);
- }
-
- // TODO: Handle error case (can it be NULL?)
- jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
- "()Landroid/media/VolumeShaper$Operation;");
- return env->CallObjectMethod(jBuilderObj, jBuild);
-}
-
-jobject JAudioTrack::createStreamEventCallback(callback_t cbf, void* user) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass jCallbackCls = env->FindClass("android/media/MediaPlayer2$StreamEventCallback");
- jmethodID jCallbackCtor = env->GetMethodID(jCallbackCls, "<init>", "(JJJ)V");
- jobject jCallbackObj = env->NewObject(jCallbackCls, jCallbackCtor, this, cbf, user);
- return jCallbackObj;
-}
-
-jobject JAudioTrack::createCallbackExecutor() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass jExecutorsCls = env->FindClass("java/util/concurrent/Executors");
- jmethodID jNewSingleThreadExecutor = env->GetStaticMethodID(jExecutorsCls,
- "newSingleThreadExecutor", "()Ljava/util/concurrent/ExecutorService;");
- jobject jSingleThreadExecutorObj =
- env->CallStaticObjectMethod(jExecutorsCls, jNewSingleThreadExecutor);
- return jSingleThreadExecutorObj;
-}
-
-status_t JAudioTrack::javaToNativeStatus(int javaStatus) {
- switch (javaStatus) {
- case AUDIO_JAVA_SUCCESS:
- return NO_ERROR;
- case AUDIO_JAVA_BAD_VALUE:
- return BAD_VALUE;
- case AUDIO_JAVA_INVALID_OPERATION:
- return INVALID_OPERATION;
- case AUDIO_JAVA_PERMISSION_DENIED:
- return PERMISSION_DENIED;
- case AUDIO_JAVA_NO_INIT:
- return NO_INIT;
- case AUDIO_JAVA_WOULD_BLOCK:
- return WOULD_BLOCK;
- case AUDIO_JAVA_DEAD_OBJECT:
- return DEAD_OBJECT;
- default:
- return UNKNOWN_ERROR;
- }
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/JMedia2HTTPConnection.cpp b/media/libmediaplayer2/JMedia2HTTPConnection.cpp
deleted file mode 100644
index e1baa10..0000000
--- a/media/libmediaplayer2/JMedia2HTTPConnection.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright 2017, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "JMedia2HTTPConnection"
-#include <utils/Log.h>
-
-#include <mediaplayer2/JavaVMHelper.h>
-#include <mediaplayer2/JMedia2HTTPConnection.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <nativehelper/scoped_local_ref.h>
-
-#include "log/log.h"
-#include "jni.h"
-
-namespace android {
-
-static const size_t kBufferSize = 32768;
-
-JMedia2HTTPConnection::JMedia2HTTPConnection(JNIEnv *env, jobject thiz) {
- mMedia2HTTPConnectionObj = env->NewGlobalRef(thiz);
- CHECK(mMedia2HTTPConnectionObj != NULL);
-
- ScopedLocalRef<jclass> media2HTTPConnectionClass(
- env, env->GetObjectClass(mMedia2HTTPConnectionObj));
- CHECK(media2HTTPConnectionClass.get() != NULL);
-
- mConnectMethod = env->GetMethodID(
- media2HTTPConnectionClass.get(),
- "connect",
- "(Ljava/lang/String;Ljava/lang/String;)Z");
- CHECK(mConnectMethod != NULL);
-
- mDisconnectMethod = env->GetMethodID(
- media2HTTPConnectionClass.get(),
- "disconnect",
- "()V");
- CHECK(mDisconnectMethod != NULL);
-
- mReadAtMethod = env->GetMethodID(
- media2HTTPConnectionClass.get(),
- "readAt",
- "(J[BI)I");
- CHECK(mReadAtMethod != NULL);
-
- mGetSizeMethod = env->GetMethodID(
- media2HTTPConnectionClass.get(),
- "getSize",
- "()J");
- CHECK(mGetSizeMethod != NULL);
-
- mGetMIMETypeMethod = env->GetMethodID(
- media2HTTPConnectionClass.get(),
- "getMIMEType",
- "()Ljava/lang/String;");
- CHECK(mGetMIMETypeMethod != NULL);
-
- mGetUriMethod = env->GetMethodID(
- media2HTTPConnectionClass.get(),
- "getUri",
- "()Ljava/lang/String;");
- CHECK(mGetUriMethod != NULL);
-
- ScopedLocalRef<jbyteArray> tmp(
- env, env->NewByteArray(kBufferSize));
- mByteArrayObj = (jbyteArray)env->NewGlobalRef(tmp.get());
- CHECK(mByteArrayObj != NULL);
-}
-
-JMedia2HTTPConnection::~JMedia2HTTPConnection() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- env->DeleteGlobalRef(mMedia2HTTPConnectionObj);
- env->DeleteGlobalRef(mByteArrayObj);
-}
-
-bool JMedia2HTTPConnection::connect(
- const char *uri, const KeyedVector<String8, String8> *headers) {
- String8 tmp("");
- if (headers != NULL) {
- for (size_t i = 0; i < headers->size(); ++i) {
- tmp.append(headers->keyAt(i));
- tmp.append(String8(": "));
- tmp.append(headers->valueAt(i));
- tmp.append(String8("\r\n"));
- }
- }
-
- JNIEnv* env = JavaVMHelper::getJNIEnv();
- jstring juri = env->NewStringUTF(uri);
- jstring jheaders = env->NewStringUTF(tmp.string());
-
- jboolean ret =
- env->CallBooleanMethod(mMedia2HTTPConnectionObj, mConnectMethod, juri, jheaders);
-
- env->DeleteLocalRef(juri);
- env->DeleteLocalRef(jheaders);
-
- return (bool)ret;
-}
-
-void JMedia2HTTPConnection::disconnect() {
- JNIEnv* env = JavaVMHelper::getJNIEnv();
- env->CallVoidMethod(mMedia2HTTPConnectionObj, mDisconnectMethod);
-}
-
-ssize_t JMedia2HTTPConnection::readAt(off64_t offset, void *data, size_t size) {
- JNIEnv* env = JavaVMHelper::getJNIEnv();
-
- if (size > kBufferSize) {
- size = kBufferSize;
- }
-
- jint n = env->CallIntMethod(
- mMedia2HTTPConnectionObj, mReadAtMethod, (jlong)offset, mByteArrayObj, (jint)size);
-
- if (n > 0) {
- env->GetByteArrayRegion(
- mByteArrayObj,
- 0,
- n,
- (jbyte *)data);
- }
-
- return n;
-}
-
-off64_t JMedia2HTTPConnection::getSize() {
- JNIEnv* env = JavaVMHelper::getJNIEnv();
- return (off64_t)(env->CallLongMethod(mMedia2HTTPConnectionObj, mGetSizeMethod));
-}
-
-status_t JMedia2HTTPConnection::getMIMEType(String8 *mimeType) {
- JNIEnv* env = JavaVMHelper::getJNIEnv();
- jstring jmime = (jstring)env->CallObjectMethod(mMedia2HTTPConnectionObj, mGetMIMETypeMethod);
- jboolean flag = env->ExceptionCheck();
- if (flag) {
- env->ExceptionClear();
- return UNKNOWN_ERROR;
- }
-
- const char *str = env->GetStringUTFChars(jmime, 0);
- if (str != NULL) {
- *mimeType = String8(str);
- } else {
- *mimeType = "application/octet-stream";
- }
- env->ReleaseStringUTFChars(jmime, str);
- return OK;
-}
-
-status_t JMedia2HTTPConnection::getUri(String8 *uri) {
- JNIEnv* env = JavaVMHelper::getJNIEnv();
- jstring juri = (jstring)env->CallObjectMethod(mMedia2HTTPConnectionObj, mGetUriMethod);
- jboolean flag = env->ExceptionCheck();
- if (flag) {
- env->ExceptionClear();
- return UNKNOWN_ERROR;
- }
-
- const char *str = env->GetStringUTFChars(juri, 0);
- *uri = String8(str);
- env->ReleaseStringUTFChars(juri, str);
- return OK;
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/JMedia2HTTPService.cpp b/media/libmediaplayer2/JMedia2HTTPService.cpp
deleted file mode 100644
index 20e3573..0000000
--- a/media/libmediaplayer2/JMedia2HTTPService.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2017, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "JMedia2HTTPService"
-#include <utils/Log.h>
-
-#include <jni.h>
-
-#include <mediaplayer2/JavaVMHelper.h>
-#include <mediaplayer2/JMedia2HTTPService.h>
-#include <mediaplayer2/JMedia2HTTPConnection.h>
-#include <media/stagefright/foundation/ADebug.h>
-
-#include <nativehelper/scoped_local_ref.h>
-
-namespace android {
-
-JMedia2HTTPService::JMedia2HTTPService(JNIEnv *env, jobject thiz) {
- mMedia2HTTPServiceObj = env->NewGlobalRef(thiz);
- CHECK(mMedia2HTTPServiceObj != NULL);
-
- ScopedLocalRef<jclass> media2HTTPServiceClass(env, env->GetObjectClass(mMedia2HTTPServiceObj));
- CHECK(media2HTTPServiceClass.get() != NULL);
-
- mMakeHTTPConnectionMethod = env->GetMethodID(
- media2HTTPServiceClass.get(),
- "makeHTTPConnection",
- "()Landroid/media/Media2HTTPConnection;");
- CHECK(mMakeHTTPConnectionMethod != NULL);
-}
-
-JMedia2HTTPService::~JMedia2HTTPService() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- env->DeleteGlobalRef(mMedia2HTTPServiceObj);
-}
-
-sp<MediaHTTPConnection> JMedia2HTTPService::makeHTTPConnection() {
- JNIEnv* env = JavaVMHelper::getJNIEnv();
- jobject media2HTTPConnectionObj =
- env->CallObjectMethod(mMedia2HTTPServiceObj, mMakeHTTPConnectionMethod);
-
- return new JMedia2HTTPConnection(env, media2HTTPConnectionObj);
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/JavaVMHelper.cpp b/media/libmediaplayer2/JavaVMHelper.cpp
deleted file mode 100644
index 8d03ed0..0000000
--- a/media/libmediaplayer2/JavaVMHelper.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "JavaVMHelper"
-
-#include "mediaplayer2/JavaVMHelper.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <utils/threads.h>
-
-#include <stdlib.h>
-
-namespace android {
-
-// static
-std::atomic<JavaVM *> JavaVMHelper::sJavaVM(NULL);
-
-/*
- * Makes the current thread visible to the VM.
- *
- * The JNIEnv pointer returned is only valid for the current thread, and
- * thus must be tucked into thread-local storage.
- */
-static int javaAttachThread(const char* threadName, JNIEnv** pEnv) {
- JavaVMAttachArgs args;
- JavaVM* vm;
- jint result;
-
- vm = JavaVMHelper::getJavaVM();
- if (vm == NULL) {
- return JNI_ERR;
- }
-
- args.version = JNI_VERSION_1_4;
- args.name = (char*) threadName;
- args.group = NULL;
-
- result = vm->AttachCurrentThread(pEnv, (void*) &args);
- if (result != JNI_OK) {
- ALOGI("NOTE: attach of thread '%s' failed\n", threadName);
- }
-
- return result;
-}
-
-/*
- * Detach the current thread from the set visible to the VM.
- */
-static int javaDetachThread(void) {
- JavaVM* vm;
- jint result;
-
- vm = JavaVMHelper::getJavaVM();
- if (vm == NULL) {
- return JNI_ERR;
- }
-
- result = vm->DetachCurrentThread();
- if (result != JNI_OK) {
- ALOGE("ERROR: thread detach failed\n");
- }
- return result;
-}
-
-/*
- * When starting a native thread that will be visible from the VM, we
- * bounce through this to get the right attach/detach action.
- * Note that this function calls free(args)
- */
-static int javaThreadShell(void* args) {
- void* start = ((void**)args)[0];
- void* userData = ((void **)args)[1];
- char* name = (char*) ((void **)args)[2]; // we own this storage
- free(args);
- JNIEnv* env;
- int result;
-
- /* hook us into the VM */
- if (javaAttachThread(name, &env) != JNI_OK) {
- return -1;
- }
-
- /* start the thread running */
- result = (*(android_thread_func_t)start)(userData);
-
- /* unhook us */
- javaDetachThread();
- free(name);
-
- return result;
-}
-
-/*
- * This is invoked from androidCreateThreadEtc() via the callback
- * set with androidSetCreateThreadFunc().
- *
- * We need to create the new thread in such a way that it gets hooked
- * into the VM before it really starts executing.
- */
-static int javaCreateThreadEtc(
- android_thread_func_t entryFunction,
- void* userData,
- const char* threadName,
- int32_t threadPriority,
- size_t threadStackSize,
- android_thread_id_t* threadId) {
- void** args = (void**) malloc(3 * sizeof(void*)); // javaThreadShell must free
- int result;
-
- LOG_ALWAYS_FATAL_IF(threadName == nullptr, "threadName not provided to javaCreateThreadEtc");
-
- args[0] = (void*) entryFunction;
- args[1] = userData;
- args[2] = (void*) strdup(threadName); // javaThreadShell must free
-
- result = androidCreateRawThreadEtc(javaThreadShell, args,
- threadName, threadPriority, threadStackSize, threadId);
- return result;
-}
-
-// static
-JNIEnv *JavaVMHelper::getJNIEnv() {
- JNIEnv *env;
- JavaVM *vm = sJavaVM.load();
- CHECK(vm != NULL);
-
- if (vm->GetEnv((void **)&env, JNI_VERSION_1_4) != JNI_OK) {
- return NULL;
- }
-
- return env;
-}
-
-//static
-JavaVM *JavaVMHelper::getJavaVM() {
- return sJavaVM.load();
-}
-
-// static
-void JavaVMHelper::setJavaVM(JavaVM *vm) {
- sJavaVM.store(vm);
-
- // Ensure that Thread(/*canCallJava*/ true) in libutils is attached to the VM.
- // This is supposed to be done by runtime, but when libutils is used with linker
- // namespace, CreateThreadFunc should be initialized separately within the namespace.
- androidSetCreateThreadFunc((android_create_thread_fn) javaCreateThreadEtc);
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
deleted file mode 100644
index b4fa0c1..0000000
--- a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
+++ /dev/null
@@ -1,656 +0,0 @@
-/*
-**
-** Copyright 2018, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaPlayer2AudioOutput"
-#include <mediaplayer2/MediaPlayer2AudioOutput.h>
-
-#include <cutils/properties.h> // for property_get
-#include <utils/Log.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace {
-
-const float kMaxRequiredSpeed = 8.0f; // for PCM tracks allow up to 8x speedup.
-
-} // anonymous namespace
-
-namespace android {
-
-// TODO: Find real cause of Audio/Video delay in PV framework and remove this workaround
-/* static */ int MediaPlayer2AudioOutput::mMinBufferCount = 4;
-/* static */ bool MediaPlayer2AudioOutput::mIsOnEmulator = false;
-
-status_t MediaPlayer2AudioOutput::dump(int fd, const Vector<String16>& args) const {
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
- result.append(" MediaPlayer2AudioOutput\n");
- snprintf(buffer, 255, " volume(%f)\n", mVolume);
- result.append(buffer);
- snprintf(buffer, 255, " msec per frame(%f), latency (%d)\n",
- mMsecsPerFrame, (mJAudioTrack != nullptr) ? mJAudioTrack->latency() : -1);
- result.append(buffer);
- snprintf(buffer, 255, " aux effect id(%d), send level (%f)\n",
- mAuxEffectId, mSendLevel);
- result.append(buffer);
-
- ::write(fd, result.string(), result.size());
- if (mJAudioTrack != nullptr) {
- mJAudioTrack->dump(fd, args);
- }
- return NO_ERROR;
-}
-
-MediaPlayer2AudioOutput::MediaPlayer2AudioOutput(int32_t sessionId, uid_t uid, int pid,
- const jobject attributes)
- : mCallback(nullptr),
- mCallbackCookie(nullptr),
- mCallbackData(nullptr),
- mVolume(1.0),
- mPlaybackRate(AUDIO_PLAYBACK_RATE_DEFAULT),
- mSampleRateHz(0),
- mMsecsPerFrame(0),
- mFrameSize(0),
- mSessionId(sessionId),
- mUid(uid),
- mPid(pid),
- mSendLevel(0.0),
- mAuxEffectId(0),
- mFlags(AUDIO_OUTPUT_FLAG_NONE) {
- ALOGV("MediaPlayer2AudioOutput(%d)", sessionId);
-
- if (attributes != nullptr) {
- mAttributes = new JObjectHolder(attributes);
- }
-
- setMinBufferCount();
- mRoutingDelegates.clear();
-}
-
-MediaPlayer2AudioOutput::~MediaPlayer2AudioOutput() {
- close();
- delete mCallbackData;
-}
-
-//static
-void MediaPlayer2AudioOutput::setMinBufferCount() {
- char value[PROPERTY_VALUE_MAX];
- if (property_get("ro.kernel.qemu", value, 0)) {
- mIsOnEmulator = true;
- mMinBufferCount = 12; // to prevent systematic buffer underrun for emulator
- }
-}
-
-// static
-bool MediaPlayer2AudioOutput::isOnEmulator() {
- setMinBufferCount(); // benign race wrt other threads
- return mIsOnEmulator;
-}
-
-// static
-int MediaPlayer2AudioOutput::getMinBufferCount() {
- setMinBufferCount(); // benign race wrt other threads
- return mMinBufferCount;
-}
-
-ssize_t MediaPlayer2AudioOutput::bufferSize() const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return NO_INIT;
- }
- return mJAudioTrack->frameCount() * mFrameSize;
-}
-
-ssize_t MediaPlayer2AudioOutput::frameCount() const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return NO_INIT;
- }
- return mJAudioTrack->frameCount();
-}
-
-ssize_t MediaPlayer2AudioOutput::channelCount() const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return NO_INIT;
- }
- return mJAudioTrack->channelCount();
-}
-
-ssize_t MediaPlayer2AudioOutput::frameSize() const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return NO_INIT;
- }
- return mFrameSize;
-}
-
-uint32_t MediaPlayer2AudioOutput::latency () const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return 0;
- }
- return mJAudioTrack->latency();
-}
-
-float MediaPlayer2AudioOutput::msecsPerFrame() const {
- Mutex::Autolock lock(mLock);
- return mMsecsPerFrame;
-}
-
-status_t MediaPlayer2AudioOutput::getPosition(uint32_t *position) const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return NO_INIT;
- }
- return mJAudioTrack->getPosition(position);
-}
-
-status_t MediaPlayer2AudioOutput::getTimestamp(AudioTimestamp &ts) const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return NO_INIT;
- }
- return mJAudioTrack->getTimestamp(ts);
-}
-
-// TODO: Remove unnecessary calls to getPlayedOutDurationUs()
-// as it acquires locks and may query the audio driver.
-//
-// Some calls could conceivably retrieve extrapolated data instead of
-// accessing getTimestamp() or getPosition() every time a data buffer with
-// a media time is received.
-//
-// Calculate duration of played samples if played at normal rate (i.e., 1.0).
-int64_t MediaPlayer2AudioOutput::getPlayedOutDurationUs(int64_t nowUs) const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr || mSampleRateHz == 0) {
- return 0;
- }
-
- uint32_t numFramesPlayed;
- int64_t numFramesPlayedAtUs;
- AudioTimestamp ts;
-
- status_t res = mJAudioTrack->getTimestamp(ts);
-
- if (res == OK) { // case 1: mixing audio tracks and offloaded tracks.
- numFramesPlayed = ts.mPosition;
- numFramesPlayedAtUs = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
- //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
- } else { // case 2: transitory state on start of a new track
- // case 3: transitory at new track or audio fast tracks.
- numFramesPlayed = 0;
- numFramesPlayedAtUs = nowUs;
- //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
- // numFramesPlayed, (long long)numFramesPlayedAtUs);
- }
-
- // CHECK_EQ(numFramesPlayed & (1 << 31), 0); // can't be negative until 12.4 hrs, test
- // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
- int64_t durationUs = (int64_t)((int32_t)numFramesPlayed * 1000000LL / mSampleRateHz)
- + nowUs - numFramesPlayedAtUs;
- if (durationUs < 0) {
- // Occurs when numFramesPlayed position is very small and the following:
- // (1) In case 1, the time nowUs is computed before getTimestamp() is called and
- // numFramesPlayedAtUs is greater than nowUs by time more than numFramesPlayed.
- // (2) In case 3, using getPosition and adding mAudioSink->latency() to
- // numFramesPlayedAtUs, by a time amount greater than numFramesPlayed.
- //
- // Both of these are transitory conditions.
- ALOGV("getPlayedOutDurationUs: negative duration %lld set to zero", (long long)durationUs);
- durationUs = 0;
- }
- ALOGV("getPlayedOutDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
- (long long)durationUs, (long long)nowUs,
- numFramesPlayed, (long long)numFramesPlayedAtUs);
- return durationUs;
-}
-
-status_t MediaPlayer2AudioOutput::getFramesWritten(uint32_t *frameswritten) const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return NO_INIT;
- }
- ExtendedTimestamp ets;
- status_t status = mJAudioTrack->getTimestamp(&ets);
- if (status == OK || status == WOULD_BLOCK) {
- *frameswritten = (uint32_t)ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT];
- }
- return status;
-}
-
-void MediaPlayer2AudioOutput::setAudioAttributes(const jobject attributes) {
- Mutex::Autolock lock(mLock);
- mAttributes = (attributes == nullptr) ? nullptr : new JObjectHolder(attributes);
-}
-
-audio_stream_type_t MediaPlayer2AudioOutput::getAudioStreamType() const {
- ALOGV("getAudioStreamType");
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == nullptr) {
- return AUDIO_STREAM_DEFAULT;
- }
- return mJAudioTrack->getAudioStreamType();
-}
-
-void MediaPlayer2AudioOutput::close_l() {
- mJAudioTrack.clear();
-}
-
-status_t MediaPlayer2AudioOutput::open(
- uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
- audio_format_t format,
- AudioCallback cb, void *cookie,
- audio_output_flags_t flags,
- const audio_offload_info_t *offloadInfo,
- uint32_t suggestedFrameCount) {
- ALOGV("open(%u, %d, 0x%x, 0x%x, %d 0x%x)", sampleRate, channelCount, channelMask,
- format, mSessionId, flags);
-
- // offloading is only supported in callback mode for now.
- // offloadInfo must be present if offload flag is set
- if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) &&
- ((cb == nullptr) || (offloadInfo == nullptr))) {
- return BAD_VALUE;
- }
-
- // compute frame count for the AudioTrack internal buffer
- const size_t frameCount =
- ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) ? 0 : suggestedFrameCount;
-
- if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
- channelMask = audio_channel_out_mask_from_count(channelCount);
- if (0 == channelMask) {
- ALOGE("open() error, can\'t derive mask for %d audio channels", channelCount);
- return NO_INIT;
- }
- }
-
- Mutex::Autolock lock(mLock);
- mCallback = cb;
- mCallbackCookie = cookie;
-
- sp<JAudioTrack> jT;
- CallbackData *newcbd = nullptr;
-
- ALOGV("creating new JAudioTrack");
-
- if (mCallback != nullptr) {
- newcbd = new CallbackData(this);
- jT = new JAudioTrack(
- sampleRate,
- format,
- channelMask,
- CallbackWrapper,
- newcbd,
- frameCount,
- mSessionId,
- mAttributes != nullptr ? mAttributes->getJObject() : nullptr,
- 1.0f); // default value for maxRequiredSpeed
- } else {
- // TODO: Due to buffer memory concerns, we use a max target playback speed
- // based on mPlaybackRate at the time of open (instead of kMaxRequiredSpeed),
- // also clamping the target speed to 1.0 <= targetSpeed <= kMaxRequiredSpeed.
- const float targetSpeed =
- std::min(std::max(mPlaybackRate.mSpeed, 1.0f), kMaxRequiredSpeed);
- ALOGW_IF(targetSpeed != mPlaybackRate.mSpeed,
- "track target speed:%f clamped from playback speed:%f",
- targetSpeed, mPlaybackRate.mSpeed);
- jT = new JAudioTrack(
- sampleRate,
- format,
- channelMask,
- nullptr,
- nullptr,
- frameCount,
- mSessionId,
- mAttributes != nullptr ? mAttributes->getJObject() : nullptr,
- targetSpeed);
- }
-
- if (jT == 0) {
- ALOGE("Unable to create audio track");
- delete newcbd;
- // t goes out of scope, so reference count drops to zero
- return NO_INIT;
- }
-
- CHECK((jT != nullptr) && ((mCallback == nullptr) || (newcbd != nullptr)));
-
- mCallbackData = newcbd;
- ALOGV("setVolume");
- jT->setVolume(mVolume);
-
- mSampleRateHz = sampleRate;
- mFlags = flags;
- mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
- mFrameSize = jT->frameSize();
- mJAudioTrack = jT;
-
- return updateTrack_l();
-}
-
-status_t MediaPlayer2AudioOutput::updateTrack_l() {
- if (mJAudioTrack == nullptr) {
- return NO_ERROR;
- }
-
- status_t res = NO_ERROR;
- // Note some output devices may give us a direct track even though we don't specify it.
- // Example: Line application b/17459982.
- if ((mJAudioTrack->getFlags()
- & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0) {
- res = mJAudioTrack->setPlaybackRate(mPlaybackRate);
- if (res == NO_ERROR) {
- mJAudioTrack->setAuxEffectSendLevel(mSendLevel);
- res = mJAudioTrack->attachAuxEffect(mAuxEffectId);
- }
- }
- if (mPreferredDevice != nullptr) {
- mJAudioTrack->setPreferredDevice(mPreferredDevice->getJObject());
- }
-
- mJAudioTrack->registerRoutingDelegates(mRoutingDelegates);
-
- ALOGV("updateTrack_l() DONE status %d", res);
- return res;
-}
-
-status_t MediaPlayer2AudioOutput::start() {
- ALOGV("start");
- Mutex::Autolock lock(mLock);
- if (mCallbackData != nullptr) {
- mCallbackData->endTrackSwitch();
- }
- if (mJAudioTrack != nullptr) {
- mJAudioTrack->setVolume(mVolume);
- mJAudioTrack->setAuxEffectSendLevel(mSendLevel);
- status_t status = mJAudioTrack->start();
- return status;
- }
- return NO_INIT;
-}
-
-ssize_t MediaPlayer2AudioOutput::write(const void* buffer, size_t size, bool blocking) {
- Mutex::Autolock lock(mLock);
- LOG_ALWAYS_FATAL_IF(mCallback != nullptr, "Don't call write if supplying a callback.");
-
- //ALOGV("write(%p, %u)", buffer, size);
- if (mJAudioTrack != nullptr) {
- return mJAudioTrack->write(buffer, size, blocking);
- }
- return NO_INIT;
-}
-
-void MediaPlayer2AudioOutput::stop() {
- ALOGV("stop");
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack != nullptr) {
- mJAudioTrack->stop();
- }
-}
-
-void MediaPlayer2AudioOutput::flush() {
- ALOGV("flush");
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack != nullptr) {
- mJAudioTrack->flush();
- }
-}
-
-void MediaPlayer2AudioOutput::pause() {
- ALOGV("pause");
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack != nullptr) {
- mJAudioTrack->pause();
- }
-}
-
-void MediaPlayer2AudioOutput::close() {
- ALOGV("close");
- sp<JAudioTrack> track;
- {
- Mutex::Autolock lock(mLock);
- track = mJAudioTrack;
- close_l(); // clears mJAudioTrack
- }
- // destruction of the track occurs outside of mutex.
-}
-
-void MediaPlayer2AudioOutput::setVolume(float volume) {
- ALOGV("setVolume(%f)", volume);
- Mutex::Autolock lock(mLock);
- mVolume = volume;
- if (mJAudioTrack != nullptr) {
- mJAudioTrack->setVolume(volume);
- }
-}
-
-status_t MediaPlayer2AudioOutput::setPlaybackRate(const AudioPlaybackRate &rate) {
- ALOGV("setPlaybackRate(%f %f %d %d)",
- rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == 0) {
- // remember rate so that we can set it when the track is opened
- mPlaybackRate = rate;
- return OK;
- }
- status_t res = mJAudioTrack->setPlaybackRate(rate);
- if (res != NO_ERROR) {
- return res;
- }
- // rate.mSpeed is always greater than 0 if setPlaybackRate succeeded
- CHECK_GT(rate.mSpeed, 0.f);
- mPlaybackRate = rate;
- if (mSampleRateHz != 0) {
- mMsecsPerFrame = 1E3f / (rate.mSpeed * mSampleRateHz);
- }
- return res;
-}
-
-status_t MediaPlayer2AudioOutput::getPlaybackRate(AudioPlaybackRate *rate) {
- ALOGV("getPlaybackRate");
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == 0) {
- return NO_INIT;
- }
- *rate = mJAudioTrack->getPlaybackRate();
- return NO_ERROR;
-}
-
-status_t MediaPlayer2AudioOutput::setAuxEffectSendLevel(float level) {
- ALOGV("setAuxEffectSendLevel(%f)", level);
- Mutex::Autolock lock(mLock);
- mSendLevel = level;
- if (mJAudioTrack != nullptr) {
- return mJAudioTrack->setAuxEffectSendLevel(level);
- }
- return NO_ERROR;
-}
-
-status_t MediaPlayer2AudioOutput::attachAuxEffect(int effectId) {
- ALOGV("attachAuxEffect(%d)", effectId);
- Mutex::Autolock lock(mLock);
- mAuxEffectId = effectId;
- if (mJAudioTrack != nullptr) {
- return mJAudioTrack->attachAuxEffect(effectId);
- }
- return NO_ERROR;
-}
-
-status_t MediaPlayer2AudioOutput::setPreferredDevice(jobject device) {
- ALOGV("setPreferredDevice");
- Mutex::Autolock lock(mLock);
- status_t ret = NO_ERROR;
- if (mJAudioTrack != nullptr) {
- ret = mJAudioTrack->setPreferredDevice(device);
- }
- if (ret == NO_ERROR) {
- mPreferredDevice = new JObjectHolder(device);
- }
- return ret;
-}
-
-jobject MediaPlayer2AudioOutput::getRoutedDevice() {
- ALOGV("getRoutedDevice");
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack != nullptr) {
- return mJAudioTrack->getRoutedDevice();
- }
- return nullptr;
-}
-
-status_t MediaPlayer2AudioOutput::addAudioDeviceCallback(jobject jRoutingDelegate) {
- ALOGV("addAudioDeviceCallback");
- Mutex::Autolock lock(mLock);
- jobject listener = JAudioTrack::getListener(jRoutingDelegate);
- if (JAudioTrack::findByKey(mRoutingDelegates, listener) == nullptr) {
- sp<JObjectHolder> listenerHolder = new JObjectHolder(listener);
- jobject handler = JAudioTrack::getHandler(jRoutingDelegate);
- sp<JObjectHolder> routingDelegateHolder = new JObjectHolder(jRoutingDelegate);
-
- mRoutingDelegates.push_back(std::pair<sp<JObjectHolder>, sp<JObjectHolder>>(
- listenerHolder, routingDelegateHolder));
-
- if (mJAudioTrack != nullptr) {
- return mJAudioTrack->addAudioDeviceCallback(
- routingDelegateHolder->getJObject(), handler);
- }
- }
- return NO_ERROR;
-}
-
-status_t MediaPlayer2AudioOutput::removeAudioDeviceCallback(jobject listener) {
- ALOGV("removeAudioDeviceCallback");
- Mutex::Autolock lock(mLock);
- jobject routingDelegate = nullptr;
- if ((routingDelegate = JAudioTrack::findByKey(mRoutingDelegates, listener)) != nullptr) {
- if (mJAudioTrack != nullptr) {
- mJAudioTrack->removeAudioDeviceCallback(routingDelegate);
- }
- JAudioTrack::eraseByKey(mRoutingDelegates, listener);
- }
- return NO_ERROR;
-}
-
-// static
-void MediaPlayer2AudioOutput::CallbackWrapper(
- int event, void *cookie, void *info) {
- //ALOGV("callbackwrapper");
- CallbackData *data = (CallbackData*)cookie;
- // lock to ensure we aren't caught in the middle of a track switch.
- data->lock();
- MediaPlayer2AudioOutput *me = data->getOutput();
- JAudioTrack::Buffer *buffer = (JAudioTrack::Buffer *)info;
- if (me == nullptr) {
- // no output set, likely because the track was scheduled to be reused
- // by another player, but the format turned out to be incompatible.
- data->unlock();
- if (buffer != nullptr) {
- buffer->mSize = 0;
- }
- return;
- }
-
- switch(event) {
- case JAudioTrack::EVENT_MORE_DATA: {
- size_t actualSize = (*me->mCallback)(
- me, buffer->mData, buffer->mSize, me->mCallbackCookie,
- CB_EVENT_FILL_BUFFER);
-
- // Log when no data is returned from the callback.
- // (1) We may have no data (especially with network streaming sources).
- // (2) We may have reached the EOS and the audio track is not stopped yet.
- // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
- // NuPlayer2Renderer will return zero when it doesn't have data (it doesn't block to fill).
- //
- // This is a benign busy-wait, with the next data request generated 10 ms or more later;
- // nevertheless for power reasons, we don't want to see too many of these.
-
- ALOGV_IF(actualSize == 0 && buffer->mSize > 0, "callbackwrapper: empty buffer returned");
-
- buffer->mSize = actualSize;
- } break;
-
- case JAudioTrack::EVENT_STREAM_END:
- // currently only occurs for offloaded callbacks
- ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
- (*me->mCallback)(me, nullptr /* buffer */, 0 /* size */,
- me->mCallbackCookie, CB_EVENT_STREAM_END);
- break;
-
- case JAudioTrack::EVENT_NEW_IAUDIOTRACK :
- ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
- (*me->mCallback)(me, nullptr /* buffer */, 0 /* size */,
- me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
- break;
-
- case JAudioTrack::EVENT_UNDERRUN:
- // This occurs when there is no data available, typically
- // when there is a failure to supply data to the AudioTrack. It can also
- // occur in non-offloaded mode when the audio device comes out of standby.
- //
- // If an AudioTrack underruns it outputs silence. Since this happens suddenly
- // it may sound like an audible pop or glitch.
- //
- // The underrun event is sent once per track underrun; the condition is reset
- // when more data is sent to the AudioTrack.
- ALOGD("callbackwrapper: EVENT_UNDERRUN (discarded)");
- break;
-
- default:
- ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
- }
-
- data->unlock();
-}
-
-int32_t MediaPlayer2AudioOutput::getSessionId() const {
- Mutex::Autolock lock(mLock);
- return mSessionId;
-}
-
-void MediaPlayer2AudioOutput::setSessionId(const int32_t sessionId) {
- Mutex::Autolock lock(mLock);
- mSessionId = sessionId;
-}
-
-uint32_t MediaPlayer2AudioOutput::getSampleRate() const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == 0) {
- return 0;
- }
- return mJAudioTrack->getSampleRate();
-}
-
-int64_t MediaPlayer2AudioOutput::getBufferDurationInUs() const {
- Mutex::Autolock lock(mLock);
- if (mJAudioTrack == 0) {
- return 0;
- }
- int64_t duration;
- if (mJAudioTrack->getBufferDurationInUs(&duration) != OK) {
- return 0;
- }
- return duration;
-}
-
-} // namespace android
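
The trickiest arithmetic in the file removed above is getPlayedOutDurationUs(). Stripped of the track plumbing (and of the zero-sample-rate guard the original keeps), it reduces to the sketch below; the function and parameter names are illustrative only:

    #include <cstdint>

    // Frames reported by the audio timestamp, converted to microseconds at the
    // content sample rate, plus the wall-clock time elapsed since that timestamp.
    int64_t playedOutDurationUs(int64_t numFramesPlayed, uint32_t sampleRateHz,
                                int64_t nowUs, int64_t framesPlayedAtUs) {
        int64_t durationUs = numFramesPlayed * 1000000LL / sampleRateHz
                + (nowUs - framesPlayedAtUs);
        return durationUs < 0 ? 0 : durationUs;  // clamp the transient negative case, as the original did
    }

For example, a timestamp of 44100 frames taken 10 ms ago at 44.1 kHz yields 1 000 000 us + 10 000 us = 1 010 000 us of played-out content.
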
diff --git a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
deleted file mode 100644
index 2ed4632..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_JAUDIOTRACK_H
-#define ANDROID_JAUDIOTRACK_H
-
-#include <utility>
-#include <jni.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/AudioSystem.h>
-#include <media/VolumeShaper.h>
-#include <system/audio.h>
-#include <utils/Errors.h>
-#include <utils/Vector.h>
-#include <mediaplayer2/JObjectHolder.h>
-#include <media/AudioTimestamp.h> // It has dependency on audio.h/Errors.h, but doesn't
- // include them in it. Therefore it is included here at last.
-
-namespace android {
-
-class JAudioTrack : public RefBase {
-public:
-
- /* Events used by AudioTrack callback function (callback_t).
- * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
- */
- enum event_type {
- EVENT_MORE_DATA = 0, // Request to write more data to buffer.
- EVENT_UNDERRUN = 1, // Buffer underrun occurred. This will not occur for
- // static tracks.
- EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and
- // voluntary invalidation by mediaserver, or mediaserver crash.
- EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played
- // back (after stop is called) for an offloaded track.
- };
-
- class Buffer
- {
- public:
- size_t mSize; // input/output in bytes.
- void* mData; // pointer to the audio data.
- };
-
- /* As a convenience, if a callback is supplied, a handler thread
- * is automatically created with the appropriate priority. This thread
- * invokes the callback when a new buffer becomes available or various conditions occur.
- *
- * Parameters:
- *
- * event: type of event notified (see enum AudioTrack::event_type).
- * user: Pointer to context for use by the callback receiver.
- * info: Pointer to optional parameter according to event type:
- * - EVENT_MORE_DATA: pointer to JAudioTrack::Buffer struct. The callback must not
- * write more bytes than indicated by 'size' field and update 'size' if fewer bytes
- * are written.
- * - EVENT_NEW_IAUDIOTRACK: unused.
- * - EVENT_STREAM_END: unused.
- */
-
- typedef void (*callback_t)(int event, void* user, void *info);
-
- /* Creates an JAudioTrack object for non-offload mode.
- * Once created, the track needs to be started before it can be used.
- * Unspecified values are set to appropriate default values.
- *
- * Parameters:
- *
- * streamType: Select the type of audio stream this track is attached to
- * (e.g. AUDIO_STREAM_MUSIC).
- * sampleRate: Data source sampling rate in Hz. Zero means to use the sink sample rate.
- * A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
- * 0 will not work with current policy implementation for direct output
- * selection where an exact match is needed for sampling rate.
- * (TODO: Check direct output after flags can be used in Java AudioTrack.)
- * format: Audio format. For mixed tracks, any PCM format supported by server is OK.
- * For direct and offloaded tracks, the possible format(s) depends on the
- * output sink.
- * (TODO: How can we check whether a format is supported?)
- * channelMask: Channel mask, such that audio_is_output_channel(channelMask) is true.
- * cbf: Callback function. If not null, this function is called periodically
- * to provide new data and inform of marker, position updates, etc.
- * user: Context for use by the callback receiver.
- * frameCount: Minimum size of track PCM buffer in frames. This defines the
- * application's contribution to the latency of the track.
- * The actual size selected by the JAudioTrack could be larger if the
- * requested size is not compatible with current audio HAL configuration.
- * Zero means to use a default value.
- * sessionId: Specific session ID, or zero to use default.
- * pAttributes: If not NULL, supersedes streamType for use case selection.
- * maxRequiredSpeed: For PCM tracks, this creates an appropriate buffer size that will allow
- * maxRequiredSpeed playback. Values less than 1.0f and greater than
- * AUDIO_TIMESTRETCH_SPEED_MAX will be clamped. For non-PCM tracks
- * and direct or offloaded tracks, this parameter is ignored.
- * (TODO: Handle this after offload / direct track is supported.)
- *
- * TODO: Revive removed arguments after offload mode is supported.
- */
- JAudioTrack(uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- callback_t cbf,
- void* user,
- size_t frameCount = 0,
- int32_t sessionId = AUDIO_SESSION_ALLOCATE,
- const jobject pAttributes = NULL,
- float maxRequiredSpeed = 1.0f);
-
- /*
- // Q. May be used in AudioTrack.setPreferredDevice(AudioDeviceInfo)?
- audio_port_handle_t selectedDeviceId,
-
- // TODO: No place to use these values.
- int32_t notificationFrames,
- const audio_offload_info_t *offloadInfo,
- */
-
- virtual ~JAudioTrack();
-
- size_t frameCount();
- size_t channelCount();
-
- /* Returns this track's estimated latency in milliseconds.
- * This includes the latency due to AudioTrack buffer size, AudioMixer (if any)
- * and audio hardware driver.
- */
- uint32_t latency();
-
- /* Return the total number of frames played since playback start.
- * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
- * It is reset to zero by flush(), reload(), and stop().
- *
- * Parameters:
- *
- * position: Address where to return play head position.
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - BAD_VALUE: position is NULL
- */
- status_t getPosition(uint32_t *position);
-
- // TODO: Does this comment apply same to Java AudioTrack::getTimestamp?
- // Changed the return type from status_t to bool, since Java AudioTrack::getTimestamp returns
- // boolean. Will Java getTimestampWithStatus() be public?
- /* Poll for a timestamp on demand.
- * Use if EVENT_NEW_TIMESTAMP is not delivered often enough for your needs,
- * or if you need to get the most recent timestamp outside of the event callback handler.
- * Caution: calling this method too often may be inefficient;
- * if you need a high resolution mapping between frame position and presentation time,
- * consider implementing that at application level, based on the low resolution timestamps.
- * Returns NO_ERROR if timestamp is valid.
- * NO_INIT if finds error, and timestamp parameter will be undefined on return.
- */
- status_t getTimestamp(AudioTimestamp& timestamp);
-
-    // TODO: This doc is just copied from AudioTrack.h. Revise it after implementation.
- /* Return the extended timestamp, with additional timebase info and improved drain behavior.
- *
- * This is similar to the AudioTrack.java API:
- * getTimestamp(@NonNull AudioTimestamp timestamp, @AudioTimestamp.Timebase int timebase)
- *
- * Some differences between this method and the getTimestamp(AudioTimestamp& timestamp) method
- *
- * 1. stop() by itself does not reset the frame position.
- * A following start() resets the frame position to 0.
- * 2. flush() by itself does not reset the frame position.
- * The frame position advances by the number of frames flushed,
- * when the first frame after flush reaches the audio sink.
- * 3. BOOTTIME clock offsets are provided to help synchronize with
- * non-audio streams, e.g. sensor data.
- * 4. Position is returned with 64 bits of resolution.
- *
- * Parameters:
- * timestamp: A pointer to the caller allocated ExtendedTimestamp.
- *
- * Returns NO_ERROR on success; timestamp is filled with valid data.
- * BAD_VALUE if timestamp is NULL.
- * WOULD_BLOCK if called immediately after start() when the number
- * of frames consumed is less than the
- * overall hardware latency to physical output. In WOULD_BLOCK cases,
- * one might poll again, or use getPosition(), or use 0 position and
- * current time for the timestamp.
- * If WOULD_BLOCK is returned, the timestamp is still
- * modified with the LOCATION_CLIENT portion filled.
- * DEAD_OBJECT if AudioFlinger dies or the output device changes and
- * the track cannot be automatically restored.
- * The application needs to recreate the AudioTrack
- * because the audio device changed or AudioFlinger died.
- * This typically occurs for direct or offloaded tracks
- * or if mDoNotReconnect is true.
- * INVALID_OPERATION if called on a offloaded or direct track.
- * Use getTimestamp(AudioTimestamp& timestamp) instead.
- */
- status_t getTimestamp(ExtendedTimestamp *timestamp);
-
- /* Set source playback rate for timestretch
- * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
- * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
- *
- * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
- * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
- *
- * Speed increases the playback rate of media, but does not alter pitch.
- * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
- */
- status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
-
- /* Return current playback rate */
- const AudioPlaybackRate getPlaybackRate();
-
- /* Sets the volume shaper object */
- media::VolumeShaper::Status applyVolumeShaper(
- const sp<media::VolumeShaper::Configuration>& configuration,
- const sp<media::VolumeShaper::Operation>& operation);
-
- /* Set the send level for this track. An auxiliary effect should be attached
- * to the track with attachEffect(). Level must be >= 0.0 and <= 1.0.
- */
- status_t setAuxEffectSendLevel(float level);
-
- /* Attach track auxiliary output to specified effect. Use effectId = 0
- * to detach track from effect.
- *
- * Parameters:
- *
- * effectId: effectId obtained from AudioEffect::id().
- *
- * Returned status (from utils/Errors.h) can be:
- * - NO_ERROR: successful operation
- * - INVALID_OPERATION: The effect is not an auxiliary effect.
- * - BAD_VALUE: The specified effect ID is invalid.
- */
- status_t attachAuxEffect(int effectId);
-
- /* Set volume for this track, mostly used for games' sound effects
- * left and right volumes. Levels must be >= 0.0 and <= 1.0.
- * This is the older API. New applications should use setVolume(float) when possible.
- */
- status_t setVolume(float left, float right);
-
- /* Set volume for all channels. This is the preferred API for new applications,
- * especially for multi-channel content.
- */
- status_t setVolume(float volume);
-
- // TODO: Does this comment equally apply to the Java AudioTrack::play()?
- /* After it's created the track is not active. Call start() to
- * make it active. If set, the callback will start being called.
- * If the track was previously paused, volume is ramped up over the first mix buffer.
- */
- status_t start();
-
- // TODO: Does this comment still applies? It seems not. (obtainBuffer, AudioFlinger, ...)
- /* As a convenience we provide a write() interface to the audio buffer.
- * Input parameter 'size' is in byte units.
- * This is implemented on top of obtainBuffer/releaseBuffer. For best
- * performance use callbacks. Returns actual number of bytes written >= 0,
- * or one of the following negative status codes:
- * INVALID_OPERATION AudioTrack is configured for static buffer or streaming mode
- * BAD_VALUE size is invalid
- * WOULD_BLOCK when obtainBuffer() returns same, or
- * AudioTrack was stopped during the write
- * DEAD_OBJECT when AudioFlinger dies or the output device changes and
- * the track cannot be automatically restored.
- * The application needs to recreate the AudioTrack
- * because the audio device changed or AudioFlinger died.
- * This typically occurs for direct or offload tracks
- * or if mDoNotReconnect is true.
- * or any other error code returned by IAudioTrack::start() or restoreTrack_l().
- * Default behavior is to only return when all data has been transferred. Set 'blocking' to
- * false for the method to return immediately without waiting to try multiple times to write
- * the full content of the buffer.
- */
- ssize_t write(const void* buffer, size_t size, bool blocking = true);
-
- // TODO: Does this comment equally apply to the Java AudioTrack::stop()?
- /* Stop a track.
- * In static buffer mode, the track is stopped immediately.
- * In streaming mode, the callback will cease being called. Note that obtainBuffer() still
- * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
- * In streaming mode the stop does not occur immediately: any data remaining in the buffer
- * is first drained, mixed, and output, and only then is the track marked as stopped.
- */
- void stop();
- bool stopped() const;
-
- // TODO: Does this comment equally apply to the Java AudioTrack::flush()?
- /* Flush a stopped or paused track. All previously buffered data is discarded immediately.
- * This has the effect of draining the buffers without mixing or output.
- * Flush is intended for streaming mode, for example before switching to non-contiguous content.
- * This function is a no-op if the track is not stopped or paused, or uses a static buffer.
- */
- void flush();
-
- // TODO: Does this comment equally apply to the Java AudioTrack::pause()?
- // At least we are not using obtainBuffer.
- /* Pause a track. After pause, the callback will cease being called and
- * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
- * and will fill up buffers until the pool is exhausted.
- * Volume is ramped down over the next mix buffer following the pause request,
- * and then the track is marked as paused. It can be resumed with ramp up by start().
- */
- void pause();
-
- bool isPlaying() const;
-
- /* Return current source sample rate in Hz.
- * If specified as zero in constructor, this will be the sink sample rate.
- */
- uint32_t getSampleRate();
-
- /* Returns the buffer duration in microseconds at current playback rate. */
- status_t getBufferDurationInUs(int64_t *duration);
-
- audio_format_t format();
-
- size_t frameSize();
-
- /*
- * Dumps the state of an audio track.
- * Not a general-purpose API; intended only for use by media player service to dump its tracks.
- */
- status_t dump(int fd, const Vector<String16>& args) const;
-
- /* Returns the AudioDeviceInfo used by the output to which this AudioTrack is
- * attached.
- */
- jobject getRoutedDevice();
-
- /* Returns the ID of the audio session this AudioTrack belongs to. */
- int32_t getAudioSessionId();
-
- /* Sets the preferred audio device to use for output of this AudioTrack.
- *
- * Parameters:
- * Device: an AudioDeviceInfo object.
- *
- * Returned value:
- * - NO_ERROR: successful operation
- * - BAD_VALUE: failed to set the device
- */
- status_t setPreferredDevice(jobject device);
-
- // TODO: Add AUDIO_OUTPUT_FLAG_DIRECT when it is possible to check.
- // TODO: Add AUDIO_FLAG_HW_AV_SYNC when it is possible to check.
- /* Returns the flags */
- audio_output_flags_t getFlags() const { return mFlags; }
-
- /* We don't keep stream type here,
- * instead, we keep attributes and call getVolumeControlStream() to get stream type
- */
- audio_stream_type_t getAudioStreamType();
-
- /* Obtain the pending duration in milliseconds for playback of pure PCM data remaining in
- * AudioTrack.
- *
- * Returns NO_ERROR if successful.
- * INVALID_OPERATION if the AudioTrack does not contain pure PCM data.
- * BAD_VALUE if msec is nullptr.
- */
- status_t pendingDuration(int32_t *msec);
-
- /* Adds an AudioDeviceCallback. The caller will be notified when the audio device to which this
- * AudioTrack is routed is updated.
- * Replaces any previously installed callback.
- *
- * Parameters:
- * Listener: the listener to receive notification of rerouting events.
-     *     Handler: the handler to handle the rerouting events.
- *
- * Returns NO_ERROR if successful.
- * (TODO) INVALID_OPERATION if the same callback is already installed.
-     *         (TODO) NO_INIT or PERMISSION_DENIED if AudioFlinger service is not reachable
- * (TODO) BAD_VALUE if the callback is NULL
- */
- status_t addAudioDeviceCallback(jobject listener, jobject rd);
-
- /* Removes an AudioDeviceCallback.
- *
- * Parameters:
- * Listener: the listener to receive notification of rerouting events.
- *
- * Returns NO_ERROR if successful.
- * (TODO) INVALID_OPERATION if the callback is not installed
- * (TODO) BAD_VALUE if the callback is NULL
- */
- status_t removeAudioDeviceCallback(jobject listener);
-
- /* Register all backed-up routing delegates.
- *
- * Parameters:
- * routingDelegates: backed-up routing delegates
- *
- */
- void registerRoutingDelegates(
- Vector<std::pair<sp<JObjectHolder>, sp<JObjectHolder>>>& routingDelegates);
-
- /* get listener from RoutingDelegate object
- */
- static jobject getListener(const jobject routingDelegateObj);
-
- /* get handler from RoutingDelegate object
- */
- static jobject getHandler(const jobject routingDelegateObj);
-
- /*
- * Parameters:
- * map and key
- *
- * Returns value if key is in the map
- * nullptr if key is not in the map
- */
- static jobject findByKey(
- Vector<std::pair<sp<JObjectHolder>, sp<JObjectHolder>>>& mp, const jobject key);
-
- /*
- * Parameters:
- * map and key
- */
- static void eraseByKey(
- Vector<std::pair<sp<JObjectHolder>, sp<JObjectHolder>>>& mp, const jobject key);
-
-private:
- audio_output_flags_t mFlags;
-
- jclass mAudioTrackCls;
- jobject mAudioTrackObj;
-
- /* Creates a Java VolumeShaper.Configuration object from VolumeShaper::Configuration */
- jobject createVolumeShaperConfigurationObj(
- const sp<media::VolumeShaper::Configuration>& config);
-
- /* Creates a Java VolumeShaper.Operation object from VolumeShaper::Operation */
- jobject createVolumeShaperOperationObj(
- const sp<media::VolumeShaper::Operation>& operation);
-
- /* Creates a Java StreamEventCallback object */
- jobject createStreamEventCallback(callback_t cbf, void* user);
-
- /* Creates a Java Executor object for running a callback */
- jobject createCallbackExecutor();
-
- status_t javaToNativeStatus(int javaStatus);
-};
-
-}; // namespace android
-
-#endif // ANDROID_JAUDIOTRACK_H
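
Taken together, the header removed above describes the lifecycle that MediaPlayer2AudioOutput relied on. A hedged sketch of the write-mode flow using only the declarations shown above; playPcmOnce(), pcmData, and pcmSizeBytes are hypothetical placeholders:

    #include <mediaplayer2/JAudioTrack.h>

    using namespace android;

    // Write-mode playback: no callback is supplied, so data is pushed with write().
    static status_t playPcmOnce(const void* pcmData, size_t pcmSizeBytes) {
        sp<JAudioTrack> track = new JAudioTrack(
                44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
                nullptr /* cbf: no callback, use write() */, nullptr /* user */);
        track->setVolume(1.0f);
        status_t status = track->start();
        if (status != NO_ERROR) {
            return status;
        }
        track->write(pcmData, pcmSizeBytes, true /* blocking */);
        track->stop();
        return NO_ERROR;
    }

Callback mode instead passes a callback_t and services EVENT_MORE_DATA by filling Buffer::mData and updating Buffer::mSize, as the CallbackWrapper in the deleted MediaPlayer2AudioOutput.cpp did.
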
diff --git a/media/libmediaplayer2/include/mediaplayer2/JMedia2HTTPConnection.h b/media/libmediaplayer2/include/mediaplayer2/JMedia2HTTPConnection.h
deleted file mode 100644
index 15f7f83..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/JMedia2HTTPConnection.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright 2017, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _J_MEDIA2_HTTP_CONNECTION_H_
-#define _J_MEDIA2_HTTP_CONNECTION_H_
-
-#include "jni.h"
-
-#include <media/MediaHTTPConnection.h>
-#include <media/stagefright/foundation/ABase.h>
-
-namespace android {
-
-struct JMedia2HTTPConnection : public MediaHTTPConnection {
- JMedia2HTTPConnection(JNIEnv *env, jobject thiz);
-
- virtual bool connect(
- const char *uri, const KeyedVector<String8, String8> *headers) override;
-
- virtual void disconnect() override;
- virtual ssize_t readAt(off64_t offset, void *data, size_t size) override;
- virtual off64_t getSize() override;
- virtual status_t getMIMEType(String8 *mimeType) override;
- virtual status_t getUri(String8 *uri) override;
-
-protected:
- virtual ~JMedia2HTTPConnection();
-
-private:
- jobject mMedia2HTTPConnectionObj;
- jmethodID mConnectMethod;
- jmethodID mDisconnectMethod;
- jmethodID mReadAtMethod;
- jmethodID mGetSizeMethod;
- jmethodID mGetMIMETypeMethod;
- jmethodID mGetUriMethod;
-
- jbyteArray mByteArrayObj;
-
- DISALLOW_EVIL_CONSTRUCTORS(JMedia2HTTPConnection);
-};
-
-} // namespace android
-
-#endif // _J_MEDIA2_HTTP_CONNECTION_H_
diff --git a/media/libmediaplayer2/include/mediaplayer2/JMedia2HTTPService.h b/media/libmediaplayer2/include/mediaplayer2/JMedia2HTTPService.h
deleted file mode 100644
index bf61a7f..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/JMedia2HTTPService.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2017, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _J_MEDIA2_HTTP_SERVICE_H_
-#define _J_MEDIA2_HTTP_SERVICE_H_
-
-#include <jni.h>
-#include <utils/RefBase.h>
-
-#include <media/MediaHTTPService.h>
-#include <media/MediaHTTPConnection.h>
-#include <media/stagefright/foundation/ABase.h>
-
-namespace android {
-
-struct JMedia2HTTPService : public MediaHTTPService {
- JMedia2HTTPService(JNIEnv *env, jobject thiz);
-
- virtual sp<MediaHTTPConnection> makeHTTPConnection() override;
-
-protected:
- virtual ~JMedia2HTTPService();
-
-private:
- jobject mMedia2HTTPServiceObj;
-
- jmethodID mMakeHTTPConnectionMethod;
-
- DISALLOW_EVIL_CONSTRUCTORS(JMedia2HTTPService);
-};
-
-} // namespace android
-
-#endif // _J_MEDIA2_HTTP_SERVICE_H_
diff --git a/media/libmediaplayer2/include/mediaplayer2/JObjectHolder.h b/media/libmediaplayer2/include/mediaplayer2/JObjectHolder.h
deleted file mode 100644
index 93d8b40..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/JObjectHolder.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright 2018, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef JOBJECT_HOLDER_H_
-
-#define JOBJECT_HOLDER_H_
-
-#include "jni.h"
-#include <mediaplayer2/JavaVMHelper.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-// Helper class for managing global reference of jobject.
-struct JObjectHolder : public RefBase {
- JObjectHolder(jobject obj) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- mJObject = reinterpret_cast<jobject>(env->NewGlobalRef(obj));
- }
-
- virtual ~JObjectHolder() {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- env->DeleteGlobalRef(mJObject);
- }
-
- jobject getJObject() { return mJObject; }
-
-private:
- jobject mJObject;
-};
-
-} // namespace android
-
-#endif // JOBJECT_HOLDER_H_
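
JObjectHolder above is a small RAII helper that promotes a jobject to a JNI global reference and releases it when the last strong pointer is dropped. A hedged usage sketch; retainAttributes() and localAttributes are placeholder names, not part of the removed API:

    #include <mediaplayer2/JObjectHolder.h>
    #include <utils/RefBase.h>

    namespace android {

    // Keep a Java object (for example an AudioAttributes instance) usable across
    // threads and JNI calls without manually balancing NewGlobalRef/DeleteGlobalRef.
    sp<JObjectHolder> retainAttributes(jobject localAttributes) {
        return new JObjectHolder(localAttributes);  // ctor creates its own NewGlobalRef
    }  // ~JObjectHolder deletes the global ref once the last sp<> is released

    }  // namespace android

This is the pattern the deleted MediaPlayer2AudioOutput used for mAttributes and mPreferredDevice.
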
diff --git a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h b/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
deleted file mode 100644
index 4b56aca..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/JavaVMHelper.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef JAVA_VM_HELPER_H_
-
-#define JAVA_VM_HELPER_H_
-
-#include "jni.h"
-
-#include <atomic>
-
-namespace android {
-
-struct JavaVMHelper {
- static JNIEnv *getJNIEnv();
- static JavaVM *getJavaVM();
- static void setJavaVM(JavaVM *vm);
-
-private:
- // Once a valid JavaVM has been set, it should never be reset or changed.
- // However, as it may be accessed from multiple threads, access needs to be
- // synchronized.
- static std::atomic<JavaVM *> sJavaVM;
-};
-
-} // namespace android
-
-#endif // JAVA_VM_HELPER_H_
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
deleted file mode 100644
index f38b7cc..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
-**
-** Copyright 2018, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#ifndef ANDROID_MEDIAPLAYER2AUDIOOUTPUT_H
-#define ANDROID_MEDIAPLAYER2AUDIOOUTPUT_H
-
-#include <mediaplayer2/MediaPlayer2Interface.h>
-#include <mediaplayer2/JAudioTrack.h>
-#include <mediaplayer2/JObjectHolder.h>
-
-#include <utility>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-
-#include "jni.h"
-
-namespace android {
-
-class AudioTrack;
-
-class MediaPlayer2AudioOutput : public MediaPlayer2Interface::AudioSink
-{
- class CallbackData;
-
-public:
- MediaPlayer2AudioOutput(int32_t sessionId,
- uid_t uid,
- int pid,
- const jobject attributes);
- virtual ~MediaPlayer2AudioOutput();
-
- virtual bool ready() const {
- return mJAudioTrack != nullptr;
- }
- virtual ssize_t bufferSize() const;
- virtual ssize_t frameCount() const;
- virtual ssize_t channelCount() const;
- virtual ssize_t frameSize() const;
- virtual uint32_t latency() const;
- virtual float msecsPerFrame() const;
- virtual status_t getPosition(uint32_t *position) const;
- virtual status_t getTimestamp(AudioTimestamp &ts) const;
- virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const;
- virtual status_t getFramesWritten(uint32_t *frameswritten) const;
- virtual int32_t getSessionId() const;
- virtual void setSessionId(const int32_t id);
- virtual uint32_t getSampleRate() const;
- virtual int64_t getBufferDurationInUs() const;
-
- virtual status_t open(
- uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
- audio_format_t format,
- AudioCallback cb, void *cookie,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL,
- uint32_t suggestedFrameCount = 0);
-
- virtual status_t start();
- virtual ssize_t write(const void* buffer, size_t size, bool blocking = true);
- virtual void stop();
- virtual void flush();
- virtual void pause();
- virtual void close();
- void setAudioAttributes(const jobject attributes);
- virtual audio_stream_type_t getAudioStreamType() const;
-
- void setVolume(float volume);
- virtual status_t setPlaybackRate(const AudioPlaybackRate& rate);
- virtual status_t getPlaybackRate(AudioPlaybackRate* rate /* nonnull */);
-
- status_t setAuxEffectSendLevel(float level);
- status_t attachAuxEffect(int effectId);
- virtual status_t dump(int fd, const Vector<String16>& args) const;
-
- static bool isOnEmulator();
- static int getMinBufferCount();
- virtual bool needsTrailingPadding() {
- return true;
- // TODO: return correct value.
- //return mNextOutput == NULL;
- }
- // AudioRouting
- virtual status_t setPreferredDevice(jobject device);
- virtual jobject getRoutedDevice();
- virtual status_t addAudioDeviceCallback(jobject routingDelegate);
- virtual status_t removeAudioDeviceCallback(jobject listener);
-
-private:
- static void setMinBufferCount();
- static void CallbackWrapper(int event, void *me, void *info);
- void deleteRecycledTrack_l();
- void close_l();
- status_t updateTrack_l();
-
- sp<JAudioTrack> mJAudioTrack;
- AudioCallback mCallback;
- void * mCallbackCookie;
- CallbackData * mCallbackData;
- sp<JObjectHolder> mAttributes;
- float mVolume;
- AudioPlaybackRate mPlaybackRate;
- uint32_t mSampleRateHz; // sample rate of the content, as set in open()
- float mMsecsPerFrame;
- size_t mFrameSize;
- int32_t mSessionId;
- uid_t mUid;
- int mPid;
- float mSendLevel;
- int mAuxEffectId;
- audio_output_flags_t mFlags;
- sp<JObjectHolder> mPreferredDevice;
- mutable Mutex mLock;
-
- // <listener, routingDelegate>
- Vector<std::pair<sp<JObjectHolder>, sp<JObjectHolder>>> mRoutingDelegates;
-
- // static variables below not protected by mutex
- static bool mIsOnEmulator;
- static int mMinBufferCount; // 12 for emulator; otherwise 4
-
- // CallbackData is what is passed to the AudioTrack as the "user" data.
- // We need to be able to target this to a different Output on the fly,
- // so we can't use the Output itself for this.
- class CallbackData {
- friend MediaPlayer2AudioOutput;
- public:
- explicit CallbackData(MediaPlayer2AudioOutput *cookie) {
- mData = cookie;
- mSwitching = false;
- }
- MediaPlayer2AudioOutput *getOutput() const {
- return mData;
- }
- void setOutput(MediaPlayer2AudioOutput* newcookie) {
- mData = newcookie;
- }
- // lock/unlock are used by the callback before accessing the payload of this object
- void lock() const {
- mLock.lock();
- }
- void unlock() const {
- mLock.unlock();
- }
-
- // tryBeginTrackSwitch/endTrackSwitch are used when the CallbackData is handed over
- // to the next sink.
-
- // tryBeginTrackSwitch() returns true only if it obtains the lock.
- bool tryBeginTrackSwitch() {
- LOG_ALWAYS_FATAL_IF(mSwitching, "tryBeginTrackSwitch() already called");
- if (mLock.tryLock() != OK) {
- return false;
- }
- mSwitching = true;
- return true;
- }
- void endTrackSwitch() {
- if (mSwitching) {
- mLock.unlock();
- }
- mSwitching = false;
- }
-
- private:
- MediaPlayer2AudioOutput *mData;
- mutable Mutex mLock; // a recursive mutex might make this unnecessary.
- bool mSwitching;
- DISALLOW_EVIL_CONSTRUCTORS(CallbackData);
- };
-};
-
-}; // namespace android
-
-#endif // ANDROID_MEDIAPLAYER2AUDIOOUTPUT_H
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
deleted file mode 100644
index 7804a62..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIAPLAYER2INTERFACE_H
-#define ANDROID_MEDIAPLAYER2INTERFACE_H
-
-#ifdef __cplusplus
-
-#include <sys/types.h>
-#include <utils/Errors.h>
-#include <utils/String8.h>
-#include <utils/RefBase.h>
-#include <jni.h>
-
-#include <media/AVSyncSettings.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/AudioSystem.h>
-#include <media/AudioTimestamp.h>
-#include <media/BufferingSettings.h>
-#include <media/stagefright/foundation/AHandler.h>
-#include <mediaplayer2/MediaPlayer2Types.h>
-
-#include "jni.h"
-#include "mediaplayer2.pb.h"
-
-using android::media::MediaPlayer2Proto::PlayerMessage;
-
-// Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
-// global, and not in android::
-struct sockaddr_in;
-
-namespace android {
-
-struct DataSourceDesc;
-class Parcel;
-struct ANativeWindowWrapper;
-
-#define DEFAULT_AUDIOSINK_BUFFERSIZE 1200
-#define DEFAULT_AUDIOSINK_SAMPLERATE 44100
-
-// when the channel mask isn't known, use the channel count to derive a mask in AudioSink::open()
-#define CHANNEL_MASK_USE_CHANNEL_ORDER 0
-
-// duration below which we do not allow deep audio buffering
-#define AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US 5000000
-
-class MediaPlayer2InterfaceListener: public RefBase
-{
-public:
- virtual void notify(int64_t srcId, int msg, int ext1, int ext2,
- const PlayerMessage *obj) = 0;
-};
-
-class MediaPlayer2Interface : public AHandler {
-public:
- // AudioSink: abstraction layer for audio output
- class AudioSink : public RefBase {
- public:
- enum cb_event_t {
- CB_EVENT_FILL_BUFFER, // Request to write more data to buffer.
- CB_EVENT_STREAM_END, // Sent after all the buffers queued in AF and HW are played
- // back (after stop is called)
- CB_EVENT_TEAR_DOWN // The AudioTrack was invalidated due to use case change:
- // Need to re-evaluate offloading options
- };
-
- // Callback returns the number of bytes actually written to the buffer.
- typedef size_t (*AudioCallback)(
- AudioSink *audioSink, void *buffer, size_t size, void *cookie, cb_event_t event);
-
- virtual ~AudioSink() {}
- virtual bool ready() const = 0; // audio output is open and ready
- virtual ssize_t bufferSize() const = 0;
- virtual ssize_t frameCount() const = 0;
- virtual ssize_t channelCount() const = 0;
- virtual ssize_t frameSize() const = 0;
- virtual uint32_t latency() const = 0;
- virtual float msecsPerFrame() const = 0;
- virtual status_t getPosition(uint32_t *position) const = 0;
- virtual status_t getTimestamp(AudioTimestamp &ts) const = 0;
- virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const = 0;
- virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
- virtual int32_t getSessionId() const = 0;
- virtual audio_stream_type_t getAudioStreamType() const = 0;
- virtual uint32_t getSampleRate() const = 0;
- virtual int64_t getBufferDurationInUs() const = 0;
-
- // If no callback is specified, use the "write" API below to submit
- // audio data.
- virtual status_t open(
- uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
- audio_format_t format=AUDIO_FORMAT_PCM_16_BIT,
- AudioCallback cb = NULL,
- void *cookie = NULL,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- const audio_offload_info_t *offloadInfo = NULL,
- uint32_t suggestedFrameCount = 0) = 0;
-
- virtual status_t start() = 0;
-
- /* Input parameter |size| is in byte units stored in |buffer|.
- * Data is copied over and actual number of bytes written (>= 0)
- * is returned, or no data is copied and a negative status code
- * is returned (even when |blocking| is true).
- * When |blocking| is false, AudioSink will immediately return after
- * part of or full |buffer| is copied over.
- * When |blocking| is true, AudioSink will wait to copy the entire
- * buffer, unless an error occurs or the copy operation is
- * prematurely stopped.
- */
- virtual ssize_t write(const void* buffer, size_t size, bool blocking = true) = 0;
-
- virtual void stop() = 0;
- virtual void flush() = 0;
- virtual void pause() = 0;
- virtual void close() = 0;
-
- virtual status_t setPlaybackRate(const AudioPlaybackRate& rate) = 0;
- virtual status_t getPlaybackRate(AudioPlaybackRate* rate /* nonnull */) = 0;
- virtual bool needsTrailingPadding() {
- return true;
- }
-
- virtual status_t setParameters(const String8& /* keyValuePairs */) {
- return NO_ERROR;
- }
- virtual String8 getParameters(const String8& /* keys */) {
- return String8::empty();
- }
-
- // AudioRouting
- virtual status_t setPreferredDevice(jobject device);
- virtual jobject getRoutedDevice();
- virtual status_t addAudioDeviceCallback(jobject routingDelegate);
- virtual status_t removeAudioDeviceCallback(jobject listener);
- };
-
- MediaPlayer2Interface() : mListener(NULL) { }
- virtual ~MediaPlayer2Interface() { }
- virtual status_t initCheck() = 0;
-
- virtual void setAudioSink(const sp<AudioSink>& audioSink) {
- mAudioSink = audioSink;
- }
-
- virtual status_t setDataSource(const sp<DataSourceDesc> &dsd) = 0;
-
- virtual status_t prepareNextDataSource(const sp<DataSourceDesc> &dsd) = 0;
-
- virtual status_t playNextDataSource(int64_t srcId) = 0;
-
- // pass the buffered native window to the media player service
- virtual status_t setVideoSurfaceTexture(const sp<ANativeWindowWrapper>& nww) = 0;
-
- virtual status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */) {
- *buffering = BufferingSettings();
- return OK;
- }
- virtual status_t setBufferingSettings(const BufferingSettings& /* buffering */) {
- return OK;
- }
-
- virtual status_t prepareAsync() = 0;
- virtual status_t start() = 0;
- virtual status_t pause() = 0;
- virtual bool isPlaying() = 0;
- virtual status_t setPlaybackSettings(const AudioPlaybackRate& rate) {
- // by default, players only support setting rate to the default
- if (!isAudioPlaybackRateEqual(rate, AUDIO_PLAYBACK_RATE_DEFAULT)) {
- return BAD_VALUE;
- }
- return OK;
- }
- virtual status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
- *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
- return OK;
- }
- virtual status_t setSyncSettings(const AVSyncSettings& sync, float /* videoFps */) {
- // By default, players only support setting sync source to default; all other sync
- // settings are ignored. There is no requirement for getters to return set values.
- if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
- return BAD_VALUE;
- }
- return OK;
- }
- virtual status_t getSyncSettings(
- AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
- *sync = AVSyncSettings();
- *videoFps = -1.f;
- return OK;
- }
- virtual status_t seekTo(
- int64_t msec, MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) = 0;
- virtual status_t getCurrentPosition(int64_t *msec) = 0;
- virtual status_t getDuration(int64_t *msec) = 0;
- virtual status_t reset() = 0;
- virtual status_t notifyAt(int64_t /* mediaTimeUs */) {
- return INVALID_OPERATION;
- }
- virtual status_t setLooping(int loop) = 0;
- virtual status_t setParameter(int key, const Parcel &request) = 0;
- virtual status_t getParameter(int key, Parcel *reply) = 0;
-
- virtual status_t getMetrics(char **buffer, size_t *length) = 0;
-
- // Invoke a generic method on the player by using opaque parcels
- // for the request and reply.
- //
- // @param request Parcel that is positioned at the start of the
- // data sent by the java layer.
- // @param[out] reply Parcel to hold the reply data. Cannot be null.
- // @return OK if the call was successful.
- virtual status_t invoke(const PlayerMessage &request, PlayerMessage *reply) = 0;
-
- void setListener(const sp<MediaPlayer2InterfaceListener> &listener) {
- Mutex::Autolock autoLock(mListenerLock);
- mListener = listener;
- }
-
- void sendEvent(int64_t srcId, int msg, int ext1=0, int ext2=0, const PlayerMessage *obj=NULL) {
- sp<MediaPlayer2InterfaceListener> listener;
- {
- Mutex::Autolock autoLock(mListenerLock);
- listener = mListener;
- }
-
- if (listener) {
- listener->notify(srcId, msg, ext1, ext2, obj);
- }
- }
-
- virtual status_t dump(int /* fd */, const Vector<String16>& /* args */) const {
- return INVALID_OPERATION;
- }
-
- virtual void onMessageReceived(const sp<AMessage> & /* msg */) override { }
-
- // Modular DRM
- virtual status_t prepareDrm(int64_t /*srcId*/, const uint8_t /* uuid */[16],
- const Vector<uint8_t>& /* drmSessionId */) {
- return INVALID_OPERATION;
- }
- virtual status_t releaseDrm(int64_t /*srcId*/) {
- return INVALID_OPERATION;
- }
-
-protected:
- sp<AudioSink> mAudioSink;
-
-private:
- Mutex mListenerLock;
- sp<MediaPlayer2InterfaceListener> mListener;
-};
-
-}; // namespace android
-
-#endif // __cplusplus
-
-
-#endif // ANDROID_MEDIAPLAYER2INTERFACE_H
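For reference, a minimal sketch of how a player implementation would typically drive the AudioSink abstraction declared in the header removed above. This assumes the removed header is still on the include path; the helper name pushPcm is hypothetical, and the real NuPlayer2Renderer path is considerably more involved.

#include <mediaplayer2/MediaPlayer2Interface.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>

using android::sp;
using android::MediaPlayer2Interface;

// Hypothetical helper: push one PCM buffer through an already-opened sink.
// Assumes the sink was opened via AudioSink::open(sampleRate, channelCount, ...)
// and started with AudioSink::start(); error handling is elided.
static ssize_t pushPcm(const sp<MediaPlayer2Interface::AudioSink>& sink,
                       const void* data, size_t size) {
    if (sink == nullptr || !sink->ready()) {
        return android::NO_INIT;  // sink not opened yet
    }
    // Blocking write: copies the whole buffer, or returns a negative status.
    return sink->write(data, size, true /* blocking */);
}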
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
deleted file mode 100644
index 2430289..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIAPLAYER2_TYPES_H
-#define ANDROID_MEDIAPLAYER2_TYPES_H
-
-#include <media/mediaplayer_common.h>
-
-#include <media/MediaSource.h>
-
-namespace android {
-
-typedef MediaSource::ReadOptions::SeekMode MediaPlayer2SeekMode;
-
-enum media2_event_type {
- MEDIA2_NOP = 0, // interface test message
- MEDIA2_PREPARED = 1,
- MEDIA2_PLAYBACK_COMPLETE = 2,
- MEDIA2_BUFFERING_UPDATE = 3,
- MEDIA2_SEEK_COMPLETE = 4,
- MEDIA2_SET_VIDEO_SIZE = 5,
- MEDIA2_STARTED = 6,
- MEDIA2_PAUSED = 7,
- MEDIA2_SKIPPED = 8,
- MEDIA2_NOTIFY_TIME = 98,
- MEDIA2_TIMED_TEXT = 99,
- MEDIA2_ERROR = 100,
- MEDIA2_INFO = 200,
- MEDIA2_SUBTITLE_DATA = 201,
- MEDIA2_META_DATA = 202,
- MEDIA2_DRM_INFO = 210,
-};
-
-// Generic error codes for the media player framework. Errors are fatal, the
-// playback must abort.
-//
-// Errors are communicated back to the client using the
-// MediaPlayer2Listener::notify method defined below.
-// In this situation, 'notify' is invoked with the following:
-// 'msg' is set to MEDIA2_ERROR.
-// 'ext1' should be a value from the enum media2_error_type.
-// 'ext2' contains an implementation dependent error code to provide
-// more details. Should default to 0 when not used.
-//
-// The codes are distributed as follows:
-// 0xx: Reserved
-// 1xx: Android Player errors. Something went wrong inside the MediaPlayer2.
-// 2xx: Media errors (e.g. codec not supported). There is a problem with the
-// media itself.
-// 3xx: Runtime errors. Some extraordinary condition arose making the playback
-// impossible.
-//
-enum media2_error_type {
- // 0xx
- MEDIA2_ERROR_UNKNOWN = 1,
- // 1xx
- // MEDIA2_ERROR_SERVER_DIED = 100,
- // 2xx
- MEDIA2_ERROR_NOT_VALID_FOR_PROGRESSIVE_PLAYBACK = 200,
- // 3xx
- MEDIA2_ERROR_FAILED_TO_SET_DATA_SOURCE = 300,
-};
-
-
-// Info and warning codes for the media player framework. These are non fatal,
-// the playback is going on but there might be some user visible issues.
-//
-// Info and warning messages are communicated back to the client using the
-// MediaPlayer2Listener::notify method defined below. In this situation,
-// 'notify' is invoked with the following:
-// 'msg' is set to MEDIA2_INFO.
-// 'ext1' should be a value from the enum media2_info_type.
-// 'ext2' contains an implementation dependent info code to provide
-// more details. Should default to 0 when not used.
-//
-// The codes are distributed as follows:
-// 0xx: Reserved
-// 7xx: Android Player info/warning (e.g. player lagging behind).
-// 8xx: Media info/warning (e.g. media badly interleaved).
-//
-enum media2_info_type {
- // 0xx
- MEDIA2_INFO_UNKNOWN = 1,
- // The player just started the playback of this data source.
- MEDIA2_INFO_DATA_SOURCE_START = 2,
- // The player just pushed the very first video frame for rendering
- MEDIA2_INFO_VIDEO_RENDERING_START = 3,
- // The player just pushed the very first audio frame for rendering
- MEDIA2_INFO_AUDIO_RENDERING_START = 4,
- // The player just completed the playback of this data source
- MEDIA2_INFO_DATA_SOURCE_END = 5,
- // The player just completed the playback of all data sources.
- // But this is not visible in native code. Just keep this entry for completeness.
- MEDIA2_INFO_DATA_SOURCE_LIST_END = 6,
- // The player just completed an iteration of playback loop. This event is sent only when
- // looping is enabled.
- MEDIA2_INFO_DATA_SOURCE_REPEAT = 7,
-
- // 1xx
- // The player just prepared a data source.
- MEDIA2_INFO_PREPARED = 100,
- // The player just completed a call to play().
- MEDIA2_INFO_COMPLETE_CALL_PLAY = 101,
- // The player just completed a call to pause().
- MEDIA2_INFO_COMPLETE_CALL_PAUSE = 102,
- // The player just completed a call to seekTo().
- MEDIA2_INFO_COMPLETE_CALL_SEEK = 103,
-
- // 7xx
- // The video is too complex for the decoder: it can't decode frames fast
- // enough. Possibly only the audio plays fine at this stage.
- MEDIA2_INFO_VIDEO_TRACK_LAGGING = 700,
- // MediaPlayer2 is temporarily pausing playback internally in order to
- // buffer more data.
- MEDIA2_INFO_BUFFERING_START = 701,
- // MediaPlayer2 is resuming playback after filling buffers.
- MEDIA2_INFO_BUFFERING_END = 702,
- // Bandwidth in recent past
- MEDIA2_INFO_NETWORK_BANDWIDTH = 703,
-
- // 8xx
- // Bad interleaving means that a media has been improperly interleaved or not
- // interleaved at all, e.g. has all the video samples first, then all the audio
- // ones. Video is playing but a lot of disk seek may be happening.
- MEDIA2_INFO_BAD_INTERLEAVING = 800,
- // The media is not seekable (e.g. live stream).
- MEDIA2_INFO_NOT_SEEKABLE = 801,
- // New media metadata is available.
- MEDIA2_INFO_METADATA_UPDATE = 802,
- // Audio can not be played.
- MEDIA2_INFO_PLAY_AUDIO_ERROR = 804,
- // Video can not be played.
- MEDIA2_INFO_PLAY_VIDEO_ERROR = 805,
-
- // 9xx
- MEDIA2_INFO_TIMED_TEXT_ERROR = 900,
-};
-
-// Do not change these values without updating their counterparts in MediaPlayer2.java
-enum mediaplayer2_states {
- MEDIAPLAYER2_STATE_IDLE = 1001,
- MEDIAPLAYER2_STATE_PREPARED = 1002,
- MEDIAPLAYER2_STATE_PAUSED = 1003,
- MEDIAPLAYER2_STATE_PLAYING = 1004,
- MEDIAPLAYER2_STATE_ERROR = 1005,
-};
-
-enum media_player2_internal_states {
- MEDIA_PLAYER2_STATE_ERROR = 0,
- MEDIA_PLAYER2_IDLE = 1 << 0,
- MEDIA_PLAYER2_INITIALIZED = 1 << 1,
- MEDIA_PLAYER2_PREPARING = 1 << 2,
- MEDIA_PLAYER2_PREPARED = 1 << 3,
- MEDIA_PLAYER2_STARTED = 1 << 4,
- MEDIA_PLAYER2_PAUSED = 1 << 5,
- MEDIA_PLAYER2_PLAYBACK_COMPLETE = 1 << 6
-};
-
-// Keep KEY_PARAMETER_* in sync with MediaPlayer2.java.
-// The same enum space is used for both set and get, in case there are future keys that
-// can be both set and get. But as of now, all parameters are either set only or get only.
-enum media2_parameter_keys {
- // Streaming/buffering parameters
- MEDIA2_KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS = 1100, // set only
-
- // Return a Parcel containing a single int, which is the channel count of the
- // audio track, or zero for error (e.g. no audio track) or unknown.
- MEDIA2_KEY_PARAMETER_AUDIO_CHANNEL_COUNT = 1200, // get only
-
- // Playback rate expressed in permille (1000 is normal speed), saved as int32_t, with negative
- // values used for rewinding or reverse playback.
- MEDIA2_KEY_PARAMETER_PLAYBACK_RATE_PERMILLE = 1300, // set only
-
- // Set a Parcel containing the value of a parcelled Java AudioAttribute instance
- MEDIA2_KEY_PARAMETER_AUDIO_ATTRIBUTES = 1400 // set only
-};
-
-// Keep INVOKE_ID_* in sync with MediaPlayer2.java.
-enum media_player2_invoke_ids {
- MEDIA_PLAYER2_INVOKE_ID_GET_TRACK_INFO = 1,
- MEDIA_PLAYER2_INVOKE_ID_ADD_EXTERNAL_SOURCE = 2,
- MEDIA_PLAYER2_INVOKE_ID_ADD_EXTERNAL_SOURCE_FD = 3,
- MEDIA_PLAYER2_INVOKE_ID_SELECT_TRACK = 4,
- MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK = 5,
- MEDIA_PLAYER2_INVOKE_ID_SET_VIDEO_SCALING_MODE = 6,
- MEDIA_PLAYER2_INVOKE_ID_GET_SELECTED_TRACK = 7
-};
-
-}; // namespace android
-
-#endif // ANDROID_MEDIAPLAYER2_TYPES_H
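As context for the constants in the header removed above, a minimal sketch of how a MediaPlayer2InterfaceListener implementation would typically interpret msg/ext1/ext2. The LoggingListener type and the LOG_TAG value are hypothetical; a real client would forward these callbacks to the application layer rather than just log them.

#define LOG_TAG "MediaPlayer2ListenerExample"
#include <mediaplayer2/MediaPlayer2Interface.h>
#include <mediaplayer2/MediaPlayer2Types.h>
#include <utils/Log.h>

namespace android {

// Hypothetical listener that only logs the callbacks it receives.
struct LoggingListener : public MediaPlayer2InterfaceListener {
    void notify(int64_t srcId, int msg, int ext1, int ext2,
                const PlayerMessage* /* obj */) override {
        switch (msg) {
            case MEDIA2_ERROR:
                // ext1 is a media2_error_type; ext2 is implementation dependent.
                ALOGE("src %lld: error %d (%d)", (long long)srcId, ext1, ext2);
                break;
            case MEDIA2_INFO:
                // ext1 is a media2_info_type, e.g. MEDIA2_INFO_BUFFERING_START.
                ALOGI("src %lld: info %d (%d)", (long long)srcId, ext1, ext2);
                break;
            default:
                break;
        }
    }
};

}  // namespace android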
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
deleted file mode 100644
index 1e8a1d5..0000000
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIAPLAYER2_H
-#define ANDROID_MEDIAPLAYER2_H
-
-#include <media/AVSyncSettings.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/BufferingSettings.h>
-#include <media/mediaplayer_common.h>
-#include <mediaplayer2/MediaPlayer2Interface.h>
-#include <mediaplayer2/MediaPlayer2Types.h>
-#include <mediaplayer2/JObjectHolder.h>
-
-#include <jni.h>
-#include <utils/Errors.h>
-#include <utils/Mutex.h>
-#include <utils/RefBase.h>
-#include <utils/String16.h>
-#include <utils/Vector.h>
-#include <system/audio-base.h>
-
-#include "jni.h"
-
-namespace android {
-
-struct ANativeWindowWrapper;
-struct DataSourceDesc;
-class MediaPlayer2AudioOutput;
-
-// ref-counted object for callbacks
-class MediaPlayer2Listener: virtual public RefBase
-{
-public:
- virtual void notify(int64_t srcId, int msg, int ext1, int ext2,
- const PlayerMessage *obj = NULL) = 0;
-};
-
-class MediaPlayer2 : public MediaPlayer2InterfaceListener
-{
-public:
- ~MediaPlayer2();
-
- static sp<MediaPlayer2> Create(int32_t sessionId, jobject context);
- static status_t DumpAll(int fd, const Vector<String16>& args);
-
- void disconnect();
-
- status_t getSrcId(int64_t *srcId);
- status_t setDataSource(const sp<DataSourceDesc> &dsd);
- status_t prepareNextDataSource(const sp<DataSourceDesc> &dsd);
- status_t playNextDataSource(int64_t srcId);
- status_t setVideoSurfaceTexture(const sp<ANativeWindowWrapper>& nww);
- status_t setListener(const sp<MediaPlayer2Listener>& listener);
- status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
- status_t setBufferingSettings(const BufferingSettings& buffering);
- status_t prepareAsync();
- status_t start();
- status_t pause();
- bool isPlaying();
- mediaplayer2_states getState();
- status_t setPlaybackSettings(const AudioPlaybackRate& rate);
- status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
- status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
- status_t getSyncSettings(
- AVSyncSettings* sync /* nonnull */,
- float* videoFps /* nonnull */);
- status_t getVideoWidth(int *w);
- status_t getVideoHeight(int *h);
- status_t seekTo(
- int64_t msec,
- MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC);
- status_t notifyAt(int64_t mediaTimeUs);
- status_t getCurrentPosition(int64_t *msec);
- status_t getDuration(int64_t srcId, int64_t *msec);
- status_t reset();
- status_t setAudioStreamType(audio_stream_type_t type);
- status_t getAudioStreamType(audio_stream_type_t *type);
- status_t setLooping(int loop);
- bool isLooping();
- status_t setVolume(float volume);
- void notify(int64_t srcId, int msg, int ext1, int ext2,
- const PlayerMessage *obj = NULL);
- status_t invoke(const PlayerMessage &request, PlayerMessage *reply);
- status_t setAudioSessionId(int32_t sessionId);
- int32_t getAudioSessionId();
- status_t setAuxEffectSendLevel(float level);
- status_t attachAuxEffect(int effectId);
- status_t setAudioAttributes(const jobject attributes);
- jobject getAudioAttributes();
- status_t getParameter(int key, Parcel* reply);
- status_t getMetrics(char **buffer, size_t *length);
-
- // Modular DRM
- status_t prepareDrm(int64_t srcId,
- const uint8_t uuid[16],
- const Vector<uint8_t>& drmSessionId);
- status_t releaseDrm(int64_t srcId);
- // AudioRouting
- status_t setPreferredDevice(jobject device);
- jobject getRoutedDevice();
- status_t addAudioDeviceCallback(jobject routingDelegate);
- status_t removeAudioDeviceCallback(jobject listener);
-
- status_t dump(int fd, const Vector<String16>& args);
-
-private:
- MediaPlayer2(int32_t sessionId, jobject context);
- bool init();
-
- // Disconnect from the currently connected ANativeWindow.
- void disconnectNativeWindow_l();
-
- status_t setAudioAttributes_l(const jobject attributes);
-
- void clear_l();
- status_t seekTo_l(int64_t msec, MediaPlayer2SeekMode mode);
- status_t prepareAsync_l();
- status_t getDuration_l(int64_t *msec);
- status_t reset_l();
- status_t checkState_l();
-
- pid_t mPid;
- uid_t mUid;
- sp<MediaPlayer2Interface> mPlayer;
- sp<MediaPlayer2AudioOutput> mAudioOutput;
- int64_t mSrcId;
- thread_id_t mLockThreadId;
- mutable Mutex mLock;
- Mutex mNotifyLock;
- sp<MediaPlayer2Listener> mListener;
- media_player2_internal_states mCurrentState;
- bool mTransitionToNext;
- int64_t mCurrentPosition;
- MediaPlayer2SeekMode mCurrentSeekMode;
- int64_t mSeekPosition;
- MediaPlayer2SeekMode mSeekMode;
- audio_stream_type_t mStreamType;
- bool mLoop;
- float mVolume;
- int mVideoWidth;
- int mVideoHeight;
- int32_t mAudioSessionId;
- sp<JObjectHolder> mAudioAttributes;
- sp<JObjectHolder> mContext;
- float mSendLevel;
- sp<ANativeWindowWrapper> mConnectedWindow;
-};
-
-}; // namespace android
-
-#endif // ANDROID_MEDIAPLAYER2_H
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
deleted file mode 100644
index de65f8d..0000000
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ /dev/null
@@ -1,1261 +0,0 @@
-/*
-**
-** Copyright 2017, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaPlayer2Native"
-
-#include <android/binder_ibinder.h>
-#include <media/AudioSystem.h>
-#include <media/DataSourceDesc.h>
-#include <media/MemoryLeakTrackUtil.h>
-#include <media/NdkWrapper.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooperRoster.h>
-#include <mediaplayer2/MediaPlayer2AudioOutput.h>
-#include <mediaplayer2/mediaplayer2.h>
-
-#include <utils/Log.h>
-#include <utils/SortedVector.h>
-#include <utils/String8.h>
-
-#include <system/audio.h>
-#include <system/window.h>
-
-#include <nuplayer2/NuPlayer2Driver.h>
-
-#include <dirent.h>
-#include <sys/stat.h>
-
-namespace android {
-
-extern ALooperRoster gLooperRoster;
-
-namespace {
-
-const int kDumpLockRetries = 50;
-const int kDumpLockSleepUs = 20000;
-
-class proxyListener : public MediaPlayer2InterfaceListener {
-public:
- proxyListener(const wp<MediaPlayer2> &player)
- : mPlayer(player) { }
-
- ~proxyListener() { };
-
- virtual void notify(int64_t srcId, int msg, int ext1, int ext2,
- const PlayerMessage *obj) override {
- sp<MediaPlayer2> player = mPlayer.promote();
- if (player != NULL) {
- player->notify(srcId, msg, ext1, ext2, obj);
- }
- }
-
-private:
- wp<MediaPlayer2> mPlayer;
-};
-
-Mutex sRecordLock;
-SortedVector<wp<MediaPlayer2> > *sPlayers;
-
-void ensureInit_l() {
- if (sPlayers == NULL) {
- sPlayers = new SortedVector<wp<MediaPlayer2> >();
- }
-}
-
-void addPlayer(const wp<MediaPlayer2>& player) {
- Mutex::Autolock lock(sRecordLock);
- ensureInit_l();
- sPlayers->add(player);
-}
-
-void removePlayer(const wp<MediaPlayer2>& player) {
- Mutex::Autolock lock(sRecordLock);
- ensureInit_l();
- sPlayers->remove(player);
-}
-
-/**
- * This understands the -c, -von and -voff arguments, which are parsed by
- * ALooperRoster::dump(), plus -m (dump memory addresses) and --unreachable,
- * which are handled below.
- */
-status_t dumpPlayers(int fd, const Vector<String16>& args) {
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- SortedVector< sp<MediaPlayer2> > players; //to serialise the mutex unlock & client destruction.
-
- {
- Mutex::Autolock lock(sRecordLock);
- ensureInit_l();
- for (int i = 0, n = sPlayers->size(); i < n; ++i) {
- sp<MediaPlayer2> p = (*sPlayers)[i].promote();
- if (p != 0) {
- p->dump(fd, args);
- }
- players.add(p);
- }
- }
-
- result.append(" Files opened and/or mapped:\n");
- snprintf(buffer, SIZE, "/proc/%d/maps", getpid());
- FILE *f = fopen(buffer, "r");
- if (f) {
- while (!feof(f)) {
- fgets(buffer, SIZE, f);
- if (strstr(buffer, " /storage/") ||
- strstr(buffer, " /system/sounds/") ||
- strstr(buffer, " /data/") ||
- strstr(buffer, " /system/media/")) {
- result.append(" ");
- result.append(buffer);
- }
- }
- fclose(f);
- } else {
- result.append("couldn't open ");
- result.append(buffer);
- result.append("\n");
- }
-
- snprintf(buffer, SIZE, "/proc/%d/fd", getpid());
- DIR *d = opendir(buffer);
- if (d) {
- struct dirent *ent;
- while((ent = readdir(d)) != NULL) {
- if (strcmp(ent->d_name,".") && strcmp(ent->d_name,"..")) {
- snprintf(buffer, SIZE, "/proc/%d/fd/%s", getpid(), ent->d_name);
- struct stat s;
- if (lstat(buffer, &s) == 0) {
- if ((s.st_mode & S_IFMT) == S_IFLNK) {
- char linkto[256];
- int len = readlink(buffer, linkto, sizeof(linkto));
- if(len > 0) {
- if(len > 255) {
- linkto[252] = '.';
- linkto[253] = '.';
- linkto[254] = '.';
- linkto[255] = 0;
- } else {
- linkto[len] = 0;
- }
- if (strstr(linkto, "/storage/") == linkto ||
- strstr(linkto, "/system/sounds/") == linkto ||
- strstr(linkto, "/data/") == linkto ||
- strstr(linkto, "/system/media/") == linkto) {
- result.append(" ");
- result.append(buffer);
- result.append(" -> ");
- result.append(linkto);
- result.append("\n");
- }
- }
- } else {
- result.append(" unexpected type for ");
- result.append(buffer);
- result.append("\n");
- }
- }
- }
- }
- closedir(d);
- } else {
- result.append("couldn't open ");
- result.append(buffer);
- result.append("\n");
- }
-
- gLooperRoster.dump(fd, args);
-
- bool dumpMem = false;
- bool unreachableMemory = false;
- for (size_t i = 0; i < args.size(); i++) {
- if (args[i] == String16("-m")) {
- dumpMem = true;
- } else if (args[i] == String16("--unreachable")) {
- unreachableMemory = true;
- }
- }
- if (dumpMem) {
- result.append("\nDumping memory:\n");
- std::string s = dumpMemoryAddresses(100 /* limit */);
- result.append(s.c_str(), s.size());
- }
- if (unreachableMemory) {
- result.append("\nDumping unreachable memory:\n");
- // TODO - should limit be an argument parameter?
- // TODO: enable GetUnreachableMemoryString if it's part of stable API
- //std::string s = GetUnreachableMemoryString(true /* contents */, 10000 /* limit */);
- //result.append(s.c_str(), s.size());
- }
-
- write(fd, result.string(), result.size());
- return NO_ERROR;
-}
-
-} // anonymous namespace
-
-//static
-sp<MediaPlayer2> MediaPlayer2::Create(int32_t sessionId, jobject context) {
- sp<MediaPlayer2> player = new MediaPlayer2(sessionId, context);
-
- if (!player->init()) {
- return NULL;
- }
-
- ALOGV("Create new player(%p)", player.get());
-
- addPlayer(player);
- return player;
-}
-
-// static
-status_t MediaPlayer2::DumpAll(int fd, const Vector<String16>& args) {
- return dumpPlayers(fd, args);
-}
-
-MediaPlayer2::MediaPlayer2(int32_t sessionId, jobject context) {
- ALOGV("constructor");
- mSrcId = 0;
- mLockThreadId = 0;
- mListener = NULL;
- mStreamType = AUDIO_STREAM_MUSIC;
- mAudioAttributes = NULL;
- mContext = new JObjectHolder(context);
- mCurrentPosition = -1;
- mCurrentSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
- mSeekPosition = -1;
- mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
- mCurrentState = MEDIA_PLAYER2_IDLE;
- mTransitionToNext = false;
- mLoop = false;
- mVolume = 1.0;
- mVideoWidth = mVideoHeight = 0;
- mSendLevel = 0;
-
- mPid = AIBinder_getCallingPid();
- mUid = AIBinder_getCallingUid();
-
- mAudioOutput = new MediaPlayer2AudioOutput(sessionId, mUid, mPid, NULL /*attributes*/);
-}
-
-MediaPlayer2::~MediaPlayer2() {
- ALOGV("destructor");
- disconnect();
- removePlayer(this);
-}
-
-bool MediaPlayer2::init() {
- // TODO: after merge with NuPlayer2Driver, MediaPlayer2 will have its own
- // looper for notification.
- return true;
-}
-
-void MediaPlayer2::disconnect() {
- ALOGV("disconnect");
- sp<MediaPlayer2Interface> p;
- {
- Mutex::Autolock _l(mLock);
- p = mPlayer;
- mPlayer.clear();
- }
-
- if (p != 0) {
- p->setListener(NULL);
- p->reset();
- }
-
- {
- Mutex::Autolock _l(mLock);
- disconnectNativeWindow_l();
- }
-}
-
-void MediaPlayer2::clear_l() {
- mCurrentPosition = -1;
- mCurrentSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
- mSeekPosition = -1;
- mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
- mVideoWidth = mVideoHeight = 0;
-}
-
-status_t MediaPlayer2::setListener(const sp<MediaPlayer2Listener>& listener) {
- ALOGV("setListener");
- Mutex::Autolock _l(mLock);
- mListener = listener;
- return NO_ERROR;
-}
-
-status_t MediaPlayer2::getSrcId(int64_t *srcId) {
- if (srcId == NULL) {
- return BAD_VALUE;
- }
-
- Mutex::Autolock _l(mLock);
- *srcId = mSrcId;
- return OK;
-}
-
-status_t MediaPlayer2::setDataSource(const sp<DataSourceDesc> &dsd) {
- if (dsd == NULL) {
- return BAD_VALUE;
- }
- // Microsecond is used in NuPlayer2.
- if (dsd->mStartPositionMs > DataSourceDesc::kMaxTimeMs) {
- dsd->mStartPositionMs = DataSourceDesc::kMaxTimeMs;
- ALOGW("setDataSource, start poistion clamped to %lld ms", (long long)dsd->mStartPositionMs);
- }
- if (dsd->mEndPositionMs > DataSourceDesc::kMaxTimeMs) {
- dsd->mEndPositionMs = DataSourceDesc::kMaxTimeMs;
- ALOGW("setDataSource, end poistion clamped to %lld ms", (long long)dsd->mStartPositionMs);
- }
- ALOGV("setDataSource type(%d), srcId(%lld)", dsd->mType, (long long)dsd->mId);
-
- sp<MediaPlayer2Interface> oldPlayer;
-
- {
- Mutex::Autolock _l(mLock);
- if (!((mCurrentState & MEDIA_PLAYER2_IDLE)
- || mCurrentState == MEDIA_PLAYER2_STATE_ERROR)) {
- ALOGE("setDataSource called in wrong state %d", mCurrentState);
- return INVALID_OPERATION;
- }
-
- sp<MediaPlayer2Interface> player = new NuPlayer2Driver(mPid, mUid, mContext);
- status_t err = player->initCheck();
- if (err != NO_ERROR) {
- ALOGE("Failed to create player object, initCheck failed(%d)", err);
- return err;
- }
-
- clear_l();
-
- player->setListener(new proxyListener(this));
- player->setAudioSink(mAudioOutput);
-
- err = player->setDataSource(dsd);
- if (err != OK) {
- ALOGE("setDataSource error: %d", err);
- return err;
- }
-
- oldPlayer = mPlayer;
- mPlayer = player;
- mSrcId = dsd->mId;
- mCurrentState = MEDIA_PLAYER2_INITIALIZED;
- }
-
- if (oldPlayer != NULL) {
- oldPlayer->setListener(NULL);
- oldPlayer->reset();
- }
-
- return OK;
-}
-
-status_t MediaPlayer2::prepareNextDataSource(const sp<DataSourceDesc> &dsd) {
- if (dsd == NULL) {
- return BAD_VALUE;
- }
- ALOGV("prepareNextDataSource type(%d), srcId(%lld)", dsd->mType, (long long)dsd->mId);
-
- Mutex::Autolock _l(mLock);
- if (mPlayer == NULL) {
- ALOGE("prepareNextDataSource failed: state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
- return INVALID_OPERATION;
- }
- return mPlayer->prepareNextDataSource(dsd);
-}
-
-status_t MediaPlayer2::playNextDataSource(int64_t srcId) {
- ALOGV("playNextDataSource srcId(%lld)", (long long)srcId);
-
- Mutex::Autolock _l(mLock);
- if (mPlayer == NULL) {
- ALOGE("playNextDataSource failed: state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
- return INVALID_OPERATION;
- }
- mSrcId = srcId;
- mTransitionToNext = true;
- return mPlayer->playNextDataSource(srcId);
-}
-
-status_t MediaPlayer2::invoke(const PlayerMessage &request, PlayerMessage *reply) {
- Mutex::Autolock _l(mLock);
- const bool hasBeenInitialized =
- (mCurrentState != MEDIA_PLAYER2_STATE_ERROR) &&
- ((mCurrentState & MEDIA_PLAYER2_IDLE) != MEDIA_PLAYER2_IDLE);
- if ((mPlayer == NULL) || !hasBeenInitialized) {
- ALOGE("invoke() failed: wrong state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
- return INVALID_OPERATION;
- }
- return mPlayer->invoke(request, reply);
-}
-
-void MediaPlayer2::disconnectNativeWindow_l() {
- if (mConnectedWindow != NULL && mConnectedWindow->getANativeWindow() != NULL) {
- status_t err = native_window_api_disconnect(
- mConnectedWindow->getANativeWindow(), NATIVE_WINDOW_API_MEDIA);
-
- if (err != OK) {
- ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
- strerror(-err), err);
- }
- }
- mConnectedWindow.clear();
-}
-
-status_t MediaPlayer2::setVideoSurfaceTexture(const sp<ANativeWindowWrapper>& nww) {
- ANativeWindow *anw = (nww == NULL ? NULL : nww->getANativeWindow());
- ALOGV("setVideoSurfaceTexture(%p)", anw);
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return NO_INIT;
- }
-
- if (anw != NULL) {
- if (mConnectedWindow != NULL
- && mConnectedWindow->getANativeWindow() == anw) {
- return OK;
- }
- status_t err = native_window_api_connect(anw, NATIVE_WINDOW_API_MEDIA);
-
- if (err != OK) {
- ALOGE("setVideoSurfaceTexture failed: %d", err);
- // Note that we must do the reset before disconnecting from the ANW.
- // Otherwise queue/dequeue calls could be made on the disconnected
- // ANW, which may result in errors.
- mPlayer->reset();
- disconnectNativeWindow_l();
- return err;
- }
- }
-
- // Note that we must set the player's new GraphicBufferProducer before
- // disconnecting the old one. Otherwise queue/dequeue calls could be made
- // on the disconnected ANW, which may result in errors.
- status_t err = mPlayer->setVideoSurfaceTexture(nww);
-
- disconnectNativeWindow_l();
-
- if (err == OK) {
- mConnectedWindow = nww;
- } else if (anw != NULL) {
- // Keep the original |err| as the return value; only log a disconnect failure.
- status_t disconnectErr = native_window_api_disconnect(anw, NATIVE_WINDOW_API_MEDIA);
-
- if (disconnectErr != OK) {
- ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
- strerror(-disconnectErr), disconnectErr);
- }
- }
-
- return err;
-}
-
-status_t MediaPlayer2::getBufferingSettings(BufferingSettings* buffering /* nonnull */) {
- ALOGV("getBufferingSettings");
-
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return NO_INIT;
- }
-
- status_t ret = mPlayer->getBufferingSettings(buffering);
- if (ret == NO_ERROR) {
- ALOGV("getBufferingSettings{%s}", buffering->toString().string());
- } else {
- ALOGE("getBufferingSettings returned %d", ret);
- }
- return ret;
-}
-
-status_t MediaPlayer2::setBufferingSettings(const BufferingSettings& buffering) {
- ALOGV("setBufferingSettings{%s}", buffering.toString().string());
-
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return NO_INIT;
- }
- return mPlayer->setBufferingSettings(buffering);
-}
-
-status_t MediaPlayer2::setAudioAttributes_l(const jobject attributes) {
- if (mAudioOutput != NULL) {
- mAudioOutput->setAudioAttributes(attributes);
- }
- return NO_ERROR;
-}
-
-status_t MediaPlayer2::prepareAsync() {
- ALOGV("prepareAsync");
- Mutex::Autolock _l(mLock);
- if ((mPlayer != 0) && (mCurrentState & MEDIA_PLAYER2_INITIALIZED)) {
- if (mAudioAttributes != NULL) {
- status_t err = setAudioAttributes_l(mAudioAttributes->getJObject());
- if (err != OK) {
- return err;
- }
- }
- mCurrentState = MEDIA_PLAYER2_PREPARING;
- return mPlayer->prepareAsync();
- }
- ALOGE("prepareAsync called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
- return INVALID_OPERATION;
-}
-
-status_t MediaPlayer2::start() {
- ALOGV("start");
-
- status_t ret = NO_ERROR;
- Mutex::Autolock _l(mLock);
-
- mLockThreadId = getThreadId();
-
- if (mCurrentState & MEDIA_PLAYER2_STARTED) {
- ret = NO_ERROR;
- } else if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER2_PREPARED |
- MEDIA_PLAYER2_PLAYBACK_COMPLETE | MEDIA_PLAYER2_PAUSED ) ) ) {
- mPlayer->setLooping(mLoop);
-
- if (mAudioOutput != 0) {
- mAudioOutput->setVolume(mVolume);
- }
-
- if (mAudioOutput != 0) {
- mAudioOutput->setAuxEffectSendLevel(mSendLevel);
- }
- mCurrentState = MEDIA_PLAYER2_STARTED;
- ret = mPlayer->start();
- if (ret != NO_ERROR) {
- mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
- } else {
- if (mCurrentState == MEDIA_PLAYER2_PLAYBACK_COMPLETE) {
- ALOGV("playback completed immediately following start()");
- }
- }
- } else {
- ALOGE("start called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
- ret = INVALID_OPERATION;
- }
-
- mLockThreadId = 0;
-
- return ret;
-}
-
-status_t MediaPlayer2::pause() {
- ALOGV("pause");
- Mutex::Autolock _l(mLock);
- if (mCurrentState & (MEDIA_PLAYER2_PAUSED|MEDIA_PLAYER2_PLAYBACK_COMPLETE))
- return NO_ERROR;
- if ((mPlayer != 0) && (mCurrentState & (MEDIA_PLAYER2_STARTED | MEDIA_PLAYER2_PREPARED))) {
- status_t ret = mPlayer->pause();
- if (ret != NO_ERROR) {
- mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
- } else {
- mCurrentState = MEDIA_PLAYER2_PAUSED;
- mTransitionToNext = false;
- }
- return ret;
- }
- ALOGE("pause called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
- return INVALID_OPERATION;
-}
-
-bool MediaPlayer2::isPlaying() {
- Mutex::Autolock _l(mLock);
- if (mPlayer != 0) {
- bool temp = mPlayer->isPlaying();
- ALOGV("isPlaying: %d", temp);
- if ((mCurrentState & MEDIA_PLAYER2_STARTED) && ! temp) {
- ALOGE("internal/external state mismatch corrected");
- mCurrentState = MEDIA_PLAYER2_PAUSED;
- } else if ((mCurrentState & MEDIA_PLAYER2_PAUSED) && temp) {
- ALOGE("internal/external state mismatch corrected");
- mCurrentState = MEDIA_PLAYER2_STARTED;
- }
- return temp;
- }
- ALOGV("isPlaying: no active player");
- return false;
-}
-
-mediaplayer2_states MediaPlayer2::getState() {
- Mutex::Autolock _l(mLock);
- if (mCurrentState & MEDIA_PLAYER2_STATE_ERROR) {
- return MEDIAPLAYER2_STATE_ERROR;
- }
- if (mPlayer == 0
- || (mCurrentState &
- (MEDIA_PLAYER2_IDLE | MEDIA_PLAYER2_INITIALIZED | MEDIA_PLAYER2_PREPARING))) {
- return MEDIAPLAYER2_STATE_IDLE;
- }
- if (mCurrentState & MEDIA_PLAYER2_STARTED) {
- return MEDIAPLAYER2_STATE_PLAYING;
- }
- if (mCurrentState & (MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE)) {
- return MEDIAPLAYER2_STATE_PAUSED;
- }
- // now only mCurrentState & MEDIA_PLAYER2_PREPARED is true
- return MEDIAPLAYER2_STATE_PREPARED;
-}
-
-status_t MediaPlayer2::setPlaybackSettings(const AudioPlaybackRate& rate) {
- ALOGV("setPlaybackSettings: %f %f %d %d",
- rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
- // Negative speed and pitch does not make sense. Further validation will
- // be done by the respective mediaplayers.
- if (rate.mSpeed <= 0.f || rate.mPitch < 0.f) {
- return BAD_VALUE;
- }
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return INVALID_OPERATION;
- }
-
- status_t err = mPlayer->setPlaybackSettings(rate);
- return err;
-}
-
-status_t MediaPlayer2::getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return INVALID_OPERATION;
- }
- status_t ret = mPlayer->getPlaybackSettings(rate);
- if (ret == NO_ERROR) {
- ALOGV("getPlaybackSettings(%f, %f, %d, %d)",
- rate->mSpeed, rate->mPitch, rate->mFallbackMode, rate->mStretchMode);
- } else {
- ALOGV("getPlaybackSettings returned %d", ret);
- }
- return ret;
-}
-
-status_t MediaPlayer2::setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) {
- ALOGV("setSyncSettings: %u %u %f %f",
- sync.mSource, sync.mAudioAdjustMode, sync.mTolerance, videoFpsHint);
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) return INVALID_OPERATION;
- return mPlayer->setSyncSettings(sync, videoFpsHint);
-}
-
-status_t MediaPlayer2::getSyncSettings(
- AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return INVALID_OPERATION;
- }
- status_t ret = mPlayer->getSyncSettings(sync, videoFps);
- if (ret == NO_ERROR) {
- ALOGV("getSyncSettings(%u, %u, %f, %f)",
- sync->mSource, sync->mAudioAdjustMode, sync->mTolerance, *videoFps);
- } else {
- ALOGV("getSyncSettings returned %d", ret);
- }
- return ret;
-}
-
-status_t MediaPlayer2::getVideoWidth(int *w) {
- ALOGV("getVideoWidth");
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return INVALID_OPERATION;
- }
- *w = mVideoWidth;
- return NO_ERROR;
-}
-
-status_t MediaPlayer2::getVideoHeight(int *h) {
- ALOGV("getVideoHeight");
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return INVALID_OPERATION;
- }
- *h = mVideoHeight;
- return NO_ERROR;
-}
-
-status_t MediaPlayer2::getCurrentPosition(int64_t *msec) {
- ALOGV("getCurrentPosition");
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return INVALID_OPERATION;
- }
- if (mCurrentPosition >= 0) {
- ALOGV("Using cached seek position: %lld", (long long)mCurrentPosition);
- *msec = mCurrentPosition;
- return NO_ERROR;
- }
- status_t ret = mPlayer->getCurrentPosition(msec);
- if (ret == NO_ERROR) {
- ALOGV("getCurrentPosition = %lld", (long long)*msec);
- } else {
- ALOGE("getCurrentPosition returned %d", ret);
- }
- return ret;
-}
-
-status_t MediaPlayer2::getDuration(int64_t srcId, int64_t *msec) {
- Mutex::Autolock _l(mLock);
- // TODO: cache duration for currentSrcId and nextSrcId, and return correct
- // value for nextSrcId.
- if (srcId != mSrcId) {
- *msec = -1;
- return OK;
- }
-
- ALOGV("getDuration_l");
- bool isValidState = (mCurrentState & (MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
- MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE));
- if (mPlayer == 0 || !isValidState) {
- ALOGE("Attempt to call getDuration in wrong state: mPlayer=%p, mCurrentState=%u",
- mPlayer.get(), mCurrentState);
- return INVALID_OPERATION;
- }
- int64_t durationMs;
- status_t ret = mPlayer->getDuration(&durationMs);
-
- if (ret == NO_ERROR) {
- ALOGV("getDuration = %lld", (long long)durationMs);
- } else {
- ALOGE("getDuration returned %d", ret);
- // Do not enter error state just because no duration was available.
- durationMs = -1;
- }
-
- if (msec) {
- *msec = durationMs;
- }
- return OK;
-}
-
-status_t MediaPlayer2::seekTo_l(int64_t msec, MediaPlayer2SeekMode mode) {
- ALOGV("seekTo (%lld, %d)", (long long)msec, mode);
- if ((mPlayer == 0) || !(mCurrentState & (MEDIA_PLAYER2_STARTED | MEDIA_PLAYER2_PREPARED |
- MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE))) {
- ALOGE("Attempt to perform seekTo in wrong state: mPlayer=%p, mCurrentState=%u",
- mPlayer.get(), mCurrentState);
- return INVALID_OPERATION;
- }
- if (msec < 0) {
- ALOGW("Attempt to seek to invalid position: %lld", (long long)msec);
- msec = 0;
- }
-
- int64_t durationMs;
- status_t err = mPlayer->getDuration(&durationMs);
-
- if (err != OK) {
- ALOGW("Stream has no duration and is therefore not seekable.");
- return err;
- }
-
- if (msec > durationMs) {
- ALOGW("Attempt to seek to past end of file: request = %lld, durationMs = %lld",
- (long long)msec, (long long)durationMs);
-
- msec = durationMs;
- }
-
- // cache the requested seek position and mode
- mCurrentPosition = msec;
- mCurrentSeekMode = mode;
- if (mSeekPosition < 0) {
- mSeekPosition = msec;
- mSeekMode = mode;
- return mPlayer->seekTo(msec, mode);
- }
- ALOGV("Seek in progress - queue up seekTo[%lld, %d]", (long long)msec, mode);
- return NO_ERROR;
-}
-
-status_t MediaPlayer2::seekTo(int64_t msec, MediaPlayer2SeekMode mode) {
- mLockThreadId = getThreadId();
- Mutex::Autolock _l(mLock);
- status_t result = seekTo_l(msec, mode);
- mLockThreadId = 0;
-
- return result;
-}
-
-status_t MediaPlayer2::notifyAt(int64_t mediaTimeUs) {
- Mutex::Autolock _l(mLock);
- if (mPlayer == 0) {
- return INVALID_OPERATION;
- }
-
- return mPlayer->notifyAt(mediaTimeUs);
-}
-
-status_t MediaPlayer2::reset_l() {
- mLoop = false;
- if (mCurrentState == MEDIA_PLAYER2_IDLE) {
- return NO_ERROR;
- }
- if (mPlayer != 0) {
- status_t ret = mPlayer->reset();
- if (ret != NO_ERROR) {
- ALOGE("reset() failed with return code (%d)", ret);
- mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
- } else {
- mPlayer->setListener(NULL);
- mCurrentState = MEDIA_PLAYER2_IDLE;
- mTransitionToNext = false;
- }
- // setDataSource has to be called again to create a
- // new mediaplayer.
- mPlayer = 0;
- return ret;
- }
- clear_l();
- return NO_ERROR;
-}
-
-status_t MediaPlayer2::reset() {
- ALOGV("reset");
- mLockThreadId = getThreadId();
- Mutex::Autolock _l(mLock);
- status_t result = reset_l();
- mLockThreadId = 0;
-
- return result;
-}
-
-status_t MediaPlayer2::setAudioStreamType(audio_stream_type_t type) {
- ALOGV("MediaPlayer2::setAudioStreamType");
- Mutex::Autolock _l(mLock);
- if (mStreamType == type) return NO_ERROR;
- if (mCurrentState & ( MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
- MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE ) ) {
- // Can't change the stream type after prepare
- ALOGE("setAudioStream called in state %d", mCurrentState);
- return INVALID_OPERATION;
- }
- // cache
- mStreamType = type;
- return OK;
-}
-
-status_t MediaPlayer2::getAudioStreamType(audio_stream_type_t *type) {
- ALOGV("getAudioStreamType");
- Mutex::Autolock _l(mLock);
- *type = mStreamType;
- return OK;
-}
-
-status_t MediaPlayer2::setLooping(int loop) {
- ALOGV("MediaPlayer2::setLooping");
- Mutex::Autolock _l(mLock);
- mLoop = (loop != 0);
- if (mPlayer != 0) {
- return mPlayer->setLooping(loop);
- }
- return OK;
-}
-
-bool MediaPlayer2::isLooping() {
- ALOGV("isLooping");
- Mutex::Autolock _l(mLock);
- if (mPlayer != 0) {
- return mLoop;
- }
- ALOGV("isLooping: no active player");
- return false;
-}
-
-status_t MediaPlayer2::setVolume(float volume) {
- ALOGV("MediaPlayer2::setVolume(%f)", volume);
- Mutex::Autolock _l(mLock);
- mVolume = volume;
- if (mAudioOutput != 0) {
- mAudioOutput->setVolume(volume);
- }
- return OK;
-}
-
-status_t MediaPlayer2::setAudioSessionId(int32_t sessionId) {
- ALOGV("MediaPlayer2::setAudioSessionId(%d)", sessionId);
- Mutex::Autolock _l(mLock);
- if (!(mCurrentState & MEDIA_PLAYER2_IDLE)) {
- ALOGE("setAudioSessionId called in state %d", mCurrentState);
- return INVALID_OPERATION;
- }
- if (sessionId < 0) {
- return BAD_VALUE;
- }
- if (mAudioOutput != NULL && sessionId != mAudioOutput->getSessionId()) {
- mAudioOutput->setSessionId(sessionId);
- }
- return NO_ERROR;
-}
-
-int32_t MediaPlayer2::getAudioSessionId() {
- Mutex::Autolock _l(mLock);
- if (mAudioOutput != NULL) {
- return mAudioOutput->getSessionId();
- }
- return 0;
-}
-
-status_t MediaPlayer2::setAuxEffectSendLevel(float level) {
- ALOGV("MediaPlayer2::setAuxEffectSendLevel(%f)", level);
- Mutex::Autolock _l(mLock);
- mSendLevel = level;
- if (mAudioOutput != 0) {
- return mAudioOutput->setAuxEffectSendLevel(level);
- }
- return OK;
-}
-
-status_t MediaPlayer2::attachAuxEffect(int effectId) {
- ALOGV("MediaPlayer2::attachAuxEffect(%d)", effectId);
- Mutex::Autolock _l(mLock);
- if (mAudioOutput == 0 ||
- (mCurrentState & MEDIA_PLAYER2_IDLE) ||
- (mCurrentState == MEDIA_PLAYER2_STATE_ERROR )) {
- ALOGE("attachAuxEffect called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
- return INVALID_OPERATION;
- }
-
- return mAudioOutput->attachAuxEffect(effectId);
-}
-
-// always call with lock held
-status_t MediaPlayer2::checkState_l() {
- if (mCurrentState & ( MEDIA_PLAYER2_PREPARED | MEDIA_PLAYER2_STARTED |
- MEDIA_PLAYER2_PAUSED | MEDIA_PLAYER2_PLAYBACK_COMPLETE) ) {
- // Can't change the audio attributes after prepare
- ALOGE("trying to set audio attributes called in state %d", mCurrentState);
- return INVALID_OPERATION;
- }
- return OK;
-}
-
-status_t MediaPlayer2::setAudioAttributes(const jobject attributes) {
- ALOGV("MediaPlayer2::setAudioAttributes");
- status_t status = INVALID_OPERATION;
- Mutex::Autolock _l(mLock);
- if (checkState_l() != OK) {
- return status;
- }
- mAudioAttributes = new JObjectHolder(attributes);
- status = setAudioAttributes_l(attributes);
- return status;
-}
-
-jobject MediaPlayer2::getAudioAttributes() {
- ALOGV("MediaPlayer2::getAudioAttributes)");
- Mutex::Autolock _l(mLock);
- return mAudioAttributes != NULL ? mAudioAttributes->getJObject() : NULL;
-}
-
-status_t MediaPlayer2::getParameter(int key, Parcel *reply) {
- ALOGV("MediaPlayer2::getParameter(%d)", key);
- Mutex::Autolock _l(mLock);
- if (mPlayer == NULL) {
- ALOGV("getParameter: no active player");
- return INVALID_OPERATION;
- }
-
- status_t status = mPlayer->getParameter(key, reply);
- if (status != OK) {
- ALOGD("getParameter returns %d", status);
- }
- return status;
-}
-
-// for mediametrics
-status_t MediaPlayer2::getMetrics(char **buffer, size_t *length) {
- ALOGD("MediaPlayer2::getMetrics()");
- Mutex::Autolock _l(mLock);
- if (mPlayer == NULL) {
- ALOGV("getMetrics: no active player");
- return INVALID_OPERATION;
- }
-
- status_t status = mPlayer->getMetrics(buffer, length);
- if (status != OK) {
- ALOGD("getMetrics returns %d", status);
- }
- return status;
-}
-
-void MediaPlayer2::notify(int64_t srcId, int msg, int ext1, int ext2, const PlayerMessage *obj) {
- ALOGV("message received srcId=%lld, msg=%d, ext1=%d, ext2=%d",
- (long long)srcId, msg, ext1, ext2);
-
- bool send = true;
- bool locked = false;
-
- // TODO: In the future, we might be on the same thread if the app is
- // running in the same process as the media server. In that case,
- // this will deadlock.
- //
- // The threadId hack below works around this for the case of prepare,
- // seekTo, start, and reset within the same process.
- // FIXME: Remember, this is a hack, it's not even a hack that is applied
- // consistently for all use-cases, this needs to be revisited.
- if (mLockThreadId != getThreadId()) {
- mLock.lock();
- locked = true;
- }
-
- // Allows calls from JNI in idle state to notify errors
- if (!(msg == MEDIA2_ERROR && mCurrentState == MEDIA_PLAYER2_IDLE) && mPlayer == 0) {
- ALOGV("notify(%lld, %d, %d, %d) callback on disconnected mediaplayer",
- (long long)srcId, msg, ext1, ext2);
- if (locked) mLock.unlock(); // release the lock when done.
- return;
- }
-
- switch (msg) {
- case MEDIA2_NOP: // interface test message
- break;
- case MEDIA2_PREPARED:
- ALOGV("MediaPlayer2::notify() prepared, srcId=%lld", (long long)srcId);
- if (srcId == mSrcId) {
- mCurrentState = MEDIA_PLAYER2_PREPARED;
- }
- break;
- case MEDIA2_DRM_INFO:
- ALOGV("MediaPlayer2::notify() MEDIA2_DRM_INFO(%lld, %d, %d, %d, %p)",
- (long long)srcId, msg, ext1, ext2, obj);
- break;
- case MEDIA2_PLAYBACK_COMPLETE:
- ALOGV("playback complete");
- if (mCurrentState == MEDIA_PLAYER2_IDLE) {
- ALOGE("playback complete in idle state");
- }
- if (!mLoop && srcId == mSrcId) {
- mCurrentState = MEDIA_PLAYER2_PLAYBACK_COMPLETE;
- }
- break;
- case MEDIA2_ERROR:
- // Always log errors.
- // ext1: Media framework error code.
- // ext2: Implementation dependent error code.
- ALOGE("error (%d, %d)", ext1, ext2);
- mCurrentState = MEDIA_PLAYER2_STATE_ERROR;
- break;
- case MEDIA2_INFO:
- // ext1: Media framework info code.
- // ext2: Implementation dependent info code.
- if (ext1 != MEDIA2_INFO_VIDEO_TRACK_LAGGING) {
- ALOGW("info/warning (%d, %d)", ext1, ext2);
-
- if (ext1 == MEDIA2_INFO_DATA_SOURCE_START && srcId == mSrcId && mTransitionToNext) {
- mCurrentState = MEDIA_PLAYER2_STARTED;
- mTransitionToNext = false;
- }
- }
- break;
- case MEDIA2_SEEK_COMPLETE:
- ALOGV("Received seek complete");
- if (mSeekPosition != mCurrentPosition || (mSeekMode != mCurrentSeekMode)) {
- ALOGV("Executing queued seekTo(%lld, %d)",
- (long long)mCurrentPosition, mCurrentSeekMode);
- mSeekPosition = -1;
- mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
- seekTo_l(mCurrentPosition, mCurrentSeekMode);
- }
- else {
- ALOGV("All seeks complete - return to regularly scheduled program");
- mCurrentPosition = mSeekPosition = -1;
- mCurrentSeekMode = mSeekMode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC;
- }
- break;
- case MEDIA2_BUFFERING_UPDATE:
- ALOGV("buffering %d", ext1);
- break;
- case MEDIA2_SET_VIDEO_SIZE:
- ALOGV("New video size %d x %d", ext1, ext2);
- mVideoWidth = ext1;
- mVideoHeight = ext2;
- break;
- case MEDIA2_NOTIFY_TIME:
- ALOGV("Received notify time message");
- break;
- case MEDIA2_TIMED_TEXT:
- ALOGV("Received timed text message");
- break;
- case MEDIA2_SUBTITLE_DATA:
- ALOGV("Received subtitle data message");
- break;
- case MEDIA2_META_DATA:
- ALOGV("Received timed metadata message");
- break;
- default:
- ALOGV("unrecognized message: (%d, %d, %d)", msg, ext1, ext2);
- break;
- }
-
- sp<MediaPlayer2Listener> listener = mListener;
- if (locked) mLock.unlock();
-
- // this prevents re-entrant calls into client code
- if ((listener != 0) && send) {
- Mutex::Autolock _l(mNotifyLock);
- ALOGV("callback application");
- listener->notify(srcId, msg, ext1, ext2, obj);
- ALOGV("back from callback");
- }
-}
-
-// Modular DRM
-status_t MediaPlayer2::prepareDrm(
- int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId) {
- // TODO change to ALOGV
- ALOGD("prepareDrm: uuid: %p drmSessionId: %p(%zu)", uuid,
- drmSessionId.array(), drmSessionId.size());
- Mutex::Autolock _l(mLock);
- if (mPlayer == NULL) {
- return NO_INIT;
- }
-
- // Only allow it in the player's preparing/prepared state.
- // We get here only if MEDIA2_DRM_INFO has already arrived (e.g., prepare is half-way through or
- // completed) so the state change to "prepared" might not have happened yet (e.g., buffering).
- // Still, we can allow prepareDrm for the use case of being called in OnDrmInfoListener.
- if (!(mCurrentState & (MEDIA_PLAYER2_PREPARING | MEDIA_PLAYER2_PREPARED))) {
- ALOGW("prepareDrm(%lld) called in non-prepare state(%d)", (long long)srcId, mCurrentState);
- if (srcId == mSrcId) {
- return INVALID_OPERATION;
- }
- }
-
- if (drmSessionId.isEmpty()) {
- ALOGE("prepareDrm: Unexpected. Can't proceed with crypto. Empty drmSessionId.");
- return INVALID_OPERATION;
- }
-
- // Passing down to mediaserver mainly for creating the crypto
- status_t status = mPlayer->prepareDrm(srcId, uuid, drmSessionId);
- ALOGE_IF(status != OK, "prepareDrm: Failed at mediaserver with ret: %d", status);
-
- // TODO change to ALOGV
- ALOGD("prepareDrm: mediaserver::prepareDrm ret=%d", status);
-
- return status;
-}
-
-status_t MediaPlayer2::releaseDrm(int64_t srcId) {
- Mutex::Autolock _l(mLock);
- if (mPlayer == NULL) {
- return NO_INIT;
- }
-
- // Not allowing releaseDrm in an active/resumable state
- if (mCurrentState & (MEDIA_PLAYER2_STARTED |
- MEDIA_PLAYER2_PAUSED |
- MEDIA_PLAYER2_PLAYBACK_COMPLETE |
- MEDIA_PLAYER2_STATE_ERROR)) {
- ALOGE("releaseDrm Unexpected state %d. Can only be called in stopped/idle.", mCurrentState);
- return INVALID_OPERATION;
- }
-
- status_t status = mPlayer->releaseDrm(srcId);
- // TODO change to ALOGV
- ALOGD("releaseDrm: mediaserver::releaseDrm ret: %d", status);
- if (status != OK) {
- ALOGE("releaseDrm: Failed at mediaserver with ret: %d", status);
- // Overriding to OK so the client proceed with its own cleanup
- // Client can't do more cleanup. mediaserver release its crypto at end of session anyway.
- status = OK;
- }
-
- return status;
-}
-
-status_t MediaPlayer2::setPreferredDevice(jobject device) {
- Mutex::Autolock _l(mLock);
- if (mAudioOutput == NULL) {
- ALOGV("setPreferredDevice: audio sink not init");
- return NO_INIT;
- }
- return mAudioOutput->setPreferredDevice(device);
-}
-
-jobject MediaPlayer2::getRoutedDevice() {
- Mutex::Autolock _l(mLock);
- if (mAudioOutput == NULL) {
- ALOGV("getRoutedDevice: audio sink not init");
- return nullptr;
- }
- return mAudioOutput->getRoutedDevice();
-}
-
-status_t MediaPlayer2::addAudioDeviceCallback(jobject routingDelegate) {
- Mutex::Autolock _l(mLock);
- if (mAudioOutput == NULL) {
- ALOGV("addAudioDeviceCallback: player not init");
- return NO_INIT;
- }
- return mAudioOutput->addAudioDeviceCallback(routingDelegate);
-}
-
-status_t MediaPlayer2::removeAudioDeviceCallback(jobject listener) {
- Mutex::Autolock _l(mLock);
- if (mAudioOutput == NULL) {
- ALOGV("addAudioDeviceCallback: player not init");
- return NO_INIT;
- }
- return mAudioOutput->removeAudioDeviceCallback(listener);
-}
-
-status_t MediaPlayer2::dump(int fd, const Vector<String16>& args) {
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
- result.append(" MediaPlayer2\n");
- snprintf(buffer, 255, " pid(%d), looping(%s)\n", mPid, mLoop?"true": "false");
- result.append(buffer);
-
- sp<MediaPlayer2Interface> player;
- sp<MediaPlayer2AudioOutput> audioOutput;
- bool locked = false;
- for (int i = 0; i < kDumpLockRetries; ++i) {
- if (mLock.tryLock() == NO_ERROR) {
- locked = true;
- break;
- }
- usleep(kDumpLockSleepUs);
- }
-
- if (locked) {
- player = mPlayer;
- audioOutput = mAudioOutput;
- mLock.unlock();
- } else {
- result.append(" lock is taken, no dump from player and audio output\n");
- }
- write(fd, result.string(), result.size());
-
- if (player != NULL) {
- player->dump(fd, args);
- }
- if (audioOutput != 0) {
- audioOutput->dump(fd, args);
- }
- write(fd, "\n", 1);
- return NO_ERROR;
-}
-
-} // namespace android
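For orientation, a minimal sketch of the call sequence a native client made against the MediaPlayer2 API deleted above, assuming the removed headers are still on the include path. The playOnce helper is hypothetical, the DataSourceDesc and listener setup are elided, and real callers went through the Java MediaPlayer2 JNI layer rather than this API directly.

#include <mediaplayer2/mediaplayer2.h>

using namespace android;

// Hypothetical driver showing the state transitions the removed code managed:
// IDLE -> INITIALIZED -> PREPARING -> PREPARED -> STARTED.
static status_t playOnce(const sp<DataSourceDesc>& dsd, jobject context) {
    sp<MediaPlayer2> mp = MediaPlayer2::Create(0 /* sessionId */, context);
    if (mp == nullptr) return NO_INIT;

    status_t err = mp->setDataSource(dsd);      // IDLE -> INITIALIZED
    if (err != OK) return err;
    err = mp->prepareAsync();                   // INITIALIZED -> PREPARING
    if (err != OK) return err;
    // ... wait for MEDIA2_PREPARED via the registered MediaPlayer2Listener ...
    err = mp->start();                          // PREPARED -> STARTED
    // ... playback; MEDIA2_PLAYBACK_COMPLETE arrives when the source ends ...
    mp->reset();                                // back to IDLE
    return err;
}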
diff --git a/media/libmediaplayer2/nuplayer2/Android.bp b/media/libmediaplayer2/nuplayer2/Android.bp
deleted file mode 100644
index 0f69b2e..0000000
--- a/media/libmediaplayer2/nuplayer2/Android.bp
+++ /dev/null
@@ -1,72 +0,0 @@
-cc_library_static {
-
- srcs: [
- "JMediaPlayer2Utils.cpp",
- "JWakeLock.cpp",
- "GenericSource2.cpp",
- "HTTPLiveSource2.cpp",
- "NuPlayer2.cpp",
- "NuPlayer2CCDecoder.cpp",
- "NuPlayer2Decoder.cpp",
- "NuPlayer2DecoderBase.cpp",
- "NuPlayer2DecoderPassThrough.cpp",
- "NuPlayer2Driver.cpp",
- "NuPlayer2Drm.cpp",
- "NuPlayer2Renderer.cpp",
- "RTSPSource2.cpp",
- ],
-
- header_libs: [
- "libbase_headers",
- "libmediaplayer2_headers",
- "media_plugin_headers",
- ],
-
- include_dirs: [
- "frameworks/av/media/libstagefright",
- "frameworks/av/media/libstagefright/httplive",
- "frameworks/av/media/libstagefright/include",
- "frameworks/av/media/libstagefright/mpeg2ts",
- "frameworks/av/media/libstagefright/rtsp",
- "frameworks/av/media/libstagefright/timedtext",
- "frameworks/av/media/ndk",
- "frameworks/base/core/jni",
- ],
-
- cflags: [
- "-Werror",
- "-Wall",
- ],
-
- product_variables: {
- debuggable: {
- cflags: [
- "-DENABLE_STAGEFRIGHT_EXPERIMENTS",
- ],
- }
- },
-
- shared_libs: [
- "libbinder",
- "libui",
- "libgui",
- "libmedia",
- "libmediametrics",
- "libmediandk",
- "libmediandk_utils",
- "libpowermanager",
- ],
-
- static_libs: [
- "libmedia_helper",
- "libmediaplayer2-protos",
- "libmedia2_jni_core",
- ],
-
- name: "libstagefright_nuplayer2",
-
- sanitize: {
- cfi: true,
- },
-
-}
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp b/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
deleted file mode 100644
index 9552580..0000000
--- a/media/libmediaplayer2/nuplayer2/GenericSource2.cpp
+++ /dev/null
@@ -1,1547 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "GenericSource2"
-
-#include "GenericSource2.h"
-#include "NuPlayer2Drm.h"
-
-#include "AnotherPacketSource.h"
-#include <cutils/properties.h>
-#include <media/DataSource.h>
-#include <media/MediaBufferHolder.h>
-#include <media/NdkWrapper.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaClock.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/NdkUtils.h>
-#include <media/stagefright/Utils.h>
-
-namespace android {
-
-static const int kInitialMarkMs = 5000; // 5secs
-
-//static const int kPausePlaybackMarkMs = 2000; // 2secs
-static const int kResumePlaybackMarkMs = 15000; // 15secs
-
-NuPlayer2::GenericSource2::GenericSource2(
- const sp<AMessage> ¬ify,
- uid_t uid,
- const sp<MediaClock> &mediaClock)
- : Source(notify),
- mAudioTimeUs(0),
- mAudioLastDequeueTimeUs(0),
- mVideoTimeUs(0),
- mVideoLastDequeueTimeUs(0),
- mPrevBufferPercentage(-1),
- mPollBufferingGeneration(0),
- mSentPauseOnBuffering(false),
- mAudioDataGeneration(0),
- mVideoDataGeneration(0),
- mFetchSubtitleDataGeneration(0),
- mFetchTimedTextDataGeneration(0),
- mDurationUs(-1ll),
- mAudioIsVorbis(false),
- mIsSecure(false),
- mIsStreaming(false),
- mUID(uid),
- mMediaClock(mediaClock),
- mFd(-1),
- mBitrate(-1ll),
- mPendingReadBufferTypes(0) {
- ALOGV("GenericSource2");
- CHECK(mediaClock != NULL);
-
- mBufferingSettings.mInitialMarkMs = kInitialMarkMs;
- mBufferingSettings.mResumePlaybackMarkMs = kResumePlaybackMarkMs;
- resetDataSource();
-}
-
-void NuPlayer2::GenericSource2::resetDataSource() {
- ALOGV("resetDataSource");
-
- mDisconnected = false;
- mUri.clear();
- mUriHeaders.clear();
- if (mFd >= 0) {
- close(mFd);
- mFd = -1;
- }
- mOffset = 0;
- mLength = 0;
- mStarted = false;
- mPreparing = false;
-
- mIsDrmProtected = false;
- mIsDrmReleased = false;
- mIsSecure = false;
- mMimes.clear();
-}
-
-status_t NuPlayer2::GenericSource2::setDataSource(
- const char *url,
- const KeyedVector<String8, String8> *headers) {
- Mutex::Autolock _l(mLock);
- ALOGV("setDataSource url: %s", url);
-
- resetDataSource();
-
- mUri = url;
-
- if (headers) {
- mUriHeaders = *headers;
- }
-
- // delay data source creation to prepareAsync() to avoid blocking
- // the calling thread in setDataSource for any significant time.
- return OK;
-}
-
-status_t NuPlayer2::GenericSource2::setDataSource(
- int fd, int64_t offset, int64_t length) {
- Mutex::Autolock _l(mLock);
- ALOGV("setDataSource %d/%lld/%lld", fd, (long long)offset, (long long)length);
-
- resetDataSource();
-
- mFd = dup(fd);
- mOffset = offset;
- mLength = length;
-
- // delay data source creation to prepareAsync() to avoid blocking
- // the calling thread in setDataSource for any significant time.
- return OK;
-}
-
-status_t NuPlayer2::GenericSource2::setDataSource(const sp<DataSource>& source) {
- Mutex::Autolock _l(mLock);
- ALOGV("setDataSource (source: %p)", source.get());
-
- resetDataSource();
- mDataSourceWrapper = new AMediaDataSourceWrapper(source);
- return OK;
-}
-
-sp<MetaData> NuPlayer2::GenericSource2::getFileFormatMeta() const {
- Mutex::Autolock _l(mLock);
- return mFileMeta;
-}
-
-status_t NuPlayer2::GenericSource2::initFromDataSource() {
- mExtractor = new AMediaExtractorWrapper(AMediaExtractor_new());
- CHECK(mFd >= 0 || mDataSourceWrapper != NULL);
- sp<AMediaDataSourceWrapper> aSourceWrapper = mDataSourceWrapper;
- const int fd = mFd;
-
- mLock.unlock();
- // This might take a long time if the data source is not reliable.
- status_t err;
- if (aSourceWrapper != NULL) {
- err = mExtractor->setDataSource(aSourceWrapper->getAMediaDataSource());
- } else {
- err = mExtractor->setDataSource(fd, mOffset, mLength);
- }
-
- if (err != OK) {
- ALOGE("initFromDataSource, failed to set extractor data source!");
- mLock.lock();
- return UNKNOWN_ERROR;
- }
-
- size_t numtracks = mExtractor->getTrackCount();
- if (numtracks == 0) {
- ALOGE("initFromDataSource, source has no track!");
- mLock.lock();
- return UNKNOWN_ERROR;
- }
-
- mFileMeta = convertMediaFormatWrapperToMetaData(mExtractor->getFormat());
- mLock.lock();
- if (mFileMeta != NULL) {
- int64_t duration;
- if (mFileMeta->findInt64(kKeyDuration, &duration)) {
- mDurationUs = duration;
- }
- }
-
- int32_t totalBitrate = 0;
-
- mMimes.clear();
-
- for (size_t i = 0; i < numtracks; ++i) {
-
- sp<AMediaFormatWrapper> trackFormat = mExtractor->getTrackFormat(i);
- if (trackFormat == NULL) {
- ALOGE("no metadata for track %zu", i);
- return UNKNOWN_ERROR;
- }
-
- sp<AMediaExtractorWrapper> trackExtractor = new AMediaExtractorWrapper(AMediaExtractor_new());
- if (aSourceWrapper != NULL) {
- trackExtractor->setDataSource(aSourceWrapper->getAMediaDataSource());
- } else {
- trackExtractor->setDataSource(fd, mOffset, mLength);
- }
-
- const char *mime;
- sp<MetaData> meta = convertMediaFormatWrapperToMetaData(trackFormat);
- CHECK(meta->findCString(kKeyMIMEType, &mime));
-
- ALOGV("initFromDataSource track[%zu]: %s", i, mime);
-
- // Do the string compare immediately with "mime"; we can't assume
- // "mime" stays valid after another extractor operation, since some
- // extractors may modify meta during getTrack() and invalidate it.
- if (!strncasecmp(mime, "audio/", 6)) {
- if (mAudioTrack.mExtractor == NULL) {
- mAudioTrack.mIndex = i;
- mAudioTrack.mExtractor = trackExtractor;
- mAudioTrack.mExtractor->selectTrack(i);
- mAudioTrack.mPackets = new AnotherPacketSource(meta);
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
- mAudioIsVorbis = true;
- } else {
- mAudioIsVorbis = false;
- }
-
- mMimes.add(String8(mime));
- }
- } else if (!strncasecmp(mime, "video/", 6)) {
- if (mVideoTrack.mExtractor == NULL) {
- mVideoTrack.mIndex = i;
- mVideoTrack.mExtractor = trackExtractor;
- mVideoTrack.mExtractor->selectTrack(i);
- mVideoTrack.mPackets = new AnotherPacketSource(meta);
-
- // video always at the beginning
- mMimes.insertAt(String8(mime), 0);
- }
- }
-
- mExtractors.push(trackExtractor);
- int64_t durationUs;
- if (meta->findInt64(kKeyDuration, &durationUs)) {
- if (durationUs > mDurationUs) {
- mDurationUs = durationUs;
- }
- }
-
- int32_t bitrate;
- if (totalBitrate >= 0 && meta->findInt32(kKeyBitRate, &bitrate)) {
- totalBitrate += bitrate;
- } else {
- totalBitrate = -1;
- }
- }
-
- ALOGV("initFromDataSource mExtractors.size(): %zu mIsSecure: %d mime[0]: %s", mExtractors.size(),
- mIsSecure, (mMimes.isEmpty() ? "NONE" : mMimes[0].string()));
-
- if (mExtractors.size() == 0) {
- ALOGE("b/23705695");
- return UNKNOWN_ERROR;
- }
-
- // Modular DRM: The return value doesn't affect source initialization.
- (void)checkDrmInfo();
-
- mBitrate = totalBitrate;
-
- return OK;
-}
-
-status_t NuPlayer2::GenericSource2::getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) {
- {
- Mutex::Autolock _l(mLock);
- *buffering = mBufferingSettings;
- }
-
- ALOGV("getBufferingSettings{%s}", buffering->toString().string());
- return OK;
-}
-
-status_t NuPlayer2::GenericSource2::setBufferingSettings(const BufferingSettings& buffering) {
- ALOGV("setBufferingSettings{%s}", buffering.toString().string());
-
- Mutex::Autolock _l(mLock);
- mBufferingSettings = buffering;
- return OK;
-}
-
-int64_t NuPlayer2::GenericSource2::getLastReadPosition() {
- if (mAudioTrack.mExtractor != NULL) {
- return mAudioTimeUs;
- } else if (mVideoTrack.mExtractor != NULL) {
- return mVideoTimeUs;
- } else {
- return 0;
- }
-}
-
-bool NuPlayer2::GenericSource2::isStreaming() const {
- Mutex::Autolock _l(mLock);
- return mIsStreaming;
-}
-
-NuPlayer2::GenericSource2::~GenericSource2() {
- ALOGV("~GenericSource2");
- if (mLooper != NULL) {
- mLooper->unregisterHandler(id());
- mLooper->stop();
- }
- if (mDataSourceWrapper != NULL) {
- mDataSourceWrapper->close();
- }
- resetDataSource();
-}
-
-void NuPlayer2::GenericSource2::prepareAsync(int64_t startTimeUs) {
- Mutex::Autolock _l(mLock);
- ALOGV("prepareAsync: (looper: %d)", (mLooper != NULL));
-
- if (mLooper == NULL) {
- mLooper = new ALooper;
- mLooper->setName("generic2");
- mLooper->start(false, /* runOnCallingThread */
- true, /* canCallJava */
- PRIORITY_DEFAULT);
-
- mLooper->registerHandler(this);
- }
-
- sp<AMessage> msg = new AMessage(kWhatPrepareAsync, this);
- msg->setInt64("startTimeUs", startTimeUs);
-
- msg->post();
-}
-
-void NuPlayer2::GenericSource2::onPrepareAsync(int64_t startTimeUs) {
- ALOGV("onPrepareAsync: mFd %d mUri %s mDataSourceWrapper: %p",
- mFd, mUri.c_str(), mDataSourceWrapper.get());
-
- if (!mUri.empty()) {
- const char* uri = mUri.c_str();
- size_t numheaders = mUriHeaders.size();
- const char **key_values = numheaders ? new const char *[numheaders * 2] : NULL;
- for (size_t i = 0; i < numheaders; ++i) {
- key_values[i * 2] = mUriHeaders.keyAt(i).c_str();
- key_values[i * 2 + 1] = mUriHeaders.valueAt(i).c_str();
- }
- mLock.unlock();
- AMediaDataSource *aSource = AMediaDataSource_newUri(uri, numheaders, key_values);
- mLock.lock();
- mDataSourceWrapper = aSource ? new AMediaDataSourceWrapper(aSource) : NULL;
- delete[] key_values;
- // For cached streaming cases, we need to wait for enough
- // buffering before reporting prepared.
- mIsStreaming = !strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8);
- }
-
- if (mDisconnected || (mFd < 0 && mDataSourceWrapper == NULL)) {
- ALOGE("mDisconnected(%d) or failed to create data source!", mDisconnected);
- notifyPreparedAndCleanup(UNKNOWN_ERROR);
- return;
- }
-
- // init extractor from data source
- status_t err = initFromDataSource();
- if (mFd >= 0) {
- close(mFd);
- mFd = -1;
- }
-
- if (err != OK) {
- ALOGE("Failed to init from data source!");
- notifyPreparedAndCleanup(err);
- return;
- }
-
- if (mVideoTrack.mExtractor != NULL) {
- sp<MetaData> meta = getFormatMeta_l(false /* audio */);
- sp<AMessage> msg = new AMessage;
- err = convertMetaDataToMessage(meta, &msg);
- if (err != OK) {
- notifyPreparedAndCleanup(err);
- return;
- }
- notifyVideoSizeChanged(msg);
- }
-
- notifyFlagsChanged(
- // FLAG_SECURE will be known if/when prepareDrm is called by the app
- // FLAG_PROTECTED will be known if/when prepareDrm is called by the app
- FLAG_CAN_PAUSE |
- FLAG_CAN_SEEK_BACKWARD |
- FLAG_CAN_SEEK_FORWARD |
- FLAG_CAN_SEEK);
-
- doSeek(startTimeUs, MediaPlayer2SeekMode::SEEK_CLOSEST);
- finishPrepareAsync();
-
- ALOGV("onPrepareAsync: Done");
-}
-
-void NuPlayer2::GenericSource2::finishPrepareAsync() {
- ALOGV("finishPrepareAsync");
-
- if (mIsStreaming) {
- mPreparing = true;
- ++mPollBufferingGeneration;
- schedulePollBuffering();
- } else {
- notifyPrepared();
- }
-
- if (mAudioTrack.mExtractor != NULL) {
- postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
- }
-
- if (mVideoTrack.mExtractor != NULL) {
- postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
- }
-}
-
-void NuPlayer2::GenericSource2::notifyPreparedAndCleanup(status_t err) {
- if (err != OK) {
- mDataSourceWrapper.clear();
-
- mBitrate = -1;
- mPrevBufferPercentage = -1;
- ++mPollBufferingGeneration;
- }
- notifyPrepared(err);
-}
-
-void NuPlayer2::GenericSource2::start() {
- Mutex::Autolock _l(mLock);
- ALOGI("start");
-
- if (mAudioTrack.mExtractor != NULL) {
- postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
- }
-
- if (mVideoTrack.mExtractor != NULL) {
- postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
- }
-
- mStarted = true;
-}
-
-void NuPlayer2::GenericSource2::stop() {
- Mutex::Autolock _l(mLock);
- mStarted = false;
-}
-
-void NuPlayer2::GenericSource2::pause() {
- Mutex::Autolock _l(mLock);
- mStarted = false;
-}
-
-void NuPlayer2::GenericSource2::resume() {
- Mutex::Autolock _l(mLock);
- mStarted = true;
-}
-
-void NuPlayer2::GenericSource2::disconnect() {
- {
- Mutex::Autolock _l(mLock);
- mDisconnected = true;
- }
- if (mDataSourceWrapper != NULL) {
- mDataSourceWrapper->close();
- }
-}
-
-status_t NuPlayer2::GenericSource2::feedMoreTSData() {
- return OK;
-}
-
-void NuPlayer2::GenericSource2::onMessageReceived(const sp<AMessage> &msg) {
- Mutex::Autolock _l(mLock);
- switch (msg->what()) {
- case kWhatPrepareAsync:
- {
- int64_t startTimeUs;
- CHECK(msg->findInt64("startTimeUs", &startTimeUs));
- onPrepareAsync(startTimeUs);
- break;
- }
- case kWhatFetchSubtitleData:
- {
- fetchTextData(kWhatSendSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
- mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
- break;
- }
-
- case kWhatFetchTimedTextData:
- {
- fetchTextData(kWhatSendTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
- mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
- break;
- }
-
- case kWhatSendSubtitleData:
- {
- sendTextData(kWhatSubtitleData, MEDIA_TRACK_TYPE_SUBTITLE,
- mFetchSubtitleDataGeneration, mSubtitleTrack.mPackets, msg);
- break;
- }
-
- case kWhatSendGlobalTimedTextData:
- {
- sendGlobalTextData(kWhatTimedTextData, mFetchTimedTextDataGeneration, msg);
- break;
- }
- case kWhatSendTimedTextData:
- {
- sendTextData(kWhatTimedTextData, MEDIA_TRACK_TYPE_TIMEDTEXT,
- mFetchTimedTextDataGeneration, mTimedTextTrack.mPackets, msg);
- break;
- }
-
- case kWhatChangeAVSource:
- {
- int32_t trackIndex;
- CHECK(msg->findInt32("trackIndex", &trackIndex));
- const sp<AMediaExtractorWrapper> extractor = mExtractors.itemAt(trackIndex);
-
- Track* track;
- AString mime;
- media_track_type trackType, counterpartType;
- sp<AMediaFormatWrapper> format = extractor->getTrackFormat(trackIndex);
- format->getString(AMEDIAFORMAT_KEY_MIME, &mime);
- if (!strncasecmp(mime.c_str(), "audio/", 6)) {
- track = &mAudioTrack;
- trackType = MEDIA_TRACK_TYPE_AUDIO;
- counterpartType = MEDIA_TRACK_TYPE_VIDEO;
- } else {
- CHECK(!strncasecmp(mime.c_str(), "video/", 6));
- track = &mVideoTrack;
- trackType = MEDIA_TRACK_TYPE_VIDEO;
- counterpartType = MEDIA_TRACK_TYPE_AUDIO;
- }
-
-
- track->mExtractor = extractor;
- track->mExtractor->selectSingleTrack(trackIndex);
- track->mIndex = trackIndex;
- ++mAudioDataGeneration;
- ++mVideoDataGeneration;
-
- int64_t timeUs, actualTimeUs;
- const bool formatChange = true;
- if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
- timeUs = mAudioLastDequeueTimeUs;
- } else {
- timeUs = mVideoLastDequeueTimeUs;
- }
- readBuffer(trackType, timeUs, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */,
- &actualTimeUs, formatChange);
- readBuffer(counterpartType, -1, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */,
- NULL, !formatChange);
- ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
-
- break;
- }
-
- case kWhatSeek:
- {
- onSeek(msg);
- break;
- }
-
- case kWhatReadBuffer:
- {
- onReadBuffer(msg);
- break;
- }
-
- case kWhatPollBuffering:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- if (generation == mPollBufferingGeneration) {
- onPollBuffering();
- }
- break;
- }
-
- default:
- Source::onMessageReceived(msg);
- break;
- }
-}
-
-void NuPlayer2::GenericSource2::fetchTextData(
- uint32_t sendWhat,
- media_track_type type,
- int32_t curGen,
- const sp<AnotherPacketSource>& packets,
- const sp<AMessage>& msg) {
- int32_t msgGeneration;
- CHECK(msg->findInt32("generation", &msgGeneration));
- if (msgGeneration != curGen) {
- // stale
- return;
- }
-
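- // A buffer is already queued for this track; nothing more to fetch yet.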
- int32_t avail;
- if (packets->hasBufferAvailable(&avail)) {
- return;
- }
-
- int64_t timeUs;
- CHECK(msg->findInt64("timeUs", &timeUs));
-
- int64_t subTimeUs = 0;
- readBuffer(type, timeUs, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */, &subTimeUs);
-
- status_t eosResult;
- if (!packets->hasBufferAvailable(&eosResult)) {
- return;
- }
-
- if (msg->what() == kWhatFetchSubtitleData) {
- subTimeUs -= 1000000ll; // send subtitle data one second earlier
- }
- sp<AMessage> msg2 = new AMessage(sendWhat, this);
- msg2->setInt32("generation", msgGeneration);
- mMediaClock->addTimer(msg2, subTimeUs);
-}
-
-void NuPlayer2::GenericSource2::sendTextData(
- uint32_t what,
- media_track_type type,
- int32_t curGen,
- const sp<AnotherPacketSource>& packets,
- const sp<AMessage>& msg) {
- int32_t msgGeneration;
- CHECK(msg->findInt32("generation", &msgGeneration));
- if (msgGeneration != curGen) {
- // stale
- return;
- }
-
- int64_t subTimeUs;
- if (packets->nextBufferTime(&subTimeUs) != OK) {
- return;
- }
-
- int64_t nextSubTimeUs;
- readBuffer(type, -1, MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */, &nextSubTimeUs);
-
- sp<ABuffer> buffer;
- status_t dequeueStatus = packets->dequeueAccessUnit(&buffer);
- if (dequeueStatus == OK) {
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", what);
- notify->setBuffer("buffer", buffer);
- notify->post();
-
- if (msg->what() == kWhatSendSubtitleData) {
- nextSubTimeUs -= 1000000ll; // send subtitle data one second earlier
- }
- mMediaClock->addTimer(msg, nextSubTimeUs);
- }
-}
-
-void NuPlayer2::GenericSource2::sendGlobalTextData(
- uint32_t what,
- int32_t curGen,
- sp<AMessage> msg) {
- int32_t msgGeneration;
- CHECK(msg->findInt32("generation", &msgGeneration));
- if (msgGeneration != curGen) {
- // stale
- return;
- }
-
- void *data = NULL;
- size_t size = 0;
- if (mTimedTextTrack.mExtractor->getTrackFormat(mTimedTextTrack.mIndex)->getBuffer(
- "text", &data, &size)) {
- mGlobalTimedText = new ABuffer(size);
- if (mGlobalTimedText->data()) {
- memcpy(mGlobalTimedText->data(), data, size);
- sp<AMessage> globalMeta = mGlobalTimedText->meta();
- globalMeta->setInt64("timeUs", 0);
- globalMeta->setString("mime", MEDIA_MIMETYPE_TEXT_3GPP);
- globalMeta->setInt32("global", 1);
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", what);
- notify->setBuffer("buffer", mGlobalTimedText);
- notify->post();
- }
- }
-}
-
-sp<AMessage> NuPlayer2::GenericSource2::getFormat(bool audio) {
- Mutex::Autolock _l(mLock);
- return getFormat_l(audio);
-}
-
-sp<MetaData> NuPlayer2::GenericSource2::getFormatMeta(bool audio) {
- Mutex::Autolock _l(mLock);
- return getFormatMeta_l(audio);
-}
-
-sp<AMessage> NuPlayer2::GenericSource2::getFormat_l(bool audio) {
- sp<AMediaExtractorWrapper> extractor = audio ? mAudioTrack.mExtractor : mVideoTrack.mExtractor;
- size_t trackIndex = audio ? mAudioTrack.mIndex : mVideoTrack.mIndex;
-
- if (extractor == NULL) {
- return NULL;
- }
-
- return extractor->getTrackFormat(trackIndex)->toAMessage();
-}
-
-sp<MetaData> NuPlayer2::GenericSource2::getFormatMeta_l(bool audio) {
- sp<AMediaExtractorWrapper> extractor = audio ? mAudioTrack.mExtractor : mVideoTrack.mExtractor;
- size_t trackIndex = audio ? mAudioTrack.mIndex : mVideoTrack.mIndex;
-
- if (extractor == NULL) {
- return NULL;
- }
-
- return convertMediaFormatWrapperToMetaData(extractor->getTrackFormat(trackIndex));
-}
-
-status_t NuPlayer2::GenericSource2::dequeueAccessUnit(
- bool audio, sp<ABuffer> *accessUnit) {
- Mutex::Autolock _l(mLock);
- // If we have gone through the stop/releaseDrm sequence, we no longer send down any
- // buffer because the codec's crypto object has gone away (b/37960096).
- // Note: This will be unnecessary when stop() changes behavior and releases the codec (b/35248283).
- if (!mStarted && mIsDrmReleased) {
- return -EWOULDBLOCK;
- }
-
- Track *track = audio ? &mAudioTrack : &mVideoTrack;
-
- if (track->mExtractor == NULL) {
- return -EWOULDBLOCK;
- }
-
- status_t finalResult;
- if (!track->mPackets->hasBufferAvailable(&finalResult)) {
- if (finalResult == OK) {
- postReadBuffer(
- audio ? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
- return -EWOULDBLOCK;
- }
- return finalResult;
- }
-
- status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
-
- // start pulling in more buffers if cache is running low
- // so that decoder has less chance of being starved
- if (!mIsStreaming) {
- if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
- postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
- }
- } else {
- int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
- // TODO: maxRebufferingMarkMs could be larger than
- // mBufferingSettings.mResumePlaybackMarkMs
- int64_t restartBufferingMarkUs =
- mBufferingSettings.mResumePlaybackMarkMs * 1000ll / 2;
- if (finalResult == OK) {
- if (durationUs < restartBufferingMarkUs) {
- postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
- }
- if (track->mPackets->getAvailableBufferCount(&finalResult) < 2
- && !mSentPauseOnBuffering && !mPreparing) {
- mSentPauseOnBuffering = true;
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatPauseOnBufferingStart);
- notify->post();
- }
- }
- }
-
- if (result != OK) {
- if (mSubtitleTrack.mExtractor != NULL) {
- mSubtitleTrack.mPackets->clear();
- mFetchSubtitleDataGeneration++;
- }
- if (mTimedTextTrack.mExtractor != NULL) {
- mTimedTextTrack.mPackets->clear();
- mFetchTimedTextDataGeneration++;
- }
- return result;
- }
-
- int64_t timeUs;
- status_t eosResult; // ignored
- CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
- if (audio) {
- mAudioLastDequeueTimeUs = timeUs;
- } else {
- mVideoLastDequeueTimeUs = timeUs;
- }
-
- if (mSubtitleTrack.mExtractor != NULL
- && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
- sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, this);
- msg->setInt64("timeUs", timeUs);
- msg->setInt32("generation", mFetchSubtitleDataGeneration);
- msg->post();
- }
-
- if (mTimedTextTrack.mExtractor != NULL
- && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
- sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
- msg->setInt64("timeUs", timeUs);
- msg->setInt32("generation", mFetchTimedTextDataGeneration);
- msg->post();
- }
-
- return result;
-}
-
-status_t NuPlayer2::GenericSource2::getDuration(int64_t *durationUs) {
- Mutex::Autolock _l(mLock);
- *durationUs = mDurationUs;
- return OK;
-}
-
-size_t NuPlayer2::GenericSource2::getTrackCount() const {
- Mutex::Autolock _l(mLock);
- return mExtractors.size();
-}
-
-sp<AMessage> NuPlayer2::GenericSource2::getTrackInfo(size_t trackIndex) const {
- Mutex::Autolock _l(mLock);
- size_t trackCount = mExtractors.size();
- if (trackIndex >= trackCount) {
- return NULL;
- }
-
- sp<AMessage> format = mExtractors.itemAt(trackIndex)->getTrackFormat(trackIndex)->toAMessage();
- if (format == NULL) {
- ALOGE("no metadata for track %zu", trackIndex);
- return NULL;
- }
-
- AString mime;
- CHECK(format->findString(AMEDIAFORMAT_KEY_MIME, &mime));
-
- int32_t trackType;
- if (!strncasecmp(mime.c_str(), "video/", 6)) {
- trackType = MEDIA_TRACK_TYPE_VIDEO;
- } else if (!strncasecmp(mime.c_str(), "audio/", 6)) {
- trackType = MEDIA_TRACK_TYPE_AUDIO;
- } else if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_TEXT_3GPP)) {
- trackType = MEDIA_TRACK_TYPE_TIMEDTEXT;
- } else {
- trackType = MEDIA_TRACK_TYPE_UNKNOWN;
- }
- format->setInt32("type", trackType);
-
- AString lang;
- if (!format->findString("language", &lang)) {
- format->setString("language", "und");
- }
-
- if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
- int32_t isAutoselect = 1, isDefault = 0, isForced = 0;
- format->findInt32(AMEDIAFORMAT_KEY_IS_AUTOSELECT, &isAutoselect);
- format->findInt32(AMEDIAFORMAT_KEY_IS_DEFAULT, &isDefault);
- format->findInt32(AMEDIAFORMAT_KEY_IS_FORCED_SUBTITLE, &isForced);
-
- format->setInt32("auto", !!isAutoselect);
- format->setInt32("default", !!isDefault);
- format->setInt32("forced", !!isForced);
- }
-
- return format;
-}
-
-ssize_t NuPlayer2::GenericSource2::getSelectedTrack(media_track_type type) const {
- Mutex::Autolock _l(mLock);
- const Track *track = NULL;
- switch (type) {
- case MEDIA_TRACK_TYPE_VIDEO:
- track = &mVideoTrack;
- break;
- case MEDIA_TRACK_TYPE_AUDIO:
- track = &mAudioTrack;
- break;
- case MEDIA_TRACK_TYPE_TIMEDTEXT:
- track = &mTimedTextTrack;
- break;
- case MEDIA_TRACK_TYPE_SUBTITLE:
- track = &mSubtitleTrack;
- break;
- default:
- break;
- }
-
- if (track != NULL && track->mExtractor != NULL) {
- return track->mIndex;
- }
-
- return -1;
-}
-
-status_t NuPlayer2::GenericSource2::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
- Mutex::Autolock _l(mLock);
- ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
-
- if (trackIndex >= mExtractors.size()) {
- return BAD_INDEX;
- }
-
- if (!select) {
- Track* track = NULL;
- if (mSubtitleTrack.mExtractor != NULL && trackIndex == mSubtitleTrack.mIndex) {
- track = &mSubtitleTrack;
- mFetchSubtitleDataGeneration++;
- } else if (mTimedTextTrack.mExtractor != NULL && trackIndex == mTimedTextTrack.mIndex) {
- track = &mTimedTextTrack;
- mFetchTimedTextDataGeneration++;
- }
- if (track == NULL) {
- return INVALID_OPERATION;
- }
- track->mExtractor = NULL;
- track->mPackets->clear();
- return OK;
- }
-
- const sp<AMediaExtractorWrapper> extractor = mExtractors.itemAt(trackIndex);
- sp<MetaData> meta = convertMediaFormatWrapperToMetaData(extractor->getTrackFormat(trackIndex));
- const char *mime;
- CHECK(meta->findCString(kKeyMIMEType, &mime));
- if (!strncasecmp(mime, "text/", 5)) {
- bool isSubtitle = (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) != 0);
- Track *track = isSubtitle ? &mSubtitleTrack : &mTimedTextTrack;
- if (track->mExtractor != NULL && track->mIndex == trackIndex) {
- return OK;
- }
- track->mIndex = trackIndex;
- track->mExtractor = mExtractors.itemAt(trackIndex);
- track->mExtractor->selectSingleTrack(trackIndex);
- if (track->mPackets == NULL) {
- track->mPackets = new AnotherPacketSource(meta);
- } else {
- track->mPackets->clear();
- track->mPackets->setFormat(meta);
-
- }
-
- if (isSubtitle) {
- mFetchSubtitleDataGeneration++;
- } else {
- mFetchTimedTextDataGeneration++;
- }
-
- status_t eosResult; // ignored
- if (mSubtitleTrack.mExtractor != NULL
- && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
- sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, this);
- msg->setInt64("timeUs", timeUs);
- msg->setInt32("generation", mFetchSubtitleDataGeneration);
- msg->post();
- }
-
- sp<AMessage> msg2 = new AMessage(kWhatSendGlobalTimedTextData, this);
- msg2->setInt32("generation", mFetchTimedTextDataGeneration);
- msg2->post();
-
- if (mTimedTextTrack.mExtractor != NULL
- && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
- sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
- msg->setInt64("timeUs", timeUs);
- msg->setInt32("generation", mFetchTimedTextDataGeneration);
- msg->post();
- }
-
- return OK;
- } else if (!strncasecmp(mime, "audio/", 6) || !strncasecmp(mime, "video/", 6)) {
- bool audio = !strncasecmp(mime, "audio/", 6);
- Track *track = audio ? &mAudioTrack : &mVideoTrack;
- if (track->mExtractor != NULL && track->mIndex == trackIndex) {
- return OK;
- }
-
- sp<AMessage> msg = new AMessage(kWhatChangeAVSource, this);
- msg->setInt32("trackIndex", trackIndex);
- msg->post();
- return OK;
- }
-
- return INVALID_OPERATION;
-}
-
-status_t NuPlayer2::GenericSource2::seekTo(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
- ALOGV("seekTo: %lld, %d", (long long)seekTimeUs, mode);
- sp<AMessage> msg = new AMessage(kWhatSeek, this);
- msg->setInt64("seekTimeUs", seekTimeUs);
- msg->setInt32("mode", mode);
-
- // Need to call readBuffer on |mLooper| to ensure the calls to
- // IMediaSource::read* are serialized. Note that IMediaSource::read*
- // is called without |mLock| acquired and MediaSource is not thread safe.
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
-
- return err;
-}
-
-void NuPlayer2::GenericSource2::onSeek(const sp<AMessage>& msg) {
- int64_t seekTimeUs;
- int32_t mode;
- CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
- CHECK(msg->findInt32("mode", &mode));
-
- sp<AMessage> response = new AMessage;
- status_t err = doSeek(seekTimeUs, (MediaPlayer2SeekMode)mode);
- response->setInt32("err", err);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-status_t NuPlayer2::GenericSource2::doSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
- if (mVideoTrack.mExtractor != NULL) {
- ++mVideoDataGeneration;
-
- int64_t actualTimeUs;
- readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
-
- if (mode != MediaPlayer2SeekMode::SEEK_CLOSEST) {
- seekTimeUs = actualTimeUs;
- }
- mVideoLastDequeueTimeUs = actualTimeUs;
- }
-
- if (mAudioTrack.mExtractor != NULL) {
- ++mAudioDataGeneration;
- readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs, MediaPlayer2SeekMode::SEEK_CLOSEST);
- mAudioLastDequeueTimeUs = seekTimeUs;
- }
-
- if (mSubtitleTrack.mExtractor != NULL) {
- mSubtitleTrack.mPackets->clear();
- mFetchSubtitleDataGeneration++;
- }
-
- if (mTimedTextTrack.mExtractor != NULL) {
- mTimedTextTrack.mPackets->clear();
- mFetchTimedTextDataGeneration++;
- }
-
- ++mPollBufferingGeneration;
- schedulePollBuffering();
- return OK;
-}
-
-sp<ABuffer> NuPlayer2::GenericSource2::mediaBufferToABuffer(
- MediaBufferBase* mb,
- media_track_type trackType) {
- bool audio = trackType == MEDIA_TRACK_TYPE_AUDIO;
- size_t outLength = mb->range_length();
-
- if (audio && mAudioIsVorbis) {
- outLength += sizeof(int32_t);
- }
-
- sp<ABuffer> ab;
-
- if (mIsDrmProtected) {
- // Modular DRM
- // Enabled for both video/audio so 1) media buffer is reused without extra copying
- // 2) meta data can be retrieved in onInputBufferFetched for calling queueSecureInputBuffer.
-
- // data is already provided in the buffer
- ab = new ABuffer(NULL, mb->range_length());
- ab->meta()->setObject("mediaBufferHolder", new MediaBufferHolder(mb));
-
- // Modular DRM: Required because of the above add_ref.
- // If ref > 0, there must be an observer, or it'll crash at release().
- // TODO: MediaBuffer might need to be revised to ease such need.
- mb->setObserver(this);
- // setMediaBufferBase() interestingly doesn't increment the ref count on its own.
- // Take an extra reference so that mb stays alive and attached to ab beyond this
- // function call; this counters the effect of mb->release() towards the end.
- mb->add_ref();
-
- } else {
- ab = new ABuffer(outLength);
- memcpy(ab->data(),
- (const uint8_t *)mb->data() + mb->range_offset(),
- mb->range_length());
- }
-
- if (audio && mAudioIsVorbis) {
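- // Append numPageSamples (kKeyValidSamples, or -1 if absent) after the payload for Vorbis audio.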
- int32_t numPageSamples;
- if (!mb->meta_data().findInt32(kKeyValidSamples, &numPageSamples)) {
- numPageSamples = -1;
- }
-
- uint8_t* abEnd = ab->data() + mb->range_length();
- memcpy(abEnd, &numPageSamples, sizeof(numPageSamples));
- }
-
- sp<AMessage> meta = ab->meta();
-
- int64_t timeUs;
- CHECK(mb->meta_data().findInt64(kKeyTime, &timeUs));
- meta->setInt64("timeUs", timeUs);
-
- if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
- int32_t layerId;
- if (mb->meta_data().findInt32(kKeyTemporalLayerId, &layerId)) {
- meta->setInt32("temporal-layer-id", layerId);
- }
- }
-
- if (trackType == MEDIA_TRACK_TYPE_TIMEDTEXT) {
- AString mime;
- sp<AMediaExtractorWrapper> extractor = mTimedTextTrack.mExtractor;
- size_t trackIndex = mTimedTextTrack.mIndex;
- CHECK(extractor != NULL
- && extractor->getTrackFormat(trackIndex)->getString(AMEDIAFORMAT_KEY_MIME, &mime));
- meta->setString("mime", mime.c_str());
- }
-
- int64_t durationUs;
- if (mb->meta_data().findInt64(kKeyDuration, &durationUs)) {
- meta->setInt64("durationUs", durationUs);
- }
-
- if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
- meta->setInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, mSubtitleTrack.mIndex);
- }
-
- uint32_t dataType; // unused
- const void *seiData;
- size_t seiLength;
- if (mb->meta_data().findData(kKeySEI, &dataType, &seiData, &seiLength)) {
- sp<ABuffer> sei = ABuffer::CreateAsCopy(seiData, seiLength);
- meta->setBuffer("sei", sei);
- }
-
- const void *mpegUserDataPointer;
- size_t mpegUserDataLength;
- if (mb->meta_data().findData(
- kKeyMpegUserData, &dataType, &mpegUserDataPointer, &mpegUserDataLength)) {
- sp<ABuffer> mpegUserData = ABuffer::CreateAsCopy(mpegUserDataPointer, mpegUserDataLength);
- meta->setBuffer(AMEDIAFORMAT_KEY_MPEG_USER_DATA, mpegUserData);
- }
-
- mb->release();
- mb = NULL;
-
- return ab;
-}
-
-int32_t NuPlayer2::GenericSource2::getDataGeneration(media_track_type type) const {
- int32_t generation = -1;
- switch (type) {
- case MEDIA_TRACK_TYPE_VIDEO:
- generation = mVideoDataGeneration;
- break;
- case MEDIA_TRACK_TYPE_AUDIO:
- generation = mAudioDataGeneration;
- break;
- case MEDIA_TRACK_TYPE_TIMEDTEXT:
- generation = mFetchTimedTextDataGeneration;
- break;
- case MEDIA_TRACK_TYPE_SUBTITLE:
- generation = mFetchSubtitleDataGeneration;
- break;
- default:
- break;
- }
-
- return generation;
-}
-
-void NuPlayer2::GenericSource2::postReadBuffer(media_track_type trackType) {
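- // Post at most one pending kWhatReadBuffer per track type; the bit is cleared in onReadBuffer().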
- if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
- mPendingReadBufferTypes |= (1 << trackType);
- sp<AMessage> msg = new AMessage(kWhatReadBuffer, this);
- msg->setInt32("trackType", trackType);
- msg->post();
- }
-}
-
-void NuPlayer2::GenericSource2::onReadBuffer(const sp<AMessage>& msg) {
- int32_t tmpType;
- CHECK(msg->findInt32("trackType", &tmpType));
- media_track_type trackType = (media_track_type)tmpType;
- mPendingReadBufferTypes &= ~(1 << trackType);
- readBuffer(trackType);
-}
-
-void NuPlayer2::GenericSource2::readBuffer(
- media_track_type trackType, int64_t seekTimeUs, MediaPlayer2SeekMode mode,
- int64_t *actualTimeUs, bool formatChange) {
- Track *track;
- size_t maxBuffers = 1;
- switch (trackType) {
- case MEDIA_TRACK_TYPE_VIDEO:
- track = &mVideoTrack;
- maxBuffers = 8; // too large a number may adversely affect seeks
- break;
- case MEDIA_TRACK_TYPE_AUDIO:
- track = &mAudioTrack;
- maxBuffers = 64;
- break;
- case MEDIA_TRACK_TYPE_SUBTITLE:
- track = &mSubtitleTrack;
- break;
- case MEDIA_TRACK_TYPE_TIMEDTEXT:
- track = &mTimedTextTrack;
- break;
- default:
- TRESPASS();
- }
-
- if (track->mExtractor == NULL) {
- return;
- }
-
- if (actualTimeUs) {
- *actualTimeUs = seekTimeUs;
- }
-
-
- bool seeking = false;
- sp<AMediaExtractorWrapper> extractor = track->mExtractor;
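- // A non-negative seekTimeUs means this read is part of a seek; reposition the extractor first.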
- if (seekTimeUs >= 0) {
- extractor->seekTo(seekTimeUs, mode);
- seeking = true;
- }
-
- int32_t generation = getDataGeneration(trackType);
- for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
- Vector<sp<ABuffer> > aBuffers;
-
- mLock.unlock();
-
- sp<AMediaFormatWrapper> format;
- ssize_t sampleSize = -1;
- status_t err = extractor->getSampleFormat(format);
- if (err == OK) {
- sampleSize = extractor->getSampleSize();
- }
-
- if (err != OK || sampleSize < 0) {
- mLock.lock();
- track->mPackets->signalEOS(err != OK ? err : ERROR_END_OF_STREAM);
- break;
- }
-
- sp<ABuffer> abuf = new ABuffer(sampleSize);
- sampleSize = extractor->readSampleData(abuf);
- mLock.lock();
-
- // The track may have been changed while mLock was released above.
- if (generation != getDataGeneration(trackType)) {
- break;
- }
-
- int64_t timeUs = extractor->getSampleTime();
- if (timeUs < 0) {
- track->mPackets->signalEOS(ERROR_MALFORMED);
- break;
- }
-
- sp<AMessage> meta = abuf->meta();
- format->writeToAMessage(meta);
- meta->setInt64("timeUs", timeUs);
- if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
- mAudioTimeUs = timeUs;
- } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
- mVideoTimeUs = timeUs;
- }
-
- sp<AMediaCodecCryptoInfoWrapper> cryptInfo = extractor->getSampleCryptoInfo();
- if (cryptInfo != NULL) {
- meta->setObject("cryptInfo", cryptInfo);
- }
-
- queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
-
- if (numBuffers == 0 && actualTimeUs != nullptr) {
- *actualTimeUs = timeUs;
- }
- if (seeking) {
- if (meta != nullptr && mode == MediaPlayer2SeekMode::SEEK_CLOSEST
- && seekTimeUs > timeUs) {
- sp<AMessage> extra = new AMessage;
- extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
- meta->setMessage("extra", extra);
- }
- }
-
- track->mPackets->queueAccessUnit(abuf);
- formatChange = false;
- seeking = false;
- ++numBuffers;
- extractor->advance();
-
- }
-
- if (mIsStreaming
- && (trackType == MEDIA_TRACK_TYPE_VIDEO || trackType == MEDIA_TRACK_TYPE_AUDIO)) {
- status_t finalResult;
- int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
-
- // TODO: maxRebufferingMarkMs could be larger than
- // mBufferingSettings.mResumePlaybackMarkMs
- int64_t markUs = (mPreparing ? mBufferingSettings.mInitialMarkMs
- : mBufferingSettings.mResumePlaybackMarkMs) * 1000ll;
- if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
- if (mPreparing || mSentPauseOnBuffering) {
- Track *counterTrack =
- (trackType == MEDIA_TRACK_TYPE_VIDEO ? &mAudioTrack : &mVideoTrack);
- if (counterTrack->mExtractor != NULL) {
- durationUs = counterTrack->mPackets->getBufferedDurationUs(&finalResult);
- }
- if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
- if (mPreparing) {
- notifyPrepared();
- mPreparing = false;
- } else {
- mSentPauseOnBuffering = false;
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatResumeOnBufferingEnd);
- notify->post();
- }
- }
- }
- return;
- }
-
- postReadBuffer(trackType);
- }
-}
-
-void NuPlayer2::GenericSource2::queueDiscontinuityIfNeeded(
- bool seeking, bool formatChange, media_track_type trackType, Track *track) {
- // formatChange && seeking: track whose source is changed during selection
- // formatChange && !seeking: track whose source is not changed during selection
- // !formatChange: normal seek
- if ((seeking || formatChange)
- && (trackType == MEDIA_TRACK_TYPE_AUDIO
- || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
- ATSParser::DiscontinuityType type = (formatChange && seeking)
- ? ATSParser::DISCONTINUITY_FORMATCHANGE
- : ATSParser::DISCONTINUITY_NONE;
- track->mPackets->queueDiscontinuity(type, NULL /* extra */, true /* discard */);
- }
-}
-
-void NuPlayer2::GenericSource2::notifyBufferingUpdate(int32_t percentage) {
- // Buffering percent could go backward as it's estimated from remaining
- // data and last access time. This could cause the buffering position
- // drawn on media control to jitter slightly. Remember previously reported
- // percentage and don't allow it to go backward.
- if (percentage < mPrevBufferPercentage) {
- percentage = mPrevBufferPercentage;
- } else if (percentage > 100) {
- percentage = 100;
- }
-
- mPrevBufferPercentage = percentage;
-
- ALOGV("notifyBufferingUpdate: buffering %d%%", percentage);
-
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatBufferingUpdate);
- notify->setInt32("percentage", percentage);
- notify->post();
-}
-
-void NuPlayer2::GenericSource2::schedulePollBuffering() {
- if (mIsStreaming) {
- sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
- msg->setInt32("generation", mPollBufferingGeneration);
- // Query buffering status every second.
- msg->post(1000000ll);
- }
-}
-
-void NuPlayer2::GenericSource2::onPollBuffering() {
- int64_t cachedDurationUs = -1ll;
-
- sp<AMediaExtractorWrapper> extractor;
- if (mVideoTrack.mExtractor != NULL) {
- extractor = mVideoTrack.mExtractor;
- } else if (mAudioTrack.mExtractor != NULL) {
- extractor = mAudioTrack.mExtractor;
- }
-
- if (extractor != NULL) {
- cachedDurationUs = extractor->getCachedDuration();
- }
-
- if (cachedDurationUs >= 0ll) {
- ssize_t sampleSize = extractor->getSampleSize();
- if (sampleSize >= 0ll) {
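- // Estimate the buffered position as the last read position plus the cached duration.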
- int64_t cachedPosUs = getLastReadPosition() + cachedDurationUs;
- int percentage = 100.0 * cachedPosUs / mDurationUs;
- if (percentage > 100) {
- percentage = 100;
- }
-
- notifyBufferingUpdate(percentage);
- ALOGV("onPollBuffering: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
- } else {
- notifyBufferingUpdate(100);
- ALOGV("onPollBuffering: EOS");
- }
- }
-
- schedulePollBuffering();
-}
-
-// Modular DRM
-status_t NuPlayer2::GenericSource2::prepareDrm(
- const uint8_t uuid[16],
- const Vector<uint8_t> &drmSessionId,
- sp<AMediaCryptoWrapper> *outCrypto) {
- Mutex::Autolock _l(mLock);
- ALOGV("prepareDrm");
-
- mIsDrmProtected = false;
- mIsDrmReleased = false;
- mIsSecure = false;
-
- status_t status = OK;
- sp<AMediaCryptoWrapper> crypto =
- new AMediaCryptoWrapper(uuid, drmSessionId.array(), drmSessionId.size());
- if (crypto == NULL) {
- ALOGE("prepareDrm: failed to create crypto.");
- return UNKNOWN_ERROR;
- }
- ALOGV("prepareDrm: crypto created for uuid: %s",
- DrmUUID::toHexString(uuid).string());
-
- *outCrypto = crypto;
- // as long as there is an active crypto
- mIsDrmProtected = true;
-
- if (mMimes.size() == 0) {
- status = UNKNOWN_ERROR;
- ALOGE("prepareDrm: Unexpected. Must have at least one track. status: %d", status);
- return status;
- }
-
- // first mime in this list is either the video track, or the first audio track
- const char *mime = mMimes[0].string();
- mIsSecure = crypto->requiresSecureDecoderComponent(mime);
- ALOGV("prepareDrm: requiresSecureDecoderComponent mime: %s isSecure: %d",
- mime, mIsSecure);
-
- // Checking the member flags while in the looper to send out the notification.
- // The legacy mDecryptHandle!=NULL check (for FLAG_PROTECTED) is equivalent to mIsDrmProtected.
- notifyFlagsChanged(
- (mIsSecure ? FLAG_SECURE : 0) |
- // Setting "protected screen" only for L1: b/38390836
- (mIsSecure ? FLAG_PROTECTED : 0) |
- FLAG_CAN_PAUSE |
- FLAG_CAN_SEEK_BACKWARD |
- FLAG_CAN_SEEK_FORWARD |
- FLAG_CAN_SEEK);
-
- if (status == OK) {
- ALOGV("prepareDrm: mCrypto: %p", outCrypto->get());
- ALOGD("prepareDrm ret: %d ", status);
- } else {
- ALOGE("prepareDrm err: %d", status);
- }
- return status;
-}
-
-status_t NuPlayer2::GenericSource2::releaseDrm() {
- Mutex::Autolock _l(mLock);
- ALOGV("releaseDrm");
-
- if (mIsDrmProtected) {
- mIsDrmProtected = false;
- // to prevent returning any more buffer after stop/releaseDrm (b/37960096)
- mIsDrmReleased = true;
- ALOGV("releaseDrm: mIsDrmProtected is reset.");
- } else {
- ALOGE("releaseDrm: mIsDrmProtected is already false.");
- }
-
- return OK;
-}
-
-status_t NuPlayer2::GenericSource2::checkDrmInfo()
-{
- // clearing the flag at prepare in case the player is reused after stop/releaseDrm with the
- // same source without being reset (called by prepareAsync/initFromDataSource)
- mIsDrmReleased = false;
-
- if (mExtractor == NULL) {
- ALOGV("checkDrmInfo: No extractor");
- return OK; // let the caller respond accordingly
- }
-
- PsshInfo *psshInfo = mExtractor->getPsshInfo();
- if (psshInfo == NULL) {
- ALOGV("checkDrmInfo: No PSSH");
- return OK; // source without DRM info
- }
-
- PlayerMessage playerMsg;
- status_t ret = NuPlayer2Drm::retrieveDrmInfo(psshInfo, &playerMsg);
- ALOGV("checkDrmInfo: MEDIA_DRM_INFO PSSH drm info size: %d", (int)playerMsg.ByteSize());
-
- if (ret != OK) {
- ALOGE("checkDrmInfo: failed to retrieve DrmInfo %d", ret);
- return UNKNOWN_ERROR;
- }
-
- int size = playerMsg.ByteSize();
- sp<ABuffer> drmInfoBuf = new ABuffer(size);
- playerMsg.SerializeToArray(drmInfoBuf->data(), size);
- drmInfoBuf->setRange(0, size);
- notifyDrmInfo(drmInfoBuf);
-
- return OK;
-}
-
-void NuPlayer2::GenericSource2::signalBufferReturned(MediaBufferBase *buffer)
-{
- //ALOGV("signalBufferReturned %p refCount: %d", buffer, buffer->localRefcount());
-
- buffer->setObserver(NULL);
- buffer->release(); // this leads to delete since there is no observer
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/GenericSource2.h b/media/libmediaplayer2/nuplayer2/GenericSource2.h
deleted file mode 100644
index ade1aa3..0000000
--- a/media/libmediaplayer2/nuplayer2/GenericSource2.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef GENERIC_SOURCE2_H_
-
-#define GENERIC_SOURCE2_H_
-
-#include "NuPlayer2.h"
-#include "NuPlayer2Source.h"
-
-#include "ATSParser.h"
-
-#include <media/stagefright/MediaBuffer.h>
-#include <mediaplayer2/mediaplayer2.h>
-#include <media/NdkMediaDataSource.h>
-#include <media/NdkMediaExtractor.h>
-#include <media/NdkWrapper.h>
-
-namespace android {
-
-class DecryptHandle;
-struct AnotherPacketSource;
-struct ARTSPController;
-class DataSource;
-class IDataSource;
-class IMediaSource;
-struct MediaSource;
-class MediaBuffer;
-struct MediaClock;
-
-struct NuPlayer2::GenericSource2 : public NuPlayer2::Source,
- public MediaBufferObserver // Modular DRM
-{
- GenericSource2(const sp<AMessage> &notify, uid_t uid,
- const sp<MediaClock> &mediaClock);
-
- status_t setDataSource(
- const char *url,
- const KeyedVector<String8, String8> *headers);
-
- status_t setDataSource(int fd, int64_t offset, int64_t length);
-
- status_t setDataSource(const sp<DataSource>& dataSource);
-
- virtual status_t getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) override;
- virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
-
- virtual void prepareAsync(int64_t startTimeUs);
-
- virtual void start();
- virtual void stop();
- virtual void pause();
- virtual void resume();
-
- virtual void disconnect();
-
- virtual status_t feedMoreTSData();
-
- virtual sp<MetaData> getFileFormatMeta() const;
-
- virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
-
- virtual status_t getDuration(int64_t *durationUs);
- virtual size_t getTrackCount() const;
- virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
- virtual ssize_t getSelectedTrack(media_track_type type) const;
- virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
- virtual status_t seekTo(
- int64_t seekTimeUs,
- MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
-
- virtual bool isStreaming() const;
-
- // Modular DRM
- virtual void signalBufferReturned(MediaBufferBase *buffer);
-
- virtual status_t prepareDrm(
- const uint8_t uuid[16],
- const Vector<uint8_t> &drmSessionId,
- sp<AMediaCryptoWrapper> *outCrypto);
-
- virtual status_t releaseDrm();
-
-
-protected:
- virtual ~GenericSource2();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
- virtual sp<AMessage> getFormat(bool audio);
- virtual sp<MetaData> getFormatMeta(bool audio);
-
-private:
- enum {
- kWhatPrepareAsync,
- kWhatFetchSubtitleData,
- kWhatFetchTimedTextData,
- kWhatSendSubtitleData,
- kWhatSendGlobalTimedTextData,
- kWhatSendTimedTextData,
- kWhatChangeAVSource,
- kWhatPollBuffering,
- kWhatSeek,
- kWhatReadBuffer,
- kWhatStart,
- kWhatResume,
- kWhatSecureDecodersInstantiated,
- };
-
- struct Track {
- size_t mIndex;
- sp<AMediaExtractorWrapper> mExtractor;
- sp<AnotherPacketSource> mPackets;
- };
-
- int64_t mAudioTimeUs;
- int64_t mAudioLastDequeueTimeUs;
- int64_t mVideoTimeUs;
- int64_t mVideoLastDequeueTimeUs;
-
- BufferingSettings mBufferingSettings;
- int32_t mPrevBufferPercentage;
- int32_t mPollBufferingGeneration;
- bool mSentPauseOnBuffering;
-
- int32_t mAudioDataGeneration;
- int32_t mVideoDataGeneration;
- int32_t mFetchSubtitleDataGeneration;
- int32_t mFetchTimedTextDataGeneration;
- int64_t mDurationUs;
- bool mAudioIsVorbis;
- // Secure codec is required.
- bool mIsSecure;
- bool mIsStreaming;
- uid_t mUID;
- const sp<MediaClock> mMediaClock;
- AString mUri;
- KeyedVector<String8, String8> mUriHeaders;
- int mFd;
- int64_t mOffset;
- int64_t mLength;
-
- bool mDisconnected;
- sp<MetaData> mFileMeta;
- sp<AMediaDataSourceWrapper> mDataSourceWrapper;
- sp<AMediaExtractorWrapper> mExtractor;
- Vector<sp<AMediaExtractorWrapper> > mExtractors;
- bool mStarted;
- bool mPreparing;
- int64_t mBitrate;
- uint32_t mPendingReadBufferTypes;
- sp<ABuffer> mGlobalTimedText;
-
- Track mVideoTrack;
- Track mAudioTrack;
- Track mSubtitleTrack;
- Track mTimedTextTrack;
-
- mutable Mutex mLock;
-
- sp<ALooper> mLooper;
-
- void resetDataSource();
-
- status_t initFromDataSource();
- int64_t getLastReadPosition();
-
- void notifyPreparedAndCleanup(status_t err);
- void onSecureDecodersInstantiated(status_t err);
- void finishPrepareAsync();
- status_t startSources();
-
- void onSeek(const sp<AMessage>& msg);
- status_t doSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode);
-
- void onPrepareAsync(int64_t startTimeUs);
-
- void fetchTextData(
- uint32_t what, media_track_type type,
- int32_t curGen, const sp<AnotherPacketSource>& packets, const sp<AMessage>& msg);
-
- void sendGlobalTextData(
- uint32_t what,
- int32_t curGen, sp<AMessage> msg);
-
- void sendTextData(
- uint32_t what, media_track_type type,
- int32_t curGen, const sp<AnotherPacketSource>& packets, const sp<AMessage>& msg);
-
- sp<ABuffer> mediaBufferToABuffer(
- MediaBufferBase *mbuf,
- media_track_type trackType);
-
- void postReadBuffer(media_track_type trackType);
- void onReadBuffer(const sp<AMessage>& msg);
- // When |mode| is MediaPlayer2SeekMode::SEEK_CLOSEST, the buffer read shall
- // include an item indicating skipping rendering all buffers with timestamp
- // earlier than |seekTimeUs|.
- // For other modes, the buffer read will not include the item as above in order
- // to facilitate fast seek operation.
- void readBuffer(
- media_track_type trackType,
- int64_t seekTimeUs = -1ll,
- MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC,
- int64_t *actualTimeUs = NULL, bool formatChange = false);
-
- void queueDiscontinuityIfNeeded(
- bool seeking, bool formatChange, media_track_type trackType, Track *track);
-
- void schedulePollBuffering();
- void onPollBuffering();
- void notifyBufferingUpdate(int32_t percentage);
-
- sp<AMessage> getFormat_l(bool audio);
- sp<MetaData> getFormatMeta_l(bool audio);
- int32_t getDataGeneration(media_track_type type) const;
-
- // Modular DRM
- // The source is DRM protected and is prepared for DRM.
- bool mIsDrmProtected;
- // releaseDrm has been processed.
- bool mIsDrmReleased;
- Vector<String8> mMimes;
-
- status_t checkDrmInfo();
-
- DISALLOW_EVIL_CONSTRUCTORS(GenericSource2);
-};
-
-} // namespace android
-
-#endif // GENERIC_SOURCE2_H_
diff --git a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
deleted file mode 100644
index e53900b..0000000
--- a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.cpp
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "HTTPLiveSource2"
-#include <utils/Log.h>
-
-#include "HTTPLiveSource2.h"
-
-#include "AnotherPacketSource.h"
-#include "LiveDataSource.h"
-
-#include <media/MediaHTTPService.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/Utils.h>
-
-// default buffer prepare/ready/underflow marks
-static const int kReadyMarkMs = 5000; // 5 seconds
-static const int kPrepareMarkMs = 1500; // 1.5 seconds
-
-namespace android {
-
-NuPlayer2::HTTPLiveSource2::HTTPLiveSource2(
- const sp<AMessage> &notify,
- const sp<MediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers)
- : Source(notify),
- mHTTPService(httpService),
- mURL(url),
- mFlags(0),
- mFinalResult(OK),
- mOffset(0),
- mFetchSubtitleDataGeneration(0),
- mFetchMetaDataGeneration(0),
- mHasMetadata(false),
- mMetadataSelected(false) {
- mBufferingSettings.mInitialMarkMs = kPrepareMarkMs;
- mBufferingSettings.mResumePlaybackMarkMs = kReadyMarkMs;
- if (headers) {
- mExtraHeaders = *headers;
-
- ssize_t index =
- mExtraHeaders.indexOfKey(String8("x-hide-urls-from-log"));
-
- if (index >= 0) {
- mFlags |= kFlagIncognito;
-
- mExtraHeaders.removeItemsAt(index);
- }
- }
-}
-
-NuPlayer2::HTTPLiveSource2::~HTTPLiveSource2() {
- if (mLiveSession != NULL) {
- mLiveSession->disconnect();
-
- mLiveLooper->unregisterHandler(mLiveSession->id());
- mLiveLooper->unregisterHandler(id());
- mLiveLooper->stop();
-
- mLiveSession.clear();
- mLiveLooper.clear();
- }
-}
-
-status_t NuPlayer2::HTTPLiveSource2::getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) {
- *buffering = mBufferingSettings;
-
- return OK;
-}
-
-status_t NuPlayer2::HTTPLiveSource2::setBufferingSettings(const BufferingSettings& buffering) {
- mBufferingSettings = buffering;
-
- if (mLiveSession != NULL) {
- mLiveSession->setBufferingSettings(mBufferingSettings);
- }
-
- return OK;
-}
-
-// TODO: fetch data starting from |startTimeUs|
-void NuPlayer2::HTTPLiveSource2::prepareAsync(int64_t /* startTimeUs */) {
- if (mLiveLooper == NULL) {
- mLiveLooper = new ALooper;
- mLiveLooper->setName("http live2");
- mLiveLooper->start(false, /* runOnCallingThread */
- true /* canCallJava */);
-
- mLiveLooper->registerHandler(this);
- }
-
- sp<AMessage> notify = new AMessage(kWhatSessionNotify, this);
-
- mLiveSession = new LiveSession(
- notify,
- (mFlags & kFlagIncognito) ? LiveSession::kFlagIncognito : 0,
- mHTTPService);
-
- mLiveLooper->registerHandler(mLiveSession);
-
- mLiveSession->setBufferingSettings(mBufferingSettings);
- mLiveSession->connectAsync(
- mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
-}
-
-void NuPlayer2::HTTPLiveSource2::start() {
-}
-
-sp<MetaData> NuPlayer2::HTTPLiveSource2::getFormatMeta(bool audio) {
- sp<MetaData> meta;
- if (mLiveSession != NULL) {
- mLiveSession->getStreamFormatMeta(
- audio ? LiveSession::STREAMTYPE_AUDIO
- : LiveSession::STREAMTYPE_VIDEO,
- &meta);
- }
-
- return meta;
-}
-
-sp<AMessage> NuPlayer2::HTTPLiveSource2::getFormat(bool audio) {
- sp<MetaData> meta;
- status_t err = -EWOULDBLOCK;
- if (mLiveSession != NULL) {
- err = mLiveSession->getStreamFormatMeta(
- audio ? LiveSession::STREAMTYPE_AUDIO
- : LiveSession::STREAMTYPE_VIDEO,
- &meta);
- }
-
- sp<AMessage> format;
- if (err == -EWOULDBLOCK) {
- format = new AMessage();
- format->setInt32("err", err);
- return format;
- }
-
- if (err != OK || convertMetaDataToMessage(meta, &format) != OK) {
- return NULL;
- }
- return format;
-}
-
-status_t NuPlayer2::HTTPLiveSource2::feedMoreTSData() {
- return OK;
-}
-
-status_t NuPlayer2::HTTPLiveSource2::dequeueAccessUnit(
- bool audio, sp<ABuffer> *accessUnit) {
- return mLiveSession->dequeueAccessUnit(
- audio ? LiveSession::STREAMTYPE_AUDIO
- : LiveSession::STREAMTYPE_VIDEO,
- accessUnit);
-}
-
-status_t NuPlayer2::HTTPLiveSource2::getDuration(int64_t *durationUs) {
- return mLiveSession->getDuration(durationUs);
-}
-
-size_t NuPlayer2::HTTPLiveSource2::getTrackCount() const {
- return mLiveSession->getTrackCount();
-}
-
-sp<AMessage> NuPlayer2::HTTPLiveSource2::getTrackInfo(size_t trackIndex) const {
- return mLiveSession->getTrackInfo(trackIndex);
-}
-
-ssize_t NuPlayer2::HTTPLiveSource2::getSelectedTrack(media_track_type type) const {
- if (mLiveSession == NULL) {
- return -1;
- } else if (type == MEDIA_TRACK_TYPE_METADATA) {
- // MEDIA_TRACK_TYPE_METADATA is always last track
- // mMetadataSelected can only be true when mHasMetadata is true
- return mMetadataSelected ? (mLiveSession->getTrackCount() - 1) : -1;
- } else {
- return mLiveSession->getSelectedTrack(type);
- }
-}
-
-status_t NuPlayer2::HTTPLiveSource2::selectTrack(size_t trackIndex, bool select, int64_t /*timeUs*/) {
- if (mLiveSession == NULL) {
- return INVALID_OPERATION;
- }
-
- status_t err = INVALID_OPERATION;
- bool postFetchMsg = false, isSub = false;
- if (!mHasMetadata || trackIndex != mLiveSession->getTrackCount() - 1) {
- err = mLiveSession->selectTrack(trackIndex, select);
- postFetchMsg = select;
- isSub = true;
- } else {
- // metadata track; i.e. (mHasMetadata && trackIndex == mLiveSession->getTrackCount() - 1)
- if (mMetadataSelected && !select) {
- err = OK;
- } else if (!mMetadataSelected && select) {
- postFetchMsg = true;
- err = OK;
- } else {
- err = BAD_VALUE; // behave as LiveSession::selectTrack
- }
-
- mMetadataSelected = select;
- }
-
- if (err == OK) {
- int32_t &generation = isSub ? mFetchSubtitleDataGeneration : mFetchMetaDataGeneration;
- generation++;
- if (postFetchMsg) {
- int32_t what = isSub ? kWhatFetchSubtitleData : kWhatFetchMetaData;
- sp<AMessage> msg = new AMessage(what, this);
- msg->setInt32("generation", generation);
- msg->post();
- }
- }
-
- // LiveSession::selectTrack returns BAD_VALUE when selecting the currently
- // selected track, or unselecting a non-selected track. In this case it's a
- // no-op so we return OK.
- return (err == OK || err == BAD_VALUE) ? (status_t)OK : err;
-}
-
-status_t NuPlayer2::HTTPLiveSource2::seekTo(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
- if (mLiveSession->isSeekable()) {
- return mLiveSession->seekTo(seekTimeUs, mode);
- } else {
- return INVALID_OPERATION;
- }
-}
-
-void NuPlayer2::HTTPLiveSource2::pollForRawData(
- const sp<AMessage> &msg, int32_t currentGeneration,
- LiveSession::StreamType fetchType, int32_t pushWhat) {
-
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
-
- if (generation != currentGeneration) {
- return;
- }
-
- sp<ABuffer> buffer;
- while (mLiveSession->dequeueAccessUnit(fetchType, &buffer) == OK) {
-
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", pushWhat);
- notify->setBuffer("buffer", buffer);
-
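- // Compute how far in the future this buffer is due; a negative delay means it is already late.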
- int64_t timeUs, baseUs, delayUs;
- CHECK(buffer->meta()->findInt64("baseUs", &baseUs));
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
- delayUs = baseUs + timeUs - ALooper::GetNowUs();
-
- if (fetchType == LiveSession::STREAMTYPE_SUBTITLES) {
- notify->post();
- msg->post(delayUs > 0LL ? delayUs : 0LL);
- return;
- } else if (fetchType == LiveSession::STREAMTYPE_METADATA) {
- if (delayUs < -1000000LL) { // more than 1 second late; drop it
- continue;
- }
- notify->post();
- // push all currently available metadata buffers in each invocation of pollForRawData
- // continue;
- } else {
- TRESPASS();
- }
- }
-
- // try again in 1 second
- msg->post(1000000LL);
-}
-
-void NuPlayer2::HTTPLiveSource2::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatSessionNotify:
- {
- onSessionNotify(msg);
- break;
- }
-
- case kWhatFetchSubtitleData:
- {
- pollForRawData(
- msg, mFetchSubtitleDataGeneration,
- /* fetch */ LiveSession::STREAMTYPE_SUBTITLES,
- /* push */ kWhatSubtitleData);
-
- break;
- }
-
- case kWhatFetchMetaData:
- {
- if (!mMetadataSelected) {
- break;
- }
-
- pollForRawData(
- msg, mFetchMetaDataGeneration,
- /* fetch */ LiveSession::STREAMTYPE_METADATA,
- /* push */ kWhatTimedMetaData);
-
- break;
- }
-
- default:
- Source::onMessageReceived(msg);
- break;
- }
-}
-
-void NuPlayer2::HTTPLiveSource2::onSessionNotify(const sp<AMessage> &msg) {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- switch (what) {
- case LiveSession::kWhatPrepared:
- {
- // notify the current size here if we have it, otherwise report an initial size of (0,0)
- sp<AMessage> format = getFormat(false /* audio */);
- int32_t width;
- int32_t height;
- if (format != NULL &&
- format->findInt32("width", &width) && format->findInt32("height", &height)) {
- notifyVideoSizeChanged(format);
- } else {
- notifyVideoSizeChanged();
- }
-
- uint32_t flags = 0;
- if (mLiveSession->isSeekable()) {
- flags |= FLAG_CAN_PAUSE;
- flags |= FLAG_CAN_SEEK;
- flags |= FLAG_CAN_SEEK_BACKWARD;
- flags |= FLAG_CAN_SEEK_FORWARD;
- }
-
- if (mLiveSession->hasDynamicDuration()) {
- flags |= FLAG_DYNAMIC_DURATION;
- }
-
- notifyFlagsChanged(flags);
-
- notifyPrepared();
- break;
- }
-
- case LiveSession::kWhatPreparationFailed:
- {
- status_t err;
- CHECK(msg->findInt32("err", &err));
-
- notifyPrepared(err);
- break;
- }
-
- case LiveSession::kWhatStreamsChanged:
- {
- uint32_t changedMask;
- CHECK(msg->findInt32(
- "changedMask", (int32_t *)&changedMask));
-
- bool audio = changedMask & LiveSession::STREAMTYPE_AUDIO;
- bool video = changedMask & LiveSession::STREAMTYPE_VIDEO;
-
- sp<AMessage> reply;
- CHECK(msg->findMessage("reply", &reply));
-
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatQueueDecoderShutdown);
- notify->setInt32("audio", audio);
- notify->setInt32("video", video);
- notify->setMessage("reply", reply);
- notify->post();
- break;
- }
-
- case LiveSession::kWhatBufferingStart:
- {
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatPauseOnBufferingStart);
- notify->post();
- break;
- }
-
- case LiveSession::kWhatBufferingEnd:
- {
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatResumeOnBufferingEnd);
- notify->post();
- break;
- }
-
-
- case LiveSession::kWhatBufferingUpdate:
- {
- sp<AMessage> notify = dupNotify();
- int32_t percentage;
- CHECK(msg->findInt32("percentage", &percentage));
- notify->setInt32("what", kWhatBufferingUpdate);
- notify->setInt32("percentage", percentage);
- notify->post();
- break;
- }
-
- case LiveSession::kWhatMetadataDetected:
- {
- if (!mHasMetadata) {
- mHasMetadata = true;
-
- sp<AMessage> notify = dupNotify();
- // notification without buffer triggers MEDIA2_INFO_METADATA_UPDATE
- notify->setInt32("what", kWhatTimedMetaData);
- notify->post();
- }
- break;
- }
-
- case LiveSession::kWhatError:
- {
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-} // namespace android
-
diff --git a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.h b/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.h
deleted file mode 100644
index 8fc71e2..0000000
--- a/media/libmediaplayer2/nuplayer2/HTTPLiveSource2.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef HTTP_LIVE_SOURCE2_H_
-
-#define HTTP_LIVE_SOURCE2_H_
-
-#include "NuPlayer2.h"
-#include "NuPlayer2Source.h"
-
-#include "LiveSession.h"
-
-namespace android {
-
-struct LiveSession;
-
-struct NuPlayer2::HTTPLiveSource2 : public NuPlayer2::Source {
- HTTPLiveSource2(
- const sp<AMessage> ¬ify,
- const sp<MediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers);
-
- virtual status_t getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) override;
- virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
-
- virtual void prepareAsync(int64_t startTimeUs);
- virtual void start();
-
- virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
- virtual sp<MetaData> getFormatMeta(bool audio);
- virtual sp<AMessage> getFormat(bool audio);
-
- virtual status_t feedMoreTSData();
- virtual status_t getDuration(int64_t *durationUs);
- virtual size_t getTrackCount() const;
- virtual sp<AMessage> getTrackInfo(size_t trackIndex) const;
- virtual ssize_t getSelectedTrack(media_track_type /* type */) const;
- virtual status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
- virtual status_t seekTo(
- int64_t seekTimeUs,
- MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
-
-protected:
- virtual ~HTTPLiveSource2();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
- enum Flags {
- // Don't log any URLs.
- kFlagIncognito = 1,
- };
-
- enum {
- kWhatSessionNotify,
- kWhatFetchSubtitleData,
- kWhatFetchMetaData,
- };
-
- sp<MediaHTTPService> mHTTPService;
- AString mURL;
- KeyedVector<String8, String8> mExtraHeaders;
- uint32_t mFlags;
- status_t mFinalResult;
- off64_t mOffset;
- sp<ALooper> mLiveLooper;
- sp<LiveSession> mLiveSession;
- int32_t mFetchSubtitleDataGeneration;
- int32_t mFetchMetaDataGeneration;
- bool mHasMetadata;
- bool mMetadataSelected;
- BufferingSettings mBufferingSettings;
-
- void onSessionNotify(const sp<AMessage> &msg);
- void pollForRawData(
- const sp<AMessage> &msg, int32_t currentGeneration,
- LiveSession::StreamType fetchType, int32_t pushWhat);
-
- DISALLOW_EVIL_CONSTRUCTORS(HTTPLiveSource2);
-};
-
-} // namespace android
-
-#endif // HTTP_LIVE_SOURCE2_H_
diff --git a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp b/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
deleted file mode 100644
index 89703de..0000000
--- a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright 2018, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "JMediaPlayer2Utils"
-
-#include "JMediaPlayer2Utils.h"
-#include <mediaplayer2/JavaVMHelper.h>
-
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/Utils.h>
-#include <utils/Log.h>
-
-#include "log/log.h"
-
-namespace android {
-
-static const int64_t kOffloadMinDurationSec = 60;
-
-// static
-bool JMediaPlayer2Utils::isOffloadedAudioPlaybackSupported(
- const sp<MetaData>& meta, bool hasVideo, bool isStreaming, audio_stream_type_t streamType)
-{
- if (hasVideo || streamType != AUDIO_STREAM_MUSIC) {
- return false;
- }
-
- audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
- if (OK != getAudioOffloadInfo(meta, hasVideo, isStreaming, streamType, &info)) {
- return false;
- }
-
- if (info.duration_us < kOffloadMinDurationSec * 1000000) {
- return false;
- }
-
- int32_t audioFormat = audioFormatFromNative(info.format);
- int32_t channelMask = outChannelMaskFromNative(info.channel_mask);
- if (audioFormat == ENCODING_INVALID || channelMask == CHANNEL_INVALID) {
- return false;
- }
-
- JNIEnv* env = JavaVMHelper::getJNIEnv();
- jclass jMP2UtilsCls = env->FindClass("android/media/MediaPlayer2Utils");
- jmethodID jSetAudioOutputDeviceById = env->GetStaticMethodID(
- jMP2UtilsCls, "isOffloadedAudioPlaybackSupported", "(III)Z");
- jboolean result = env->CallStaticBooleanMethod(
- jMP2UtilsCls, jSetAudioOutputDeviceById, audioFormat, info.sample_rate, channelMask);
- return result;
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.h b/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.h
deleted file mode 100644
index fcbd43c..0000000
--- a/media/libmediaplayer2/nuplayer2/JMediaPlayer2Utils.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright 2018, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _J_MEDIAPLAYER2_UTILS2_H_
-#define _J_MEDIAPLAYER2_UTILS2_H_
-
-#include <media/stagefright/MetaData.h>
-
-#include "jni.h"
-#include "android_media_AudioFormat.h"
-
-namespace android {
-
-struct JMediaPlayer2Utils {
- static bool isOffloadedAudioPlaybackSupported(
- const sp<MetaData>& meta, bool hasVideo, bool isStreaming,
- audio_stream_type_t streamType);
-};
-
-} // namespace android
-
-#endif // _J_MEDIAPLAYER2_UTILS2_H_
diff --git a/media/libmediaplayer2/nuplayer2/JWakeLock.cpp b/media/libmediaplayer2/nuplayer2/JWakeLock.cpp
deleted file mode 100644
index 983d77e..0000000
--- a/media/libmediaplayer2/nuplayer2/JWakeLock.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "JWakeLock"
-#include <utils/Log.h>
-
-#include "JWakeLock.h"
-
-#include <media/stagefright/foundation/ADebug.h>
-
-namespace android {
-
-JWakeLock::JWakeLock(const sp<JObjectHolder> &context) :
- mWakeLockCount(0),
- mWakeLock(NULL),
- mContext(context) {}
-
-JWakeLock::~JWakeLock() {
- clearJavaWakeLock();
-}
-
-bool JWakeLock::acquire() {
- if (mWakeLockCount == 0) {
- if (mWakeLock == NULL) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass jContextCls = env->FindClass("android/content/Context");
- jclass jPowerManagerCls = env->FindClass("android/os/PowerManager");
-
- jmethodID jGetSystemService = env->GetMethodID(jContextCls,
- "getSystemService", "(Ljava/lang/String;)Ljava/lang/Object;");
- jobject javaPowerManagerObj = env->CallObjectMethod(mContext->getJObject(),
- jGetSystemService, env->NewStringUTF("power"));
-
- jfieldID jPARTIAL_WAKE_LOCK = env->GetStaticFieldID(jPowerManagerCls,
- "PARTIAL_WAKE_LOCK", "I");
- jint PARTIAL_WAKE_LOCK = env->GetStaticIntField(jPowerManagerCls, jPARTIAL_WAKE_LOCK);
-
- jmethodID jNewWakeLock = env->GetMethodID(jPowerManagerCls,
- "newWakeLock", "(ILjava/lang/String;)Landroid/os/PowerManager$WakeLock;");
- jobject javaWakeLock = env->CallObjectMethod(javaPowerManagerObj,
- jNewWakeLock, PARTIAL_WAKE_LOCK, env->NewStringUTF("JWakeLock"));
- mWakeLock = new JObjectHolder(javaWakeLock);
- env->DeleteLocalRef(javaPowerManagerObj);
- env->DeleteLocalRef(javaWakeLock);
- }
- if (mWakeLock != NULL) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass wakeLockCls = env->FindClass("android/os/PowerManager$WakeLock");
- jmethodID jAcquire = env->GetMethodID(wakeLockCls, "acquire", "()V");
- env->CallVoidMethod(mWakeLock->getJObject(), jAcquire);
- mWakeLockCount++;
- return true;
- }
- } else {
- mWakeLockCount++;
- return true;
- }
- return false;
-}
-
-void JWakeLock::release(bool force) {
- if (mWakeLockCount == 0) {
- return;
- }
- if (force) {
- // Force wakelock release below by setting reference count to 1.
- mWakeLockCount = 1;
- }
- if (--mWakeLockCount == 0) {
- if (mWakeLock != NULL) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass wakeLockCls = env->FindClass("android/os/PowerManager$WakeLock");
- jmethodID jRelease = env->GetMethodID(wakeLockCls, "release", "()V");
- env->CallVoidMethod(mWakeLock->getJObject(), jRelease);
- }
- }
-}
-
-void JWakeLock::clearJavaWakeLock() {
- release(true);
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/JWakeLock.h b/media/libmediaplayer2/nuplayer2/JWakeLock.h
deleted file mode 100644
index 36c542e..0000000
--- a/media/libmediaplayer2/nuplayer2/JWakeLock.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef J_WAKELOCK_H_
-#define J_WAKELOCK_H_
-
-#include <media/stagefright/foundation/ABase.h>
-#include <mediaplayer2/JObjectHolder.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-class JWakeLock : public RefBase {
-
-public:
- JWakeLock(const sp<JObjectHolder> &context);
-
- // NOTE: acquire and release are not thread safe
-
- // returns true if wakelock was acquired
- bool acquire();
- void release(bool force = false);
-
- virtual ~JWakeLock();
-
-private:
- uint32_t mWakeLockCount;
- sp<JObjectHolder> mWakeLock;
- const sp<JObjectHolder> mContext;
-
- void clearJavaWakeLock();
-
- DISALLOW_EVIL_CONSTRUCTORS(JWakeLock);
-};
-
-} // namespace android
-
-#endif // J_WAKELOCK_H_
diff --git a/media/libmediaplayer2/nuplayer2/MODULE_LICENSE_APACHE2 b/media/libmediaplayer2/nuplayer2/MODULE_LICENSE_APACHE2
deleted file mode 100644
index e69de29..0000000
--- a/media/libmediaplayer2/nuplayer2/MODULE_LICENSE_APACHE2
+++ /dev/null
diff --git a/media/libmediaplayer2/nuplayer2/NOTICE b/media/libmediaplayer2/nuplayer2/NOTICE
deleted file mode 100644
index c5b1efa..0000000
--- a/media/libmediaplayer2/nuplayer2/NOTICE
+++ /dev/null
@@ -1,190 +0,0 @@
-
- Copyright (c) 2005-2008, The Android Open Source Project
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
deleted file mode 100644
index d608d4a..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ /dev/null
@@ -1,3308 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2"
-
-#include <inttypes.h>
-
-#include <utils/Log.h>
-
-#include "NuPlayer2.h"
-
-#include "HTTPLiveSource2.h"
-#include "JMediaPlayer2Utils.h"
-#include "NuPlayer2CCDecoder.h"
-#include "NuPlayer2Decoder.h"
-#include "NuPlayer2DecoderBase.h"
-#include "NuPlayer2DecoderPassThrough.h"
-#include "NuPlayer2Driver.h"
-#include "NuPlayer2Renderer.h"
-#include "NuPlayer2Source.h"
-#include "RTSPSource2.h"
-#include "GenericSource2.h"
-#include "TextDescriptions2.h"
-
-#include "ATSParser.h"
-
-#include <cutils/properties.h>
-
-#include <media/AudioParameter.h>
-#include <media/AudioResamplerPublic.h>
-#include <media/AVSyncSettings.h>
-#include <media/DataSourceDesc.h>
-#include <media/MediaCodecBuffer.h>
-#include <media/NdkWrapper.h>
-
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/avc_utils.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaClock.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MetaData.h>
-
-#include "ESDS.h"
-#include <media/stagefright/Utils.h>
-
-#include <system/window.h>
-
-namespace android {
-
-static status_t sendMetaDataToHal(sp<MediaPlayer2Interface::AudioSink>& sink,
- const sp<MetaData>& meta) {
- int32_t sampleRate = 0;
- int32_t bitRate = 0;
- int32_t channelMask = 0;
- int32_t delaySamples = 0;
- int32_t paddingSamples = 0;
-
- AudioParameter param = AudioParameter();
-
- if (meta->findInt32(kKeySampleRate, &sampleRate)) {
- param.addInt(String8(AUDIO_OFFLOAD_CODEC_SAMPLE_RATE), sampleRate);
- }
- if (meta->findInt32(kKeyChannelMask, &channelMask)) {
- param.addInt(String8(AUDIO_OFFLOAD_CODEC_NUM_CHANNEL), channelMask);
- }
- if (meta->findInt32(kKeyBitRate, &bitRate)) {
- param.addInt(String8(AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE), bitRate);
- }
- if (meta->findInt32(kKeyEncoderDelay, &delaySamples)) {
- param.addInt(String8(AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES), delaySamples);
- }
- if (meta->findInt32(kKeyEncoderPadding, &paddingSamples)) {
- param.addInt(String8(AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES), paddingSamples);
- }
-
- ALOGV("sendMetaDataToHal: bitRate %d, sampleRate %d, chanMask %d,"
- "delaySample %d, paddingSample %d", bitRate, sampleRate,
- channelMask, delaySamples, paddingSamples);
-
- sink->setParameters(param.toString());
- return OK;
-}
-
-
-struct NuPlayer2::Action : public RefBase {
- Action() {}
-
- virtual void execute(NuPlayer2 *player) = 0;
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(Action);
-};
-
-struct NuPlayer2::SeekAction : public Action {
- explicit SeekAction(int64_t seekTimeUs, MediaPlayer2SeekMode mode)
- : mSeekTimeUs(seekTimeUs),
- mMode(mode) {
- }
-
- virtual void execute(NuPlayer2 *player) {
- player->performSeek(mSeekTimeUs, mMode);
- }
-
-private:
- int64_t mSeekTimeUs;
- MediaPlayer2SeekMode mMode;
-
- DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
-};
-
-struct NuPlayer2::ResumeDecoderAction : public Action {
- explicit ResumeDecoderAction(bool needNotify)
- : mNeedNotify(needNotify) {
- }
-
- virtual void execute(NuPlayer2 *player) {
- player->performResumeDecoders(mNeedNotify);
- }
-
-private:
- bool mNeedNotify;
-
- DISALLOW_EVIL_CONSTRUCTORS(ResumeDecoderAction);
-};
-
-struct NuPlayer2::SetSurfaceAction : public Action {
- explicit SetSurfaceAction(const sp<ANativeWindowWrapper> &nww)
- : mNativeWindow(nww) {
- }
-
- virtual void execute(NuPlayer2 *player) {
- player->performSetSurface(mNativeWindow);
- }
-
-private:
- sp<ANativeWindowWrapper> mNativeWindow;
-
- DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
-};
-
-struct NuPlayer2::FlushDecoderAction : public Action {
- FlushDecoderAction(FlushCommand audio, FlushCommand video)
- : mAudio(audio),
- mVideo(video) {
- }
-
- virtual void execute(NuPlayer2 *player) {
- player->performDecoderFlush(mAudio, mVideo);
- }
-
-private:
- FlushCommand mAudio;
- FlushCommand mVideo;
-
- DISALLOW_EVIL_CONSTRUCTORS(FlushDecoderAction);
-};
-
-struct NuPlayer2::PostMessageAction : public Action {
- explicit PostMessageAction(const sp<AMessage> &msg)
- : mMessage(msg) {
- }
-
- virtual void execute(NuPlayer2 *) {
- mMessage->post();
- }
-
-private:
- sp<AMessage> mMessage;
-
- DISALLOW_EVIL_CONSTRUCTORS(PostMessageAction);
-};
-
-// Use this if there's no state necessary to save in order to execute
-// the action.
-struct NuPlayer2::SimpleAction : public Action {
- typedef void (NuPlayer2::*ActionFunc)();
-
- explicit SimpleAction(ActionFunc func)
- : mFunc(func) {
- }
-
- virtual void execute(NuPlayer2 *player) {
- (player->*mFunc)();
- }
-
-private:
- ActionFunc mFunc;
-
- DISALLOW_EVIL_CONSTRUCTORS(SimpleAction);
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-NuPlayer2::NuPlayer2(
- pid_t pid, uid_t uid, const sp<MediaClock> &mediaClock, const sp<JObjectHolder> &context)
- : mPID(pid),
- mUID(uid),
- mMediaClock(mediaClock),
- mOffloadAudio(false),
- mAudioDecoderGeneration(0),
- mVideoDecoderGeneration(0),
- mRendererGeneration(0),
- mEOSMonitorGeneration(0),
- mLastStartedPlayingTimeNs(0),
- mPreviousSeekTimeUs(0),
- mAudioEOS(false),
- mVideoEOS(false),
- mScanSourcesPending(false),
- mScanSourcesGeneration(0),
- mPollDurationGeneration(0),
- mTimedTextGeneration(0),
- mFlushingAudio(NONE),
- mFlushingVideo(NONE),
- mResumePending(false),
- mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
- mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
- mVideoFpsHint(-1.f),
- mStarted(false),
- mPrepared(false),
- mResetting(false),
- mSourceStarted(false),
- mAudioDecoderError(false),
- mVideoDecoderError(false),
- mPaused(false),
- mPausedByClient(true),
- mPausedForBuffering(false),
- mContext(context) {
- CHECK(mediaClock != NULL);
- clearFlushComplete();
-}
-
-NuPlayer2::~NuPlayer2() {
-}
-
-void NuPlayer2::setDriver(const wp<NuPlayer2Driver> &driver) {
- mDriver = driver;
-}
-
-static bool IsHTTPLiveURL(const char *url) {
- if (!strncasecmp("http://", url, 7)
- || !strncasecmp("https://", url, 8)
- || !strncasecmp("file://", url, 7)) {
- size_t len = strlen(url);
- if (len >= 5 && !strcasecmp(".m3u8", &url[len - 5])) {
- return true;
- }
-
- if (strstr(url,"m3u8")) {
- return true;
- }
- }
-
- return false;
-}
-
-status_t NuPlayer2::createNuPlayer2Source(const sp<DataSourceDesc> &dsd,
- sp<Source> *source,
- DATA_SOURCE_TYPE *dataSourceType) {
- status_t err = NO_ERROR;
- sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
- notify->setInt64("srcId", dsd->mId);
-
- switch (dsd->mType) {
- case DataSourceDesc::TYPE_URL:
- {
- const char *url = dsd->mUrl.c_str();
- size_t len = strlen(url);
-
- const sp<MediaHTTPService> &httpService = dsd->mHttpService;
- KeyedVector<String8, String8> *headers = &(dsd->mHeaders);
-
- if (IsHTTPLiveURL(url)) {
- *source = new HTTPLiveSource2(notify, httpService, url, headers);
- ALOGV("createNuPlayer2Source HTTPLiveSource2 %s", url);
- *dataSourceType = DATA_SOURCE_TYPE_HTTP_LIVE;
- } else if (!strncasecmp(url, "rtsp://", 7)) {
- *source = new RTSPSource2(
- notify, httpService, url, headers, mUID);
- ALOGV("createNuPlayer2Source RTSPSource2 %s", url);
- *dataSourceType = DATA_SOURCE_TYPE_RTSP;
- } else if ((!strncasecmp(url, "http://", 7)
- || !strncasecmp(url, "https://", 8))
- && ((len >= 4 && !strcasecmp(".sdp", &url[len - 4]))
- || strstr(url, ".sdp?"))) {
- *source = new RTSPSource2(
- notify, httpService, url, headers, mUID, true);
- ALOGV("createNuPlayer2Source RTSPSource2 http/https/.sdp %s", url);
- *dataSourceType = DATA_SOURCE_TYPE_RTSP;
- } else {
- ALOGV("createNuPlayer2Source GenericSource2 %s", url);
-
- sp<GenericSource2> genericSource =
- new GenericSource2(notify, mUID, mMediaClock);
-
- err = genericSource->setDataSource(url, headers);
-
- if (err == OK) {
- *source = genericSource;
- } else {
- *source = NULL;
- ALOGE("Failed to create NuPlayer2Source!");
- }
-
- // regardless of success/failure
- *dataSourceType = DATA_SOURCE_TYPE_GENERIC_URL;
- }
- break;
- }
-
- case DataSourceDesc::TYPE_FD:
- {
- sp<GenericSource2> genericSource =
- new GenericSource2(notify, mUID, mMediaClock);
-
- ALOGV("createNuPlayer2Source fd %d/%lld/%lld source: %p",
- dsd->mFD, (long long)dsd->mFDOffset, (long long)dsd->mFDLength,
- genericSource.get());
-
- err = genericSource->setDataSource(dsd->mFD, dsd->mFDOffset, dsd->mFDLength);
-
- if (err != OK) {
- ALOGE("Failed to create NuPlayer2Source!");
- *source = NULL;
- } else {
- *source = genericSource;
- }
-
- *dataSourceType = DATA_SOURCE_TYPE_GENERIC_FD;
- break;
- }
-
- case DataSourceDesc::TYPE_CALLBACK:
- {
- sp<GenericSource2> genericSource =
- new GenericSource2(notify, mUID, mMediaClock);
- err = genericSource->setDataSource(dsd->mCallbackSource);
-
- if (err != OK) {
- ALOGE("Failed to create NuPlayer2Source!");
- *source = NULL;
- } else {
- *source = genericSource;
- }
-
- *dataSourceType = DATA_SOURCE_TYPE_MEDIA;
- break;
- }
-
- default:
- err = BAD_TYPE;
- *source = NULL;
- *dataSourceType = DATA_SOURCE_TYPE_NONE;
- ALOGE("invalid data source type!");
- break;
- }
-
- return err;
-}
-
-void NuPlayer2::setDataSourceAsync(const sp<DataSourceDesc> &dsd) {
- DATA_SOURCE_TYPE dataSourceType;
- sp<Source> source;
- createNuPlayer2Source(dsd, &source, &dataSourceType);
-
- // TODO: currently NuPlayer2Driver makes blocking call to setDataSourceAsync
- // and expects notifySetDataSourceCompleted regardless of success or failure.
- // This will be changed since setDataSource should be asynchronous at JAVA level.
- // When it succeeds, app will get onInfo notification. Otherwise, onError
- // will be called.
- /*
- if (err != OK) {
- notifyListener(dsd->mId, MEDIA2_ERROR, MEDIA2_ERROR_FAILED_TO_SET_DATA_SOURCE, err);
- return;
- }
-
- // Now, source != NULL.
- */
-
- mCurrentSourceInfo.mDataSourceType = dataSourceType;
-
- sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
- msg->setObject("source", source);
- msg->setInt64("srcId", dsd->mId);
- msg->setInt64("startTimeUs", dsd->mStartPositionMs * 1000);
- msg->setInt64("endTimeUs", dsd->mEndPositionMs * 1000);
- msg->post();
-}
-
-void NuPlayer2::prepareNextDataSourceAsync(const sp<DataSourceDesc> &dsd) {
- DATA_SOURCE_TYPE dataSourceType;
- sp<Source> source;
- createNuPlayer2Source(dsd, &source, &dataSourceType);
-
- /*
- if (err != OK) {
- notifyListener(dsd->mId, MEDIA2_ERROR, MEDIA2_ERROR_FAILED_TO_SET_DATA_SOURCE, err);
- return;
- }
-
- // Now, source != NULL.
- */
-
- mNextSourceInfo.mDataSourceType = dataSourceType;
-
- sp<AMessage> msg = new AMessage(kWhatPrepareNextDataSource, this);
- msg->setObject("source", source);
- msg->setInt64("srcId", dsd->mId);
- msg->setInt64("startTimeUs", dsd->mStartPositionMs * 1000);
- msg->setInt64("endTimeUs", dsd->mEndPositionMs * 1000);
- msg->post();
-}
-
-void NuPlayer2::playNextDataSource(int64_t srcId) {
- disconnectSource();
-
- sp<AMessage> msg = new AMessage(kWhatPlayNextDataSource, this);
- msg->setInt64("srcId", srcId);
- msg->post();
-}
-
-status_t NuPlayer2::getBufferingSettings(
- BufferingSettings *buffering /* nonnull */) {
- sp<AMessage> msg = new AMessage(kWhatGetBufferingSettings, this);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- if (err == OK) {
- readFromAMessage(response, buffering);
- }
- }
- return err;
-}
-
-status_t NuPlayer2::setBufferingSettings(const BufferingSettings& buffering) {
- sp<AMessage> msg = new AMessage(kWhatSetBufferingSettings, this);
- writeToAMessage(msg, buffering);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
- return err;
-}
-
-void NuPlayer2::prepareAsync() {
- ALOGV("prepareAsync");
-
- (new AMessage(kWhatPrepare, this))->post();
-}
-
-void NuPlayer2::setVideoSurfaceTextureAsync(const sp<ANativeWindowWrapper> &nww) {
- sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
-
- if (nww == NULL || nww->getANativeWindow() == NULL) {
- msg->setObject("surface", NULL);
- } else {
- msg->setObject("surface", nww);
- }
-
- msg->post();
-}
-
-void NuPlayer2::setAudioSink(const sp<MediaPlayer2Interface::AudioSink> &sink) {
- sp<AMessage> msg = new AMessage(kWhatSetAudioSink, this);
- msg->setObject("sink", sink);
- msg->post();
-}
-
-void NuPlayer2::start() {
- (new AMessage(kWhatStart, this))->post();
-}
-
-status_t NuPlayer2::setPlaybackSettings(const AudioPlaybackRate &rate) {
- // do some cursory validation of the settings here. audio modes are
- // only validated when set on the audiosink.
- if (rate.mSpeed < AUDIO_TIMESTRETCH_SPEED_MIN
- || rate.mSpeed > AUDIO_TIMESTRETCH_SPEED_MAX
- || rate.mPitch < AUDIO_TIMESTRETCH_SPEED_MIN
- || rate.mPitch > AUDIO_TIMESTRETCH_SPEED_MAX) {
- return BAD_VALUE;
- }
- sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
- writeToAMessage(msg, rate);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
- return err;
-}
-
-status_t NuPlayer2::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
- sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- if (err == OK) {
- readFromAMessage(response, rate);
- }
- }
- return err;
-}
-
-status_t NuPlayer2::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
- sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
- writeToAMessage(msg, sync, videoFpsHint);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
- return err;
-}
-
-status_t NuPlayer2::getSyncSettings(
- AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
- sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- if (err == OK) {
- readFromAMessage(response, sync, videoFps);
- }
- }
- return err;
-}
-
-void NuPlayer2::pause() {
- (new AMessage(kWhatPause, this))->post();
-}
-
-void NuPlayer2::resetAsync() {
- disconnectSource();
- (new AMessage(kWhatReset, this))->post();
-}
-
-void NuPlayer2::disconnectSource() {
- sp<Source> source;
- {
- Mutex::Autolock autoLock(mSourceLock);
- source = mCurrentSourceInfo.mSource;
- }
-
- if (source != NULL) {
- // During a reset, the data source might be unresponsive already, we need to
- // disconnect explicitly so that reads exit promptly.
- // We can't queue the disconnect request to the looper, as it might be
- // queued behind a stuck read and never gets processed.
-        // Doing a disconnect outside the looper allows the pending reads to exit
- // (either successfully or with error).
- source->disconnect();
- }
-
-}
-
-status_t NuPlayer2::notifyAt(int64_t mediaTimeUs) {
- sp<AMessage> notify = new AMessage(kWhatNotifyTime, this);
- notify->setInt64("timerUs", mediaTimeUs);
- mMediaClock->addTimer(notify, mediaTimeUs);
- return OK;
-}
-
-void NuPlayer2::seekToAsync(int64_t seekTimeUs, MediaPlayer2SeekMode mode, bool needNotify) {
- sp<AMessage> msg = new AMessage(kWhatSeek, this);
- msg->setInt64("seekTimeUs", seekTimeUs);
- msg->setInt32("mode", mode);
- msg->setInt32("needNotify", needNotify);
- msg->post();
-}
-
-void NuPlayer2::rewind() {
- sp<AMessage> msg = new AMessage(kWhatRewind, this);
- msg->post();
-}
-
-void NuPlayer2::writeTrackInfo(
- PlayerMessage* reply, const sp<AMessage>& format) const {
- if (format == NULL) {
- ALOGE("NULL format");
- return;
- }
- int32_t trackType;
- if (!format->findInt32("type", &trackType)) {
- ALOGE("no track type");
- return;
- }
-
- AString mime;
- if (!format->findString("mime", &mime)) {
- // Java MediaPlayer only uses mimetype for subtitle and timedtext tracks.
- // If we can't find the mimetype here it means that we wouldn't be needing
- // the mimetype on the Java end. We still write a placeholder mime to keep the
- // (de)serialization logic simple.
- if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
- mime = "audio/";
- } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
- mime = "video/";
- } else {
- ALOGE("unknown track type: %d", trackType);
- return;
- }
- }
-
- AString lang;
- if (!format->findString("language", &lang)) {
- ALOGE("no language");
- return;
- }
-
- reply->add_values()->set_int32_value(trackType);
- reply->add_values()->set_string_value(mime.c_str());
- reply->add_values()->set_string_value(lang.c_str());
-
- if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
- int32_t isAuto, isDefault, isForced;
- CHECK(format->findInt32("auto", &isAuto));
- CHECK(format->findInt32("default", &isDefault));
- CHECK(format->findInt32("forced", &isForced));
-
- reply->add_values()->set_int32_value(isAuto);
- reply->add_values()->set_int32_value(isDefault);
- reply->add_values()->set_int32_value(isForced);
- }
-}
-
-void NuPlayer2::onMessageReceived(const sp<AMessage> &msg) {
-
- switch (msg->what()) {
- case kWhatSetDataSource:
- {
- ALOGV("kWhatSetDataSource");
-
- CHECK(mCurrentSourceInfo.mSource == NULL);
-
- status_t err = OK;
- sp<RefBase> obj;
- CHECK(msg->findObject("source", &obj));
- if (obj != NULL) {
- Mutex::Autolock autoLock(mSourceLock);
- CHECK(msg->findInt64("srcId", &mCurrentSourceInfo.mSrcId));
- CHECK(msg->findInt64("startTimeUs", &mCurrentSourceInfo.mStartTimeUs));
- CHECK(msg->findInt64("endTimeUs", &mCurrentSourceInfo.mEndTimeUs));
- mCurrentSourceInfo.mSource = static_cast<Source *>(obj.get());
- } else {
- err = UNKNOWN_ERROR;
- ALOGE("kWhatSetDataSource, source should not be NULL");
- }
-
- CHECK(mDriver != NULL);
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- driver->notifySetDataSourceCompleted(mCurrentSourceInfo.mSrcId, err);
- }
- break;
- }
-
- case kWhatPrepareNextDataSource:
- {
- ALOGV("kWhatPrepareNextDataSource");
-
- status_t err = OK;
- sp<RefBase> obj;
- CHECK(msg->findObject("source", &obj));
- if (obj != NULL) {
- Mutex::Autolock autoLock(mSourceLock);
- CHECK(msg->findInt64("srcId", &mNextSourceInfo.mSrcId));
- CHECK(msg->findInt64("startTimeUs", &mNextSourceInfo.mStartTimeUs));
- CHECK(msg->findInt64("endTimeUs", &mNextSourceInfo.mEndTimeUs));
- mNextSourceInfo.mSource = static_cast<Source *>(obj.get());
- mNextSourceInfo.mSource->prepareAsync(mNextSourceInfo.mStartTimeUs);
- } else {
- err = UNKNOWN_ERROR;
- }
-
- break;
- }
-
- case kWhatPlayNextDataSource:
- {
- ALOGV("kWhatPlayNextDataSource");
- int64_t srcId;
- CHECK(msg->findInt64("srcId", &srcId));
- if (srcId != mNextSourceInfo.mSrcId) {
- notifyListener(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, 0);
- return;
- }
-
- mResetting = true;
- stopPlaybackTimer("kWhatPlayNextDataSource");
- stopRebufferingTimer(true);
-
- mDeferredActions.push_back(
- new FlushDecoderAction(
- FLUSH_CMD_SHUTDOWN /* audio */,
- FLUSH_CMD_SHUTDOWN /* video */));
-
- mDeferredActions.push_back(
- new SimpleAction(&NuPlayer2::performPlayNextDataSource));
-
- processDeferredActions();
- break;
- }
-
- case kWhatEOSMonitor:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- int32_t reason;
- CHECK(msg->findInt32("reason", &reason));
-
- if (generation != mEOSMonitorGeneration || reason != MediaClock::TIMER_REASON_REACHED) {
- break; // stale or reset
- }
-
- ALOGV("kWhatEOSMonitor");
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_PLAYBACK_COMPLETE, 0, 0);
- break;
- }
-
- case kWhatGetBufferingSettings:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- ALOGV("kWhatGetBufferingSettings");
- BufferingSettings buffering;
- status_t err = OK;
- if (mCurrentSourceInfo.mSource != NULL) {
- err = mCurrentSourceInfo.mSource->getBufferingSettings(&buffering);
- } else {
- err = INVALID_OPERATION;
- }
- sp<AMessage> response = new AMessage;
- if (err == OK) {
- writeToAMessage(response, buffering);
- }
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatSetBufferingSettings:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- ALOGV("kWhatSetBufferingSettings");
- BufferingSettings buffering;
- readFromAMessage(msg, &buffering);
- status_t err = OK;
- if (mCurrentSourceInfo.mSource != NULL) {
- err = mCurrentSourceInfo.mSource->setBufferingSettings(buffering);
- } else {
- err = INVALID_OPERATION;
- }
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatPrepare:
- {
- ALOGV("onMessageReceived kWhatPrepare");
-
- mCurrentSourceInfo.mSource->prepareAsync(mCurrentSourceInfo.mStartTimeUs);
- break;
- }
-
- case kWhatGetTrackInfo:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- int64_t srcId;
- CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
-
- PlayerMessage* reply;
- CHECK(msg->findPointer("reply", (void**)&reply));
-
- // TODO: use correct source info based on srcId.
- size_t inbandTracks = 0;
- if (mCurrentSourceInfo.mSource != NULL) {
- inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
- }
-
- size_t ccTracks = 0;
- if (mCCDecoder != NULL) {
- ccTracks = mCCDecoder->getTrackCount();
- }
-
- // total track count
- reply->add_values()->set_int32_value(inbandTracks + ccTracks);
-
- // write inband tracks
- for (size_t i = 0; i < inbandTracks; ++i) {
- writeTrackInfo(reply, mCurrentSourceInfo.mSource->getTrackInfo(i));
- }
-
- // write CC track
- for (size_t i = 0; i < ccTracks; ++i) {
- writeTrackInfo(reply, mCCDecoder->getTrackInfo(i));
- }
-
- sp<AMessage> response = new AMessage;
- response->postReply(replyID);
- break;
- }
-
- case kWhatGetSelectedTrack:
- {
- int64_t srcId;
- CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
-
- int32_t type32;
- CHECK(msg->findInt32("type", (int32_t*)&type32));
- media_track_type type = (media_track_type)type32;
-
- // TODO: use correct source info based on srcId.
- size_t inbandTracks = 0;
- status_t err = INVALID_OPERATION;
- ssize_t selectedTrack = -1;
- if (mCurrentSourceInfo.mSource != NULL) {
- err = OK;
- inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
- selectedTrack = mCurrentSourceInfo.mSource->getSelectedTrack(type);
- }
-
- if (selectedTrack == -1 && mCCDecoder != NULL) {
- err = OK;
- selectedTrack = mCCDecoder->getSelectedTrack(type);
- if (selectedTrack != -1) {
- selectedTrack += inbandTracks;
- }
- }
-
- PlayerMessage* reply;
- CHECK(msg->findPointer("reply", (void**)&reply));
- reply->add_values()->set_int32_value(selectedTrack);
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
-
- case kWhatSelectTrack:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- int64_t srcId;
- size_t trackIndex;
- int32_t select;
- int64_t timeUs;
- CHECK(msg->findInt64("srcId", (int64_t*)&srcId));
- CHECK(msg->findSize("trackIndex", &trackIndex));
- CHECK(msg->findInt32("select", &select));
- CHECK(msg->findInt64("timeUs", &timeUs));
-
- status_t err = INVALID_OPERATION;
-
- // TODO: use correct source info based on srcId.
- size_t inbandTracks = 0;
- if (mCurrentSourceInfo.mSource != NULL) {
- inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
- }
- size_t ccTracks = 0;
- if (mCCDecoder != NULL) {
- ccTracks = mCCDecoder->getTrackCount();
- }
-
- if (trackIndex < inbandTracks) {
- err = mCurrentSourceInfo.mSource->selectTrack(trackIndex, select, timeUs);
-
- if (!select && err == OK) {
- int32_t type;
- sp<AMessage> info = mCurrentSourceInfo.mSource->getTrackInfo(trackIndex);
- if (info != NULL
- && info->findInt32("type", &type)
- && type == MEDIA_TRACK_TYPE_TIMEDTEXT) {
- ++mTimedTextGeneration;
- }
- }
- } else {
- trackIndex -= inbandTracks;
-
- if (trackIndex < ccTracks) {
- err = mCCDecoder->selectTrack(trackIndex, select);
- }
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
-
- response->postReply(replyID);
- break;
- }
-
- case kWhatPollDuration:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
-
- if (generation != mPollDurationGeneration) {
- // stale
- break;
- }
-
- int64_t durationUs;
- if (mDriver != NULL && mCurrentSourceInfo.mSource->getDuration(&durationUs) == OK) {
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- driver->notifyDuration(mCurrentSourceInfo.mSrcId, durationUs);
- }
- }
-
- msg->post(1000000LL); // poll again in a second.
- break;
- }
-
- case kWhatSetVideoSurface:
- {
-
- sp<RefBase> obj;
- CHECK(msg->findObject("surface", &obj));
- sp<ANativeWindowWrapper> nww = static_cast<ANativeWindowWrapper *>(obj.get());
-
- ALOGD("onSetVideoSurface(%p, %s video decoder)",
- (nww == NULL ? NULL : nww->getANativeWindow()),
- (mCurrentSourceInfo.mSource != NULL && mStarted
- && mCurrentSourceInfo.mSource->getFormat(false /* audio */) != NULL
- && mVideoDecoder != NULL) ? "have" : "no");
-
- // Need to check mStarted before calling mCurrentSourceInfo.mSource->getFormat
-            // because NuPlayer2 might be in the preparing state and it could take a long time.
- // When mStarted is true, mCurrentSourceInfo.mSource must have been set.
- if (mCurrentSourceInfo.mSource == NULL || !mStarted
- || mCurrentSourceInfo.mSource->getFormat(false /* audio */) == NULL
- // NOTE: mVideoDecoder's mNativeWindow is always non-null
- || (mVideoDecoder != NULL && mVideoDecoder->setVideoSurface(nww) == OK)) {
- performSetSurface(nww);
- break;
- }
-
- mDeferredActions.push_back(
- new FlushDecoderAction(
- (obj != NULL ? FLUSH_CMD_FLUSH : FLUSH_CMD_NONE) /* audio */,
- FLUSH_CMD_SHUTDOWN /* video */));
-
- mDeferredActions.push_back(new SetSurfaceAction(nww));
-
- if (obj != NULL) {
- if (mStarted) {
- // Issue a seek to refresh the video screen only if started otherwise
- // the extractor may not yet be started and will assert.
- // If the video decoder is not set (perhaps audio only in this case)
- // do not perform a seek as it is not needed.
- int64_t currentPositionUs = 0;
- if (getCurrentPosition(¤tPositionUs) == OK) {
- mDeferredActions.push_back(
- new SeekAction(currentPositionUs,
- MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC /* mode */));
- }
- }
-
- // If there is a new surface texture, instantiate decoders
- // again if possible.
- mDeferredActions.push_back(
- new SimpleAction(&NuPlayer2::performScanSources));
-
- // After a flush without shutdown, decoder is paused.
- // Don't resume it until source seek is done, otherwise it could
- // start pulling stale data too soon.
- mDeferredActions.push_back(
- new ResumeDecoderAction(false /* needNotify */));
- }
-
- processDeferredActions();
- break;
- }
-
- case kWhatSetAudioSink:
- {
- ALOGV("kWhatSetAudioSink");
-
- sp<RefBase> obj;
- CHECK(msg->findObject("sink", &obj));
-
- mAudioSink = static_cast<MediaPlayer2Interface::AudioSink *>(obj.get());
- break;
- }
-
- case kWhatStart:
- {
- ALOGV("kWhatStart");
- if (mStarted) {
- // do not resume yet if the source is still buffering
- if (!mPausedForBuffering) {
- onResume();
- }
- } else {
- onStart(true /* play */);
- }
- mPausedByClient = false;
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_STARTED, 0, 0);
- break;
- }
-
- case kWhatConfigPlayback:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- AudioPlaybackRate rate /* sanitized */;
- readFromAMessage(msg, &rate);
- status_t err = OK;
- if (mRenderer != NULL) {
- // AudioSink allows only 1.f and 0.f for offload mode.
- // For other speed, switch to non-offload mode.
- if (mOffloadAudio && (rate.mSpeed != 1.f || rate.mPitch != 1.f)) {
- int64_t currentPositionUs;
- if (getCurrentPosition(¤tPositionUs) != OK) {
- currentPositionUs = mPreviousSeekTimeUs;
- }
-
- // Set mPlaybackSettings so that the new audio decoder can
- // be created correctly.
- mPlaybackSettings = rate;
- if (!mPaused) {
- mRenderer->pause();
- }
- restartAudio(
- currentPositionUs, true /* forceNonOffload */,
- true /* needsToCreateAudioDecoder */);
- if (!mPaused) {
- mRenderer->resume();
- }
- }
-
- err = mRenderer->setPlaybackSettings(rate);
- }
- if (err == OK) {
- mPlaybackSettings = rate;
-
- if (mVideoDecoder != NULL) {
- sp<AMessage> params = new AMessage();
- params->setFloat("playback-speed", mPlaybackSettings.mSpeed);
- mVideoDecoder->setParameters(params);
- }
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatGetPlaybackSettings:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- AudioPlaybackRate rate = mPlaybackSettings;
- status_t err = OK;
- if (mRenderer != NULL) {
- err = mRenderer->getPlaybackSettings(&rate);
- }
- if (err == OK) {
- // get playback settings used by renderer, as it may be
- // slightly off due to audiosink not taking small changes.
- mPlaybackSettings = rate;
- }
- sp<AMessage> response = new AMessage;
- if (err == OK) {
- writeToAMessage(response, rate);
- }
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatConfigSync:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- ALOGV("kWhatConfigSync");
- AVSyncSettings sync;
- float videoFpsHint;
- readFromAMessage(msg, &sync, &videoFpsHint);
- status_t err = OK;
- if (mRenderer != NULL) {
- err = mRenderer->setSyncSettings(sync, videoFpsHint);
- }
- if (err == OK) {
- mSyncSettings = sync;
- mVideoFpsHint = videoFpsHint;
- }
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatGetSyncSettings:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- AVSyncSettings sync = mSyncSettings;
- float videoFps = mVideoFpsHint;
- status_t err = OK;
- if (mRenderer != NULL) {
- err = mRenderer->getSyncSettings(&sync, &videoFps);
- if (err == OK) {
- mSyncSettings = sync;
- mVideoFpsHint = videoFps;
- }
- }
- sp<AMessage> response = new AMessage;
- if (err == OK) {
- writeToAMessage(response, sync, videoFps);
- }
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatScanSources:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- if (generation != mScanSourcesGeneration) {
- // Drop obsolete msg.
- break;
- }
-
- mScanSourcesPending = false;
-
- ALOGV("scanning sources haveAudio=%d, haveVideo=%d",
- mAudioDecoder != NULL, mVideoDecoder != NULL);
-
- bool mHadAnySourcesBefore =
- (mAudioDecoder != NULL) || (mVideoDecoder != NULL);
- bool rescan = false;
-
- // initialize video before audio because successful initialization of
- // video may change deep buffer mode of audio.
- if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
- if (instantiateDecoder(false, &mVideoDecoder) == -EWOULDBLOCK) {
- rescan = true;
- }
- }
-
- // Don't try to re-open audio sink if there's an existing decoder.
- if (mAudioSink != NULL && mAudioDecoder == NULL) {
- if (instantiateDecoder(true, &mAudioDecoder) == -EWOULDBLOCK) {
- rescan = true;
- }
- }
-
- if (!mHadAnySourcesBefore
- && (mAudioDecoder != NULL || mVideoDecoder != NULL)) {
- // This is the first time we've found anything playable.
-
- if (mCurrentSourceInfo.mSourceFlags & Source::FLAG_DYNAMIC_DURATION) {
- schedulePollDuration();
- }
- }
-
- status_t err;
- if ((err = mCurrentSourceInfo.mSource->feedMoreTSData()) != OK) {
- if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
- // We're not currently decoding anything (no audio or
- // video tracks found) and we just ran out of input data.
-
- if (err == ERROR_END_OF_STREAM) {
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_PLAYBACK_COMPLETE, 0, 0);
- } else {
- notifyListener(
- mCurrentSourceInfo.mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
- }
- }
- break;
- }
-
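-                // A decoder returned -EWOULDBLOCK above; retry the source scan in 100 ms.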
- if (rescan) {
- msg->post(100000LL);
- mScanSourcesPending = true;
- }
- break;
- }
-
- case kWhatVideoNotify:
- case kWhatAudioNotify:
- {
- bool audio = msg->what() == kWhatAudioNotify;
-
- int32_t currentDecoderGeneration =
- (audio? mAudioDecoderGeneration : mVideoDecoderGeneration);
- int32_t requesterGeneration = currentDecoderGeneration - 1;
- CHECK(msg->findInt32("generation", &requesterGeneration));
-
- if (requesterGeneration != currentDecoderGeneration) {
- ALOGV("got message from old %s decoder, generation(%d:%d)",
- audio ? "audio" : "video", requesterGeneration,
- currentDecoderGeneration);
- sp<AMessage> reply;
- if (!(msg->findMessage("reply", &reply))) {
- return;
- }
-
- reply->setInt32("err", INFO_DISCONTINUITY);
- reply->post();
- return;
- }
-
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- if (what == DecoderBase::kWhatInputDiscontinuity) {
- int32_t formatChange;
- CHECK(msg->findInt32("formatChange", &formatChange));
-
- ALOGV("%s discontinuity: formatChange %d",
- audio ? "audio" : "video", formatChange);
-
- if (formatChange) {
- mDeferredActions.push_back(
- new FlushDecoderAction(
- audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
- audio ? FLUSH_CMD_NONE : FLUSH_CMD_SHUTDOWN));
- }
-
- mDeferredActions.push_back(
- new SimpleAction(
- &NuPlayer2::performScanSources));
-
- processDeferredActions();
- } else if (what == DecoderBase::kWhatEOS) {
- int32_t err;
- CHECK(msg->findInt32("err", &err));
-
- if (err == ERROR_END_OF_STREAM) {
- ALOGV("got %s decoder EOS", audio ? "audio" : "video");
- } else {
- ALOGV("got %s decoder EOS w/ error %d",
- audio ? "audio" : "video",
- err);
- }
-
- mRenderer->queueEOS(audio, err);
- } else if (what == DecoderBase::kWhatFlushCompleted) {
- ALOGV("decoder %s flush completed", audio ? "audio" : "video");
-
- handleFlushComplete(audio, true /* isDecoder */);
- finishFlushIfPossible();
- } else if (what == DecoderBase::kWhatVideoSizeChanged) {
- sp<AMessage> format;
- CHECK(msg->findMessage("format", &format));
-
- sp<AMessage> inputFormat =
- mCurrentSourceInfo.mSource->getFormat(false /* audio */);
-
- setVideoScalingMode(mVideoScalingMode);
- updateVideoSize(mCurrentSourceInfo.mSrcId, inputFormat, format);
- } else if (what == DecoderBase::kWhatShutdownCompleted) {
- ALOGV("%s shutdown completed", audio ? "audio" : "video");
- if (audio) {
- Mutex::Autolock autoLock(mDecoderLock);
- mAudioDecoder.clear();
- mAudioDecoderError = false;
- ++mAudioDecoderGeneration;
-
- CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
- mFlushingAudio = SHUT_DOWN;
- } else {
- Mutex::Autolock autoLock(mDecoderLock);
- mVideoDecoder.clear();
- mVideoDecoderError = false;
- ++mVideoDecoderGeneration;
-
- CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
- mFlushingVideo = SHUT_DOWN;
- }
-
- finishFlushIfPossible();
- } else if (what == DecoderBase::kWhatResumeCompleted) {
- finishResume();
- } else if (what == DecoderBase::kWhatError) {
- status_t err;
- if (!msg->findInt32("err", &err) || err == OK) {
- err = UNKNOWN_ERROR;
- }
-
- // Decoder errors can be due to Source (e.g. from streaming),
- // or from decoding corrupted bitstreams, or from other decoder
- // MediaCodec operations (e.g. from an ongoing reset or seek).
- // They may also be due to openAudioSink failure at
- // decoder start or after a format change.
- //
- // We try to gracefully shut down the affected decoder if possible,
- // rather than trying to force the shutdown with something
- // similar to performReset(). This method can lead to a hang
- // if MediaCodec functions block after an error, but they should
- // typically return INVALID_OPERATION instead of blocking.
-
- FlushStatus *flushing = audio ? &mFlushingAudio : &mFlushingVideo;
- ALOGE("received error(%#x) from %s decoder, flushing(%d), now shutting down",
- err, audio ? "audio" : "video", *flushing);
-
- switch (*flushing) {
- case NONE:
- mDeferredActions.push_back(
- new FlushDecoderAction(
- audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
- audio ? FLUSH_CMD_NONE : FLUSH_CMD_SHUTDOWN));
- processDeferredActions();
- break;
- case FLUSHING_DECODER:
- *flushing = FLUSHING_DECODER_SHUTDOWN; // initiate shutdown after flush.
- break; // Wait for flush to complete.
- case FLUSHING_DECODER_SHUTDOWN:
- break; // Wait for flush to complete.
- case SHUTTING_DOWN_DECODER:
- break; // Wait for shutdown to complete.
- case FLUSHED:
- getDecoder(audio)->initiateShutdown(); // In the middle of a seek.
- *flushing = SHUTTING_DOWN_DECODER; // Shut down.
- break;
- case SHUT_DOWN:
- finishFlushIfPossible(); // Should not occur.
- break; // Finish anyways.
- }
- if (mCurrentSourceInfo.mSource != nullptr) {
- if (audio) {
- if (mVideoDecoderError
- || mCurrentSourceInfo.mSource->getFormat(false /* audio */) == NULL
- || mNativeWindow == NULL
- || mNativeWindow->getANativeWindow() == NULL
- || mVideoDecoder == NULL) {
- // When both audio and video have error, or this stream has only audio
- // which has error, notify client of error.
- notifyListener(
- mCurrentSourceInfo.mSrcId, MEDIA2_ERROR,
- MEDIA2_ERROR_UNKNOWN, err);
- } else {
-                            // Only audio track has error. Video track could still be good to play.
- notifyListener(
- mCurrentSourceInfo.mSrcId, MEDIA2_INFO,
- MEDIA2_INFO_PLAY_AUDIO_ERROR, err);
- }
- mAudioDecoderError = true;
- } else {
- if (mAudioDecoderError
- || mCurrentSourceInfo.mSource->getFormat(true /* audio */) == NULL
- || mAudioSink == NULL || mAudioDecoder == NULL) {
- // When both audio and video have error, or this stream has only video
- // which has error, notify client of error.
- notifyListener(
- mCurrentSourceInfo.mSrcId, MEDIA2_ERROR,
- MEDIA2_ERROR_UNKNOWN, err);
- } else {
-                            // Only video track has error. Audio track could still be good to play.
- notifyListener(
- mCurrentSourceInfo.mSrcId, MEDIA2_INFO,
- MEDIA2_INFO_PLAY_VIDEO_ERROR, err);
- }
- mVideoDecoderError = true;
- }
- }
- } else {
- ALOGV("Unhandled decoder notification %d '%c%c%c%c'.",
- what,
- what >> 24,
- (what >> 16) & 0xff,
- (what >> 8) & 0xff,
- what & 0xff);
- }
-
- break;
- }
-
- case kWhatRendererNotify:
- {
- int32_t requesterGeneration = mRendererGeneration - 1;
- CHECK(msg->findInt32("generation", &requesterGeneration));
- if (requesterGeneration != mRendererGeneration) {
- ALOGV("got message from old renderer, generation(%d:%d)",
- requesterGeneration, mRendererGeneration);
- return;
- }
-
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- if (what == Renderer::kWhatEOS) {
- int32_t audio;
- CHECK(msg->findInt32("audio", &audio));
-
- int32_t finalResult;
- CHECK(msg->findInt32("finalResult", &finalResult));
-
- if (audio) {
- mAudioEOS = true;
- } else {
- mVideoEOS = true;
- }
-
- if (finalResult == ERROR_END_OF_STREAM) {
- ALOGV("reached %s EOS", audio ? "audio" : "video");
- } else {
- ALOGE("%s track encountered an error (%d)",
- audio ? "audio" : "video", finalResult);
-
- notifyListener(
- mCurrentSourceInfo.mSrcId, MEDIA2_ERROR,
- MEDIA2_ERROR_UNKNOWN, finalResult);
- }
-
- if ((mAudioEOS || mAudioDecoder == NULL)
- && (mVideoEOS || mVideoDecoder == NULL)) {
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_PLAYBACK_COMPLETE, 0, 0);
- }
- } else if (what == Renderer::kWhatFlushComplete) {
- int32_t audio;
- CHECK(msg->findInt32("audio", &audio));
-
- if (audio) {
- mAudioEOS = false;
- } else {
- mVideoEOS = false;
- }
-
- ALOGV("renderer %s flush completed.", audio ? "audio" : "video");
- if (audio && (mFlushingAudio == NONE || mFlushingAudio == FLUSHED
- || mFlushingAudio == SHUT_DOWN)) {
- // Flush has been handled by tear down.
- break;
- }
- handleFlushComplete(audio, false /* isDecoder */);
- finishFlushIfPossible();
- } else if (what == Renderer::kWhatVideoRenderingStart) {
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_INFO,
- MEDIA2_INFO_VIDEO_RENDERING_START, 0);
- } else if (what == Renderer::kWhatMediaRenderingStart) {
- ALOGV("media rendering started");
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_STARTED, 0, 0);
- } else if (what == Renderer::kWhatAudioTearDown) {
- int32_t reason;
- CHECK(msg->findInt32("reason", &reason));
- ALOGV("Tear down audio with reason %d.", reason);
- if (reason == Renderer::kDueToTimeout && !(mPaused && mOffloadAudio)) {
- // TimeoutWhenPaused is only for offload mode.
-                    ALOGW("Received a stale message for teardown.");
- break;
- }
- int64_t positionUs;
- if (!msg->findInt64("positionUs", &positionUs)) {
- positionUs = mPreviousSeekTimeUs;
- }
-
- restartAudio(
- positionUs, reason == Renderer::kForceNonOffload /* forceNonOffload */,
- reason != Renderer::kDueToTimeout /* needsToCreateAudioDecoder */);
- }
- break;
- }
-
- case kWhatMoreDataQueued:
- {
- break;
- }
-
- case kWhatReset:
- {
- ALOGV("kWhatReset");
-
- mResetting = true;
- stopPlaybackTimer("kWhatReset");
- stopRebufferingTimer(true);
-
- mDeferredActions.push_back(
- new FlushDecoderAction(
- FLUSH_CMD_SHUTDOWN /* audio */,
- FLUSH_CMD_SHUTDOWN /* video */));
-
- mDeferredActions.push_back(
- new SimpleAction(&NuPlayer2::performReset));
-
- processDeferredActions();
- break;
- }
-
- case kWhatNotifyTime:
- {
- ALOGV("kWhatNotifyTime");
- int64_t timerUs;
- CHECK(msg->findInt64("timerUs", &timerUs));
-
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_NOTIFY_TIME, timerUs, 0);
- break;
- }
-
- case kWhatSeek:
- {
- int64_t seekTimeUs;
- int32_t mode;
- int32_t needNotify;
- CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
- CHECK(msg->findInt32("mode", &mode));
- CHECK(msg->findInt32("needNotify", &needNotify));
-
- ALOGV("kWhatSeek seekTimeUs=%lld us, mode=%d, needNotify=%d",
- (long long)seekTimeUs, mode, needNotify);
-
- if (!mStarted) {
- if (!mSourceStarted) {
- mSourceStarted = true;
- mCurrentSourceInfo.mSource->start();
- }
- if (seekTimeUs > 0) {
- performSeek(seekTimeUs, (MediaPlayer2SeekMode)mode);
- }
-
- if (needNotify) {
- notifyDriverSeekComplete(mCurrentSourceInfo.mSrcId);
- }
- break;
- }
-
-            // seeks can take a while, so we are essentially paused
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_PAUSED, 0, 0);
-
- mDeferredActions.push_back(
- new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
- FLUSH_CMD_FLUSH /* video */));
-
- mDeferredActions.push_back(
- new SeekAction(seekTimeUs, (MediaPlayer2SeekMode)mode));
-
- // After a flush without shutdown, decoder is paused.
- // Don't resume it until source seek is done, otherwise it could
- // start pulling stale data too soon.
- mDeferredActions.push_back(
- new ResumeDecoderAction(needNotify));
-
- processDeferredActions();
- break;
- }
-
- case kWhatRewind:
- {
- ALOGV("kWhatRewind");
-
- int64_t seekTimeUs = mCurrentSourceInfo.mStartTimeUs;
- int32_t mode = MediaPlayer2SeekMode::SEEK_CLOSEST;
-
- if (!mStarted) {
- if (!mSourceStarted) {
- mSourceStarted = true;
- mCurrentSourceInfo.mSource->start();
- }
- performSeek(seekTimeUs, (MediaPlayer2SeekMode)mode);
- break;
- }
-
-            // seeks can take a while, so we are essentially paused
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_PAUSED, 0, 0);
-
- mDeferredActions.push_back(
- new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
- FLUSH_CMD_FLUSH /* video */));
-
- mDeferredActions.push_back(
- new SeekAction(seekTimeUs, (MediaPlayer2SeekMode)mode));
-
- // After a flush without shutdown, decoder is paused.
- // Don't resume it until source seek is done, otherwise it could
- // start pulling stale data too soon.
- mDeferredActions.push_back(
- new ResumeDecoderAction(false /* needNotify */));
-
- processDeferredActions();
- break;
- }
-
- case kWhatPause:
- {
- if (!mStarted) {
- onStart(false /* play */);
- }
- onPause();
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_PAUSED, 0, 0);
- mPausedByClient = true;
- break;
- }
-
- case kWhatSourceNotify:
- {
- onSourceNotify(msg);
- break;
- }
-
- case kWhatClosedCaptionNotify:
- {
- onClosedCaptionNotify(msg);
- break;
- }
-
- case kWhatPrepareDrm:
- {
- status_t status = onPrepareDrm(msg);
-
- sp<AMessage> response = new AMessage;
- response->setInt32("status", status);
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
-
- case kWhatReleaseDrm:
- {
- status_t status = onReleaseDrm(msg);
-
- sp<AMessage> response = new AMessage;
- response->setInt32("status", status);
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
-
- default:
- TRESPASS();
- break;
- }
-}
-
-void NuPlayer2::onResume() {
- if (!mPaused || mResetting) {
- ALOGD_IF(mResetting, "resetting, onResume discarded");
- return;
- }
- mPaused = false;
- if (mCurrentSourceInfo.mSource != NULL) {
- mCurrentSourceInfo.mSource->resume();
- } else {
- ALOGW("resume called when source is gone or not set");
- }
- // |mAudioDecoder| may have been released due to the pause timeout, so re-create it if
- // needed.
- if (audioDecoderStillNeeded() && mAudioDecoder == NULL) {
- instantiateDecoder(true /* audio */, &mAudioDecoder);
- }
- if (mRenderer != NULL) {
- mRenderer->resume();
- } else {
- ALOGW("resume called when renderer is gone or not set");
- }
-
- startPlaybackTimer("onresume");
-}
-
-void NuPlayer2::onStart(bool play) {
- ALOGV("onStart: mCrypto: %p", mCurrentSourceInfo.mCrypto.get());
-
- if (!mSourceStarted) {
- mSourceStarted = true;
- mCurrentSourceInfo.mSource->start();
- }
-
- mOffloadAudio = false;
- mAudioEOS = false;
- mVideoEOS = false;
- mStarted = true;
- mPaused = false;
-
- uint32_t flags = 0;
-
- if (mCurrentSourceInfo.mSource->isRealTime()) {
- flags |= Renderer::FLAG_REAL_TIME;
- }
-
- bool hasAudio = (mCurrentSourceInfo.mSource->getFormat(true /* audio */) != NULL);
- bool hasVideo = (mCurrentSourceInfo.mSource->getFormat(false /* audio */) != NULL);
- if (!hasAudio && !hasVideo) {
- ALOGE("no metadata for either audio or video source");
- mCurrentSourceInfo.mSource->stop();
- mSourceStarted = false;
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_ERROR,
- MEDIA2_ERROR_UNKNOWN, ERROR_MALFORMED);
- return;
- }
- ALOGV_IF(!hasAudio, "no metadata for audio source"); // video only stream
-
- sp<MetaData> audioMeta = mCurrentSourceInfo.mSource->getFormatMeta(true /* audio */);
-
- audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
- if (mAudioSink != NULL) {
- streamType = mAudioSink->getAudioStreamType();
- }
-
- mOffloadAudio =
- JMediaPlayer2Utils::isOffloadedAudioPlaybackSupported(
- audioMeta, hasVideo, mCurrentSourceInfo.mSource->isStreaming(), streamType)
- && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
-
- // Modular DRM: Disabling audio offload if the source is protected
- if (mOffloadAudio && mCurrentSourceInfo.mIsDrmProtected) {
- mOffloadAudio = false;
- ALOGV("onStart: Disabling mOffloadAudio now that the source is protected.");
- }
-
- if (mOffloadAudio) {
- flags |= Renderer::FLAG_OFFLOAD_AUDIO;
- }
-
- sp<AMessage> notify = new AMessage(kWhatRendererNotify, this);
- ++mRendererGeneration;
- notify->setInt32("generation", mRendererGeneration);
- mRenderer = new Renderer(mAudioSink, mMediaClock, notify, mContext, flags);
- mRendererLooper = new ALooper;
- mRendererLooper->setName("NuPlayer2Renderer");
- mRendererLooper->start(false, true, ANDROID_PRIORITY_AUDIO);
- mRendererLooper->registerHandler(mRenderer);
-
- status_t err = mRenderer->setPlaybackSettings(mPlaybackSettings);
- if (err != OK) {
- mCurrentSourceInfo.mSource->stop();
- mSourceStarted = false;
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
- return;
- }
-
- float rate = getFrameRate();
- if (rate > 0) {
- mRenderer->setVideoFrameRate(rate);
- }
-
- addEndTimeMonitor();
- // Renderer is created in paused state.
- if (play) {
- mRenderer->resume();
- }
-
- if (mVideoDecoder != NULL) {
- mVideoDecoder->setRenderer(mRenderer);
- }
- if (mAudioDecoder != NULL) {
- mAudioDecoder->setRenderer(mRenderer);
- }
-
- startPlaybackTimer("onstart");
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_START, 0);
-
- postScanSources();
-}
-
-void NuPlayer2::addEndTimeMonitor() {
- ++mEOSMonitorGeneration;
-
- if (mCurrentSourceInfo.mEndTimeUs == DataSourceDesc::kMaxTimeUs) {
- return;
- }
-
- sp<AMessage> msg = new AMessage(kWhatEOSMonitor, this);
- msg->setInt32("generation", mEOSMonitorGeneration);
- mMediaClock->addTimer(msg, mCurrentSourceInfo.mEndTimeUs);
-}
-
-void NuPlayer2::startPlaybackTimer(const char *where) {
- Mutex::Autolock autoLock(mPlayingTimeLock);
- if (mLastStartedPlayingTimeNs == 0) {
- mLastStartedPlayingTimeNs = systemTime();
- ALOGV("startPlaybackTimer() time %20" PRId64 " (%s)", mLastStartedPlayingTimeNs, where);
- }
-}
-
-void NuPlayer2::stopPlaybackTimer(const char *where) {
- Mutex::Autolock autoLock(mPlayingTimeLock);
-
- ALOGV("stopPlaybackTimer() time %20" PRId64 " (%s)", mLastStartedPlayingTimeNs, where);
-
- if (mLastStartedPlayingTimeNs != 0) {
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- int64_t now = systemTime();
- int64_t played = now - mLastStartedPlayingTimeNs;
- ALOGV("stopPlaybackTimer() log %20" PRId64 "", played);
-
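-            // |played| is in nanoseconds; round to the nearest microsecond for the driver.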
- if (played > 0) {
- driver->notifyMorePlayingTimeUs(mCurrentSourceInfo.mSrcId, (played+500)/1000);
- }
- }
- mLastStartedPlayingTimeNs = 0;
- }
-}
-
-void NuPlayer2::startRebufferingTimer() {
- Mutex::Autolock autoLock(mPlayingTimeLock);
- if (mLastStartedRebufferingTimeNs == 0) {
- mLastStartedRebufferingTimeNs = systemTime();
- ALOGV("startRebufferingTimer() time %20" PRId64 "", mLastStartedRebufferingTimeNs);
- }
-}
-
-void NuPlayer2::stopRebufferingTimer(bool exitingPlayback) {
- Mutex::Autolock autoLock(mPlayingTimeLock);
-
- ALOGV("stopRebufferTimer() time %20" PRId64 " (exiting %d)",
- mLastStartedRebufferingTimeNs, exitingPlayback);
-
- if (mLastStartedRebufferingTimeNs != 0) {
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- int64_t now = systemTime();
- int64_t rebuffered = now - mLastStartedRebufferingTimeNs;
- ALOGV("stopRebufferingTimer() log %20" PRId64 "", rebuffered);
-
- if (rebuffered > 0) {
- driver->notifyMoreRebufferingTimeUs(
- mCurrentSourceInfo.mSrcId, (rebuffered+500)/1000);
- if (exitingPlayback) {
- driver->notifyRebufferingWhenExit(mCurrentSourceInfo.mSrcId, true);
- }
- }
- }
- mLastStartedRebufferingTimeNs = 0;
- }
-}
-
-void NuPlayer2::onPause() {
-
- stopPlaybackTimer("onPause");
-
- if (mPaused) {
- return;
- }
- mPaused = true;
- if (mCurrentSourceInfo.mSource != NULL) {
- mCurrentSourceInfo.mSource->pause();
- } else {
- ALOGW("pause called when source is gone or not set");
- }
- if (mRenderer != NULL) {
- mRenderer->pause();
- } else {
- ALOGW("pause called when renderer is gone or not set");
- }
-
-}
-
-bool NuPlayer2::audioDecoderStillNeeded() {
-    // The audio decoder is no longer needed if it is shut down or shutting down.
- return ((mFlushingAudio != SHUT_DOWN) && (mFlushingAudio != SHUTTING_DOWN_DECODER));
-}
-
-void NuPlayer2::handleFlushComplete(bool audio, bool isDecoder) {
- // We wait for both the decoder flush and the renderer flush to complete
- // before entering either the FLUSHED or the SHUTTING_DOWN_DECODER state.
-
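-    // mFlushComplete is indexed as [audio][isDecoder]; don't advance the flush state
-    // until the counterpart (renderer vs. decoder) of the same stream has completed too.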
- mFlushComplete[audio][isDecoder] = true;
- if (!mFlushComplete[audio][!isDecoder]) {
- return;
- }
-
- FlushStatus *state = audio ? &mFlushingAudio : &mFlushingVideo;
- switch (*state) {
- case FLUSHING_DECODER:
- {
- *state = FLUSHED;
- break;
- }
-
- case FLUSHING_DECODER_SHUTDOWN:
- {
- *state = SHUTTING_DOWN_DECODER;
-
- ALOGV("initiating %s decoder shutdown", audio ? "audio" : "video");
- getDecoder(audio)->initiateShutdown();
- break;
- }
-
- default:
-            // A decoder flush completion should only occur in a flushing state.
- LOG_ALWAYS_FATAL_IF(isDecoder, "decoder flush in invalid state %d", *state);
- break;
- }
-}
-
-void NuPlayer2::finishFlushIfPossible() {
- if (mFlushingAudio != NONE && mFlushingAudio != FLUSHED
- && mFlushingAudio != SHUT_DOWN) {
- return;
- }
-
- if (mFlushingVideo != NONE && mFlushingVideo != FLUSHED
- && mFlushingVideo != SHUT_DOWN) {
- return;
- }
-
- ALOGV("both audio and video are flushed now.");
-
- mFlushingAudio = NONE;
- mFlushingVideo = NONE;
-
- clearFlushComplete();
-
- processDeferredActions();
-}
-
-void NuPlayer2::postScanSources() {
- if (mScanSourcesPending) {
- return;
- }
-
- sp<AMessage> msg = new AMessage(kWhatScanSources, this);
- msg->setInt32("generation", mScanSourcesGeneration);
- msg->post();
-
- mScanSourcesPending = true;
-}
-
-void NuPlayer2::tryOpenAudioSinkForOffload(
- const sp<AMessage> &format, const sp<MetaData> &audioMeta, bool hasVideo) {
- // Note: This is called early in NuPlayer2 to determine whether offloading
- // is possible; otherwise the decoders call the renderer openAudioSink directly.
-
- status_t err = mRenderer->openAudioSink(
- format, true /* offloadOnly */, hasVideo,
- AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio, mCurrentSourceInfo.mSource->isStreaming());
- if (err != OK) {
-        // On any failure, turn off mOffloadAudio.
- mOffloadAudio = false;
- } else if (mOffloadAudio) {
- sendMetaDataToHal(mAudioSink, audioMeta);
- }
-}
-
-void NuPlayer2::closeAudioSink() {
- mRenderer->closeAudioSink();
-}
-
-void NuPlayer2::restartAudio(
- int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
- if (mAudioDecoder != NULL) {
- mAudioDecoder->pause();
- Mutex::Autolock autoLock(mDecoderLock);
- mAudioDecoder.clear();
- mAudioDecoderError = false;
- ++mAudioDecoderGeneration;
- }
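-    // The audio decoder was just released above, so resolve any in-flight audio flush
-    // state here and skip recreating the decoder if it was already shutting down.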
- if (mFlushingAudio == FLUSHING_DECODER) {
- mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
- mFlushingAudio = FLUSHED;
- finishFlushIfPossible();
- } else if (mFlushingAudio == FLUSHING_DECODER_SHUTDOWN
- || mFlushingAudio == SHUTTING_DOWN_DECODER) {
- mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
- mFlushingAudio = SHUT_DOWN;
- finishFlushIfPossible();
- needsToCreateAudioDecoder = false;
- }
- if (mRenderer == NULL) {
- return;
- }
- closeAudioSink();
- mRenderer->flush(true /* audio */, false /* notifyComplete */);
- if (mVideoDecoder != NULL) {
- mDeferredActions.push_back(
- new FlushDecoderAction(FLUSH_CMD_NONE /* audio */,
- FLUSH_CMD_FLUSH /* video */));
- mDeferredActions.push_back(
- new SeekAction(currentPositionUs,
- MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */));
- // After a flush without shutdown, decoder is paused.
- // Don't resume it until source seek is done, otherwise it could
- // start pulling stale data too soon.
- mDeferredActions.push_back(new ResumeDecoderAction(false));
- processDeferredActions();
- } else {
- performSeek(currentPositionUs, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */);
- }
-
- if (forceNonOffload) {
- mRenderer->signalDisableOffloadAudio();
- mOffloadAudio = false;
- }
- if (needsToCreateAudioDecoder) {
- instantiateDecoder(true /* audio */, &mAudioDecoder, !forceNonOffload);
- }
-}
-
-void NuPlayer2::determineAudioModeChange(const sp<AMessage> &audioFormat) {
- if (mCurrentSourceInfo.mSource == NULL || mAudioSink == NULL) {
- return;
- }
-
- if (mRenderer == NULL) {
- ALOGW("No renderer can be used to determine audio mode. Use non-offload for safety.");
- mOffloadAudio = false;
- return;
- }
-
- sp<MetaData> audioMeta = mCurrentSourceInfo.mSource->getFormatMeta(true /* audio */);
- sp<AMessage> videoFormat = mCurrentSourceInfo.mSource->getFormat(false /* audio */);
- audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
- const bool hasVideo = (videoFormat != NULL);
- bool canOffload = JMediaPlayer2Utils::isOffloadedAudioPlaybackSupported(
- audioMeta, hasVideo, mCurrentSourceInfo.mSource->isStreaming(), streamType)
- && (mPlaybackSettings.mSpeed == 1.f && mPlaybackSettings.mPitch == 1.f);
-
- // Modular DRM: Disabling audio offload if the source is protected
- if (canOffload && mCurrentSourceInfo.mIsDrmProtected) {
- canOffload = false;
- ALOGV("determineAudioModeChange: Disabling mOffloadAudio b/c the source is protected.");
- }
-
- if (canOffload) {
- if (!mOffloadAudio) {
- mRenderer->signalEnableOffloadAudio();
- }
- // open audio sink early under offload mode.
- tryOpenAudioSinkForOffload(audioFormat, audioMeta, hasVideo);
- } else {
- if (mOffloadAudio) {
- mRenderer->signalDisableOffloadAudio();
- mOffloadAudio = false;
- }
- }
-}
-
-status_t NuPlayer2::instantiateDecoder(
- bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange) {
- // The audio decoder could be cleared by tear down. If still in shut down
- // process, no need to create a new audio decoder.
- if (*decoder != NULL || (audio && mFlushingAudio == SHUT_DOWN)) {
- return OK;
- }
-
- sp<AMessage> format = mCurrentSourceInfo.mSource->getFormat(audio);
-
- if (format == NULL) {
- return UNKNOWN_ERROR;
- } else {
- status_t err;
- if (format->findInt32("err", &err) && err) {
- return err;
- }
- }
-
- format->setInt32("priority", 0 /* realtime */);
-
- if (!audio) {
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, this);
- if (mCCDecoder == NULL) {
- mCCDecoder = new CCDecoder(ccNotify);
- }
-
- if (mCurrentSourceInfo.mSourceFlags & Source::FLAG_SECURE) {
- format->setInt32("secure", true);
- }
-
- if (mCurrentSourceInfo.mSourceFlags & Source::FLAG_PROTECTED) {
- format->setInt32("protected", true);
- }
-
- float rate = getFrameRate();
- if (rate > 0) {
- format->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
- }
- }
-
- Mutex::Autolock autoLock(mDecoderLock);
-
- if (audio) {
- sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
- ++mAudioDecoderGeneration;
- notify->setInt32("generation", mAudioDecoderGeneration);
-
- if (checkAudioModeChange) {
- determineAudioModeChange(format);
- }
- if (mOffloadAudio) {
- mCurrentSourceInfo.mSource->setOffloadAudio(true /* offload */);
-
- const bool hasVideo = (mCurrentSourceInfo.mSource->getFormat(false /*audio */) != NULL);
- format->setInt32("has-video", hasVideo);
- *decoder = new DecoderPassThrough(notify, mCurrentSourceInfo.mSource, mRenderer);
- ALOGV("instantiateDecoder audio DecoderPassThrough hasVideo: %d", hasVideo);
- } else {
- mCurrentSourceInfo.mSource->setOffloadAudio(false /* offload */);
-
- *decoder = new Decoder(notify, mCurrentSourceInfo.mSource, mPID, mUID, mRenderer);
- ALOGV("instantiateDecoder audio Decoder");
- }
- mAudioDecoderError = false;
- } else {
- sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
- ++mVideoDecoderGeneration;
- notify->setInt32("generation", mVideoDecoderGeneration);
-
- *decoder = new Decoder(
- notify, mCurrentSourceInfo.mSource, mPID, mUID, mRenderer, mNativeWindow,
- mCCDecoder);
- mVideoDecoderError = false;
-
- // enable FRC if high-quality AV sync is requested, even if not
- // directly queuing to display, as this will even improve textureview
- // playback.
- {
- if (property_get_bool("persist.sys.media.avsync", false)) {
- format->setInt32("auto-frc", 1);
- }
- }
- }
- (*decoder)->init();
-
- // Modular DRM
- if (mCurrentSourceInfo.mIsDrmProtected) {
- format->setObject("crypto", mCurrentSourceInfo.mCrypto);
- ALOGV("instantiateDecoder: mCrypto: %p isSecure: %d",
- mCurrentSourceInfo.mCrypto.get(),
- (mCurrentSourceInfo.mSourceFlags & Source::FLAG_SECURE) != 0);
- }
-
- (*decoder)->configure(format);
-
- if (!audio) {
- sp<AMessage> params = new AMessage();
- float rate = getFrameRate();
- if (rate > 0) {
- params->setFloat("frame-rate-total", rate);
- }
-
- sp<MetaData> fileMeta = getFileMeta();
- if (fileMeta != NULL) {
- int32_t videoTemporalLayerCount;
- if (fileMeta->findInt32(kKeyTemporalLayerCount, &videoTemporalLayerCount)
- && videoTemporalLayerCount > 0) {
- params->setInt32("temporal-layer-count", videoTemporalLayerCount);
- }
- }
-
- if (params->countEntries() > 0) {
- (*decoder)->setParameters(params);
- }
- }
- return OK;
-}
-
-void NuPlayer2::updateVideoSize(
- int64_t srcId,
- const sp<AMessage> &inputFormat,
- const sp<AMessage> &outputFormat) {
- if (inputFormat == NULL) {
- ALOGW("Unknown video size, reporting 0x0!");
- notifyListener(srcId, MEDIA2_SET_VIDEO_SIZE, 0, 0);
- return;
- }
- int32_t err = OK;
- inputFormat->findInt32("err", &err);
- if (err == -EWOULDBLOCK) {
- ALOGW("Video meta is not available yet!");
- return;
- }
- if (err != OK) {
- ALOGW("Something is wrong with video meta!");
- return;
- }
-
- int32_t displayWidth, displayHeight;
- if (outputFormat != NULL) {
- int32_t width, height;
- CHECK(outputFormat->findInt32("width", &width));
- CHECK(outputFormat->findInt32("height", &height));
-
- int32_t cropLeft, cropTop, cropRight, cropBottom;
- CHECK(outputFormat->findRect(
- "crop",
- &cropLeft, &cropTop, &cropRight, &cropBottom));
-
- displayWidth = cropRight - cropLeft + 1;
- displayHeight = cropBottom - cropTop + 1;
-
- ALOGV("Video output format changed to %d x %d "
- "(crop: %d x %d @ (%d, %d))",
- width, height,
- displayWidth,
- displayHeight,
- cropLeft, cropTop);
- } else {
- CHECK(inputFormat->findInt32("width", &displayWidth));
- CHECK(inputFormat->findInt32("height", &displayHeight));
-
- ALOGV("Video input format %d x %d", displayWidth, displayHeight);
- }
-
- // Take into account sample aspect ratio if necessary:
- int32_t sarWidth, sarHeight;
- if (inputFormat->findInt32("sar-width", &sarWidth)
- && inputFormat->findInt32("sar-height", &sarHeight)
- && sarWidth > 0 && sarHeight > 0) {
- ALOGV("Sample aspect ratio %d : %d", sarWidth, sarHeight);
-
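-        // Only the width is adjusted for the sample aspect ratio; the height keeps
-        // its decoded value.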
- displayWidth = (displayWidth * sarWidth) / sarHeight;
-
- ALOGV("display dimensions %d x %d", displayWidth, displayHeight);
- } else {
- int32_t width, height;
- if (inputFormat->findInt32("display-width", &width)
- && inputFormat->findInt32("display-height", &height)
- && width > 0 && height > 0
- && displayWidth > 0 && displayHeight > 0) {
- if (displayHeight * (int64_t)width / height > (int64_t)displayWidth) {
- displayHeight = (int32_t)(displayWidth * (int64_t)height / width);
- } else {
- displayWidth = (int32_t)(displayHeight * (int64_t)width / height);
- }
- ALOGV("Video display width and height are overridden to %d x %d",
- displayWidth, displayHeight);
- }
- }
-
- int32_t rotationDegrees;
- if (!inputFormat->findInt32("rotation-degrees", &rotationDegrees)) {
- rotationDegrees = 0;
- }
-
- if (rotationDegrees == 90 || rotationDegrees == 270) {
- int32_t tmp = displayWidth;
- displayWidth = displayHeight;
- displayHeight = tmp;
- }
-
- notifyListener(
- srcId,
- MEDIA2_SET_VIDEO_SIZE,
- displayWidth,
- displayHeight);
-}
-
-void NuPlayer2::notifyListener(
- int64_t srcId, int msg, int ext1, int ext2, const PlayerMessage *in) {
- if (mDriver == NULL) {
- return;
- }
-
- sp<NuPlayer2Driver> driver = mDriver.promote();
-
- if (driver == NULL) {
- return;
- }
-
- driver->notifyListener(srcId, msg, ext1, ext2, in);
-}
-
-void NuPlayer2::flushDecoder(bool audio, bool needShutdown) {
- ALOGV("[%s] flushDecoder needShutdown=%d",
- audio ? "audio" : "video", needShutdown);
-
- const sp<DecoderBase> &decoder = getDecoder(audio);
- if (decoder == NULL) {
- ALOGI("flushDecoder %s without decoder present",
- audio ? "audio" : "video");
- return;
- }
-
- // Make sure we don't continue to scan sources until we finish flushing.
- ++mScanSourcesGeneration;
- if (mScanSourcesPending) {
- if (!needShutdown) {
- mDeferredActions.push_back(
- new SimpleAction(&NuPlayer2::performScanSources));
- }
- mScanSourcesPending = false;
- }
-
- decoder->signalFlush();
-
- FlushStatus newStatus =
- needShutdown ? FLUSHING_DECODER_SHUTDOWN : FLUSHING_DECODER;
-
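-    // With no renderer there is no renderer-side flush to wait for, so mark that
-    // half as complete up front.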
- mFlushComplete[audio][false /* isDecoder */] = (mRenderer == NULL);
- mFlushComplete[audio][true /* isDecoder */] = false;
- if (audio) {
- ALOGE_IF(mFlushingAudio != NONE,
- "audio flushDecoder() is called in state %d", mFlushingAudio);
- mFlushingAudio = newStatus;
- } else {
- ALOGE_IF(mFlushingVideo != NONE,
- "video flushDecoder() is called in state %d", mFlushingVideo);
- mFlushingVideo = newStatus;
- }
-}
-
-void NuPlayer2::queueDecoderShutdown(
- bool audio, bool video, const sp<AMessage> &reply) {
- ALOGI("queueDecoderShutdown audio=%d, video=%d", audio, video);
-
- mDeferredActions.push_back(
- new FlushDecoderAction(
- audio ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE,
- video ? FLUSH_CMD_SHUTDOWN : FLUSH_CMD_NONE));
-
- mDeferredActions.push_back(
- new SimpleAction(&NuPlayer2::performScanSources));
-
- mDeferredActions.push_back(new PostMessageAction(reply));
-
- processDeferredActions();
-}
-
-status_t NuPlayer2::setVideoScalingMode(int32_t mode) {
- mVideoScalingMode = mode;
- if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
- status_t ret = native_window_set_scaling_mode(
- mNativeWindow->getANativeWindow(), mVideoScalingMode);
- if (ret != OK) {
- ALOGE("Failed to set scaling mode (%d): %s",
- -ret, strerror(-ret));
- return ret;
- }
- }
- return OK;
-}
-
-status_t NuPlayer2::getTrackInfo(int64_t srcId, PlayerMessage* reply) const {
- sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
- msg->setInt64("srcId", srcId);
- msg->setPointer("reply", reply);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- return err;
-}
-
-status_t NuPlayer2::getSelectedTrack(int64_t srcId, int32_t type, PlayerMessage* reply) const {
- sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
- msg->setPointer("reply", reply);
- msg->setInt64("srcId", srcId);
- msg->setInt32("type", type);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
- return err;
-}
-
-status_t NuPlayer2::selectTrack(int64_t srcId, size_t trackIndex, bool select, int64_t timeUs) {
- sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
- msg->setInt64("srcId", srcId);
- msg->setSize("trackIndex", trackIndex);
- msg->setInt32("select", select);
- msg->setInt64("timeUs", timeUs);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
-
- if (err != OK) {
- return err;
- }
-
- if (!response->findInt32("err", &err)) {
- err = OK;
- }
-
- return err;
-}
-
-status_t NuPlayer2::getCurrentPosition(int64_t *mediaUs) {
- sp<Renderer> renderer = mRenderer;
- if (renderer == NULL) {
- return NO_INIT;
- }
-
- return renderer->getCurrentPosition(mediaUs);
-}
-
-void NuPlayer2::getStats(Vector<sp<AMessage> > *mTrackStats) {
- CHECK(mTrackStats != NULL);
-
- mTrackStats->clear();
-
- Mutex::Autolock autoLock(mDecoderLock);
- if (mVideoDecoder != NULL) {
- mTrackStats->push_back(mVideoDecoder->getStats());
- }
- if (mAudioDecoder != NULL) {
- mTrackStats->push_back(mAudioDecoder->getStats());
- }
-}
-
-sp<MetaData> NuPlayer2::getFileMeta() {
- return mCurrentSourceInfo.mSource->getFileFormatMeta();
-}
-
-float NuPlayer2::getFrameRate() {
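-    // Returns the video track's frame rate, 0 when there is no video format, or -1
-    // when no rate can be determined from either the track or the file metadata.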
- sp<MetaData> meta = mCurrentSourceInfo.mSource->getFormatMeta(false /* audio */);
- if (meta == NULL) {
- return 0;
- }
- int32_t rate;
- if (!meta->findInt32(kKeyFrameRate, &rate)) {
- // fall back to try file meta
- sp<MetaData> fileMeta = getFileMeta();
- if (fileMeta == NULL) {
- ALOGW("source has video meta but not file meta");
- return -1;
- }
- int32_t fileMetaRate;
- if (!fileMeta->findInt32(kKeyFrameRate, &fileMetaRate)) {
- return -1;
- }
- return fileMetaRate;
- }
- return rate;
-}
-
-void NuPlayer2::schedulePollDuration() {
- sp<AMessage> msg = new AMessage(kWhatPollDuration, this);
- msg->setInt32("generation", mPollDurationGeneration);
- msg->post();
-}
-
-void NuPlayer2::cancelPollDuration() {
- ++mPollDurationGeneration;
-}
-
-void NuPlayer2::processDeferredActions() {
- while (!mDeferredActions.empty()) {
- // We won't execute any deferred actions until we're no longer in
-        // an intermediate state, i.e. one or more decoders are currently
- // flushing or shutting down.
-
- if (mFlushingAudio != NONE || mFlushingVideo != NONE) {
- // We're currently flushing, postpone the reset until that's
- // completed.
-
- ALOGV("postponing action mFlushingAudio=%d, mFlushingVideo=%d",
- mFlushingAudio, mFlushingVideo);
-
- break;
- }
-
- sp<Action> action = *mDeferredActions.begin();
- mDeferredActions.erase(mDeferredActions.begin());
-
- action->execute(this);
- }
-}
-
-void NuPlayer2::performSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
- ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), mode=%d",
- (long long)seekTimeUs, seekTimeUs / 1E6, mode);
-
- if (mCurrentSourceInfo.mSource == NULL) {
- // This happens when reset occurs right before the loop mode
- // asynchronously seeks to the start of the stream.
- LOG_ALWAYS_FATAL_IF(mAudioDecoder != NULL || mVideoDecoder != NULL,
- "mCurrentSourceInfo.mSource is NULL and decoders not NULL audio(%p) video(%p)",
- mAudioDecoder.get(), mVideoDecoder.get());
- return;
- }
- mPreviousSeekTimeUs = seekTimeUs;
- mCurrentSourceInfo.mSource->seekTo(seekTimeUs, mode);
- ++mTimedTextGeneration;
-
- // everything's flushed, continue playback.
-}
-
-void NuPlayer2::performDecoderFlush(FlushCommand audio, FlushCommand video) {
- ALOGV("performDecoderFlush audio=%d, video=%d", audio, video);
-
- if ((audio == FLUSH_CMD_NONE || mAudioDecoder == NULL)
- && (video == FLUSH_CMD_NONE || mVideoDecoder == NULL)) {
- return;
- }
-
- if (audio != FLUSH_CMD_NONE && mAudioDecoder != NULL) {
- flushDecoder(true /* audio */, (audio == FLUSH_CMD_SHUTDOWN));
- }
-
- if (video != FLUSH_CMD_NONE && mVideoDecoder != NULL) {
- flushDecoder(false /* audio */, (video == FLUSH_CMD_SHUTDOWN));
- }
-}
-
-void NuPlayer2::performReset() {
- ALOGV("performReset");
-
- CHECK(mAudioDecoder == NULL);
- CHECK(mVideoDecoder == NULL);
-
- stopPlaybackTimer("performReset");
- stopRebufferingTimer(true);
-
- cancelPollDuration();
-
- ++mScanSourcesGeneration;
- mScanSourcesPending = false;
-
- if (mRendererLooper != NULL) {
- if (mRenderer != NULL) {
- mRendererLooper->unregisterHandler(mRenderer->id());
- }
- mRendererLooper->stop();
- mRendererLooper.clear();
- }
- mRenderer.clear();
- ++mRendererGeneration;
-
- resetSourceInfo(mCurrentSourceInfo);
- resetSourceInfo(mNextSourceInfo);
-
- if (mDriver != NULL) {
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- driver->notifyResetComplete(mCurrentSourceInfo.mSrcId);
- }
- }
-
- mStarted = false;
- mPrepared = false;
- mResetting = false;
- mSourceStarted = false;
-
-}
-
-void NuPlayer2::performPlayNextDataSource() {
- ALOGV("performPlayNextDataSource");
-
- CHECK(mAudioDecoder == NULL);
- CHECK(mVideoDecoder == NULL);
-
- stopPlaybackTimer("performPlayNextDataSource");
- stopRebufferingTimer(true);
-
- cancelPollDuration();
-
- ++mScanSourcesGeneration;
- mScanSourcesPending = false;
-
- ++mRendererGeneration;
-
- if (mCurrentSourceInfo.mSource != NULL) {
- mCurrentSourceInfo.mSource->stop();
- }
-
- long previousSrcId;
- {
- Mutex::Autolock autoLock(mSourceLock);
- previousSrcId = mCurrentSourceInfo.mSrcId;
-
- mCurrentSourceInfo = mNextSourceInfo;
- mNextSourceInfo = SourceInfo();
- mNextSourceInfo.mSrcId = ~mCurrentSourceInfo.mSrcId; // to distinguish the two sources.
- }
-
- if (mDriver != NULL) {
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- notifyListener(previousSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_END, 0);
-
- int64_t durationUs;
- if (mCurrentSourceInfo.mSource->getDuration(&durationUs) == OK) {
- driver->notifyDuration(mCurrentSourceInfo.mSrcId, durationUs);
- }
- notifyListener(
- mCurrentSourceInfo.mSrcId, MEDIA2_INFO, MEDIA2_INFO_DATA_SOURCE_START, 0);
- }
- }
-
- mStarted = false;
- mPrepared = true; // TODO: what if it's not prepared
- mResetting = false;
- mSourceStarted = false;
-
- addEndTimeMonitor();
-
- if (mRenderer != NULL) {
- mRenderer->resume();
- }
-
- onStart(true /* play */);
- mPausedByClient = false;
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_STARTED, 0, 0);
-}
-
-void NuPlayer2::performScanSources() {
- ALOGV("performScanSources");
-
- if (!mStarted) {
- return;
- }
-
- if (mAudioDecoder == NULL || mVideoDecoder == NULL) {
- postScanSources();
- }
-}
-
-void NuPlayer2::performSetSurface(const sp<ANativeWindowWrapper> &nww) {
- ALOGV("performSetSurface");
-
- mNativeWindow = nww;
-
- // XXX - ignore error from setVideoScalingMode for now
- setVideoScalingMode(mVideoScalingMode);
-
- if (mDriver != NULL) {
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- driver->notifySetSurfaceComplete(mCurrentSourceInfo.mSrcId);
- }
- }
-}
-
-void NuPlayer2::performResumeDecoders(bool needNotify) {
- if (needNotify) {
- mResumePending = true;
- if (mVideoDecoder == NULL) {
- // if audio-only, we can notify seek complete now,
- // as the resume operation will be relatively fast.
- finishResume();
- }
- }
-
- if (mVideoDecoder != NULL) {
- // When there is continuous seek, MediaPlayer will cache the seek
- // position, and send down new seek request when previous seek is
- // complete. Let's wait for at least one video output frame before
- // notifying seek complete, so that the video thumbnail gets updated
- // when seekbar is dragged.
- mVideoDecoder->signalResume(needNotify);
- }
-
- if (mAudioDecoder != NULL) {
- mAudioDecoder->signalResume(false /* needNotify */);
- }
-}
-
-void NuPlayer2::finishResume() {
- if (mResumePending) {
- mResumePending = false;
- notifyDriverSeekComplete(mCurrentSourceInfo.mSrcId);
- }
-}
-
-void NuPlayer2::notifyDriverSeekComplete(int64_t srcId) {
- if (mDriver != NULL) {
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- driver->notifySeekComplete(srcId);
- }
- }
-}
-
-void NuPlayer2::onSourceNotify(const sp<AMessage> &msg) {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- int64_t srcId;
- CHECK(msg->findInt64("srcId", &srcId));
- switch (what) {
- case Source::kWhatPrepared:
- {
- ALOGV("NuPlayer2::onSourceNotify Source::kWhatPrepared source:%p, Id(%lld)",
- mCurrentSourceInfo.mSource.get(), (long long)srcId);
- if (srcId == mCurrentSourceInfo.mSrcId) {
- if (mCurrentSourceInfo.mSource == NULL) {
- // This is a stale notification from a source that was
- // asynchronously preparing when the client called reset().
- // We handled the reset, the source is gone.
- break;
- }
-
- int32_t err;
- CHECK(msg->findInt32("err", &err));
-
- if (err != OK) {
- // shut down potential secure codecs in case client never calls reset
- mDeferredActions.push_back(
- new FlushDecoderAction(FLUSH_CMD_SHUTDOWN /* audio */,
- FLUSH_CMD_SHUTDOWN /* video */));
- processDeferredActions();
- } else {
- mPrepared = true;
- }
-
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- // notify duration first, so that it's definitely set when
-                    // the app receives the "prepare complete" callback.
- int64_t durationUs;
- if (mCurrentSourceInfo.mSource->getDuration(&durationUs) == OK) {
- driver->notifyDuration(srcId, durationUs);
- }
- driver->notifyPrepareCompleted(srcId, err);
- }
- } else if (srcId == mNextSourceInfo.mSrcId) {
- if (mNextSourceInfo.mSource == NULL) {
- break; // stale
- }
-
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
- int32_t err;
- CHECK(msg->findInt32("err", &err));
- driver->notifyPrepareCompleted(srcId, err);
- }
- }
-
- break;
- }
-
- // Modular DRM
- case Source::kWhatDrmInfo:
- {
- PlayerMessage playerMsg;
- sp<ABuffer> drmInfo;
- CHECK(msg->findBuffer("drmInfo", &drmInfo));
- playerMsg.ParseFromArray(drmInfo->data(), drmInfo->size());
-
- ALOGV("onSourceNotify() kWhatDrmInfo MEDIA2_DRM_INFO drmInfo: %p playerMsg size: %d",
- drmInfo.get(), playerMsg.ByteSize());
-
- notifyListener(srcId, MEDIA2_DRM_INFO, 0 /* ext1 */, 0 /* ext2 */, &playerMsg);
-
- break;
- }
-
- case Source::kWhatFlagsChanged:
- {
- uint32_t flags;
- CHECK(msg->findInt32("flags", (int32_t *)&flags));
-
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver != NULL) {
-
- ALOGV("onSourceNotify() kWhatFlagsChanged FLAG_CAN_PAUSE: %d "
- "FLAG_CAN_SEEK_BACKWARD: %d \n\t\t\t\t FLAG_CAN_SEEK_FORWARD: %d "
- "FLAG_CAN_SEEK: %d FLAG_DYNAMIC_DURATION: %d \n"
- "\t\t\t\t FLAG_SECURE: %d FLAG_PROTECTED: %d",
- (flags & Source::FLAG_CAN_PAUSE) != 0,
- (flags & Source::FLAG_CAN_SEEK_BACKWARD) != 0,
- (flags & Source::FLAG_CAN_SEEK_FORWARD) != 0,
- (flags & Source::FLAG_CAN_SEEK) != 0,
- (flags & Source::FLAG_DYNAMIC_DURATION) != 0,
- (flags & Source::FLAG_SECURE) != 0,
- (flags & Source::FLAG_PROTECTED) != 0);
-
- if ((flags & NuPlayer2::Source::FLAG_CAN_SEEK) == 0) {
- driver->notifyListener(
- srcId, MEDIA2_INFO, MEDIA2_INFO_NOT_SEEKABLE, 0);
- }
- if (srcId == mCurrentSourceInfo.mSrcId) {
- driver->notifyFlagsChanged(srcId, flags);
- }
- }
-
- if (srcId == mCurrentSourceInfo.mSrcId) {
- if ((mCurrentSourceInfo.mSourceFlags & Source::FLAG_DYNAMIC_DURATION)
- && (!(flags & Source::FLAG_DYNAMIC_DURATION))) {
- cancelPollDuration();
- } else if (!(mCurrentSourceInfo.mSourceFlags & Source::FLAG_DYNAMIC_DURATION)
- && (flags & Source::FLAG_DYNAMIC_DURATION)
- && (mAudioDecoder != NULL || mVideoDecoder != NULL)) {
- schedulePollDuration();
- }
-
- mCurrentSourceInfo.mSourceFlags = flags;
- } else if (srcId == mNextSourceInfo.mSrcId) {
- // TODO: handle duration polling for next source.
- mNextSourceInfo.mSourceFlags = flags;
- }
- break;
- }
-
- case Source::kWhatVideoSizeChanged:
- {
- sp<AMessage> format;
- CHECK(msg->findMessage("format", &format));
-
- updateVideoSize(srcId, format);
- break;
- }
-
- case Source::kWhatBufferingUpdate:
- {
- int32_t percentage;
- CHECK(msg->findInt32("percentage", &percentage));
-
- notifyListener(srcId, MEDIA2_BUFFERING_UPDATE, percentage, 0);
- break;
- }
-
- case Source::kWhatPauseOnBufferingStart:
- {
- // ignore if not playing
- if (mStarted) {
- ALOGI("buffer low, pausing...");
-
- startRebufferingTimer();
- mPausedForBuffering = true;
- onPause();
- }
- notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_BUFFERING_START, 0);
- break;
- }
-
- case Source::kWhatResumeOnBufferingEnd:
- {
- // ignore if not playing
- if (mStarted) {
- ALOGI("buffer ready, resuming...");
-
- stopRebufferingTimer(false);
- mPausedForBuffering = false;
-
- // do not resume yet if client didn't unpause
- if (!mPausedByClient) {
- onResume();
- }
- }
- notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_BUFFERING_END, 0);
- break;
- }
-
- case Source::kWhatCacheStats:
- {
- int32_t kbps;
- CHECK(msg->findInt32("bandwidth", &kbps));
-
- notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_NETWORK_BANDWIDTH, kbps);
- break;
- }
-
- case Source::kWhatSubtitleData:
- {
- sp<ABuffer> buffer;
- CHECK(msg->findBuffer("buffer", &buffer));
-
- sendSubtitleData(buffer, 0 /* baseIndex */);
- break;
- }
-
- case Source::kWhatTimedMetaData:
- {
- sp<ABuffer> buffer;
- if (!msg->findBuffer("buffer", &buffer)) {
- notifyListener(srcId, MEDIA2_INFO, MEDIA2_INFO_METADATA_UPDATE, 0);
- } else {
- sendTimedMetaData(buffer);
- }
- break;
- }
-
- case Source::kWhatTimedTextData:
- {
- int32_t generation;
- if (msg->findInt32("generation", &generation)
- && generation != mTimedTextGeneration) {
- break;
- }
-
- sp<ABuffer> buffer;
- CHECK(msg->findBuffer("buffer", &buffer));
-
- sp<NuPlayer2Driver> driver = mDriver.promote();
- if (driver == NULL) {
- break;
- }
-
- int64_t posMs;
- int64_t timeUs, posUs;
- driver->getCurrentPosition(&posMs);
- posUs = posMs * 1000LL;
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
- if (posUs < timeUs) {
- if (!msg->findInt32("generation", &generation)) {
- msg->setInt32("generation", mTimedTextGeneration);
- }
- msg->post(timeUs - posUs);
- } else {
- sendTimedTextData(buffer);
- }
- break;
- }
-
- case Source::kWhatQueueDecoderShutdown:
- {
- int32_t audio, video;
- CHECK(msg->findInt32("audio", &audio));
- CHECK(msg->findInt32("video", &video));
-
- sp<AMessage> reply;
- CHECK(msg->findMessage("reply", &reply));
-
- queueDecoderShutdown(audio, video, reply);
- break;
- }
-
- case Source::kWhatDrmNoLicense:
- {
- notifyListener(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE);
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void NuPlayer2::onClosedCaptionNotify(const sp<AMessage> &msg) {
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- switch (what) {
- case NuPlayer2::CCDecoder::kWhatClosedCaptionData:
- {
- sp<ABuffer> buffer;
- CHECK(msg->findBuffer("buffer", &buffer));
-
- size_t inbandTracks = 0;
- if (mCurrentSourceInfo.mSource != NULL) {
- inbandTracks = mCurrentSourceInfo.mSource->getTrackCount();
- }
-
- sendSubtitleData(buffer, inbandTracks);
- break;
- }
-
- case NuPlayer2::CCDecoder::kWhatTrackAdded:
- {
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_INFO, MEDIA2_INFO_METADATA_UPDATE, 0);
-
- break;
- }
-
- default:
- TRESPASS();
- }
-
-
-}
-
-void NuPlayer2::sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex) {
- int32_t trackIndex;
- int64_t timeUs, durationUs;
- CHECK(buffer->meta()->findInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, &trackIndex));
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
- CHECK(buffer->meta()->findInt64("durationUs", &durationUs));
-
- PlayerMessage playerMsg;
- playerMsg.add_values()->set_int32_value(trackIndex + baseIndex);
- playerMsg.add_values()->set_int64_value(timeUs);
- playerMsg.add_values()->set_int64_value(durationUs);
- playerMsg.add_values()->set_bytes_value(buffer->data(), buffer->size());
-
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_SUBTITLE_DATA, 0, 0, &playerMsg);
-}
-
-void NuPlayer2::sendTimedMetaData(const sp<ABuffer> &buffer) {
- int64_t timeUs;
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
- PlayerMessage playerMsg;
- playerMsg.add_values()->set_int64_value(timeUs);
- playerMsg.add_values()->set_bytes_value(buffer->data(), buffer->size());
-
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_META_DATA, 0, 0, &playerMsg);
-}
-
-void NuPlayer2::sendTimedTextData(const sp<ABuffer> &buffer) {
- const void *data;
- size_t size = 0;
- int64_t timeUs;
- int32_t flag = TextDescriptions2::IN_BAND_TEXT_3GPP;
-
- AString mime;
- CHECK(buffer->meta()->findString("mime", &mime));
- CHECK(strcasecmp(mime.c_str(), MEDIA_MIMETYPE_TEXT_3GPP) == 0);
-
- data = buffer->data();
- size = buffer->size();
-
- PlayerMessage playerMsg;
- if (size > 0) {
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
- int32_t global = 0;
- if (buffer->meta()->findInt32("global", &global) && global) {
- flag |= TextDescriptions2::GLOBAL_DESCRIPTIONS;
- } else {
- flag |= TextDescriptions2::LOCAL_DESCRIPTIONS;
- }
- TextDescriptions2::getPlayerMessageOfDescriptions(
- (const uint8_t *)data, size, flag, timeUs / 1000, &playerMsg);
- }
-
- if (playerMsg.values_size() > 0) {
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_TIMED_TEXT, 0, 0, &playerMsg);
- } else { // send an empty timed text
- notifyListener(mCurrentSourceInfo.mSrcId, MEDIA2_TIMED_TEXT, 0, 0);
- }
-}
-
-const char *NuPlayer2::getDataSourceType() {
- switch (mCurrentSourceInfo.mDataSourceType) {
- case DATA_SOURCE_TYPE_HTTP_LIVE:
- return "HTTPLive";
-
- case DATA_SOURCE_TYPE_RTSP:
- return "RTSP";
-
- case DATA_SOURCE_TYPE_GENERIC_URL:
- return "GenURL";
-
- case DATA_SOURCE_TYPE_GENERIC_FD:
- return "GenFD";
-
- case DATA_SOURCE_TYPE_MEDIA:
- return "Media";
-
- case DATA_SOURCE_TYPE_NONE:
- default:
- return "None";
- }
- }
-
-NuPlayer2::SourceInfo* NuPlayer2::getSourceInfoByIdInMsg(const sp<AMessage> &msg) {
- int64_t srcId;
- CHECK(msg->findInt64("srcId", &srcId));
- if (mCurrentSourceInfo.mSrcId == srcId) {
- return &mCurrentSourceInfo;
- } else if (mNextSourceInfo.mSrcId == srcId) {
- return &mNextSourceInfo;
- } else {
- return NULL;
- }
-}
-
-void NuPlayer2::resetSourceInfo(NuPlayer2::SourceInfo &srcInfo) {
- if (srcInfo.mSource != NULL) {
- srcInfo.mSource->stop();
-
- Mutex::Autolock autoLock(mSourceLock);
- srcInfo.mSource.clear();
- }
- // Modular DRM
- ALOGD("performReset mCrypto: %p", srcInfo.mCrypto.get());
- srcInfo.mCrypto.clear();
- srcInfo.mIsDrmProtected = false;
-}
-
-// Modular DRM begin
-status_t NuPlayer2::prepareDrm(
- int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
-{
- ALOGV("prepareDrm ");
-
- // Passing to the looper anyway; called in a pre-config prepared state so no race on mCrypto
- sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
- // synchronous call so just passing the address but with local copies of "const" args
- uint8_t UUID[16];
- memcpy(UUID, uuid, sizeof(UUID));
- Vector<uint8_t> sessionId = drmSessionId;
- msg->setInt64("srcId", srcId);
- msg->setPointer("uuid", (void*)UUID);
- msg->setPointer("drmSessionId", (void*)&sessionId);
-
- sp<AMessage> response;
- status_t status = msg->postAndAwaitResponse(&response);
-
- if (status == OK && response != NULL) {
- CHECK(response->findInt32("status", &status));
- ALOGV("prepareDrm ret: %d ", status);
- } else {
- ALOGE("prepareDrm err: %d", status);
- }
-
- return status;
-}
-
-status_t NuPlayer2::releaseDrm(int64_t srcId)
-{
- ALOGV("releaseDrm ");
-
- sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
- msg->setInt64("srcId", srcId);
-
- sp<AMessage> response;
- status_t status = msg->postAndAwaitResponse(&response);
-
- if (status == OK && response != NULL) {
- CHECK(response->findInt32("status", &status));
- ALOGV("releaseDrm ret: %d ", status);
- } else {
- ALOGE("releaseDrm err: %d", status);
- }
-
- return status;
-}
-
-status_t NuPlayer2::onPrepareDrm(const sp<AMessage> &msg)
-{
- // TODO change to ALOGV
- ALOGD("onPrepareDrm ");
-
- status_t status = INVALID_OPERATION;
- SourceInfo *srcInfo = getSourceInfoByIdInMsg(msg);
- if (srcInfo == NULL) {
- return status;
- }
-
- int64_t srcId = srcInfo->mSrcId;
- if (srcInfo->mSource == NULL) {
- ALOGE("onPrepareDrm: srcInfo(%lld) No source. onPrepareDrm failed with %d.",
- (long long)srcId, status);
- return status;
- }
-
- uint8_t *uuid;
- Vector<uint8_t> *drmSessionId;
- CHECK(msg->findPointer("uuid", (void**)&uuid));
- CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
-
- status = OK;
- sp<AMediaCryptoWrapper> crypto = NULL;
-
- status = srcInfo->mSource->prepareDrm(uuid, *drmSessionId, &crypto);
- if (crypto == NULL) {
- ALOGE("onPrepareDrm: srcInfo(%lld).mSource->prepareDrm failed. status: %d",
- (long long)srcId, status);
- return status;
- }
- ALOGV("onPrepareDrm: srcInfo(%lld).mSource->prepareDrm succeeded", (long long)srcId);
-
- if (srcInfo->mCrypto != NULL) {
- ALOGE("onPrepareDrm: srcInfo(%lld) Unexpected. Already having mCrypto: %p",
- (long long)srcId, srcInfo->mCrypto.get());
- srcInfo->mCrypto.clear();
- }
-
- srcInfo->mCrypto = crypto;
- srcInfo->mIsDrmProtected = true;
- // TODO change to ALOGV
- ALOGD("onPrepareDrm: mCrypto: %p", srcInfo->mCrypto.get());
-
- return status;
-}
-
-status_t NuPlayer2::onReleaseDrm(const sp<AMessage> &msg)
-{
- // TODO change to ALOGV
- ALOGD("onReleaseDrm ");
-    SourceInfo *srcInfo = getSourceInfoByIdInMsg(msg);
- if (srcInfo == NULL) {
- return INVALID_OPERATION;
- }
-
- int64_t srcId = srcInfo->mSrcId;
- if (!srcInfo->mIsDrmProtected) {
- ALOGW("onReleaseDrm: srcInfo(%lld) Unexpected. mIsDrmProtected is already false.",
- (long long)srcId);
- }
-
- srcInfo->mIsDrmProtected = false;
-
- status_t status;
- if (srcInfo->mCrypto != NULL) {
- // notifying the source first before removing crypto from codec
- if (srcInfo->mSource != NULL) {
- srcInfo->mSource->releaseDrm();
- }
-
- status = OK;
- // first making sure the codecs have released their crypto reference
- const sp<DecoderBase> &videoDecoder = getDecoder(false/*audio*/);
- if (videoDecoder != NULL) {
- status = videoDecoder->releaseCrypto();
- ALOGV("onReleaseDrm: video decoder ret: %d", status);
- }
-
- const sp<DecoderBase> &audioDecoder = getDecoder(true/*audio*/);
- if (audioDecoder != NULL) {
- status_t status_audio = audioDecoder->releaseCrypto();
- if (status == OK) { // otherwise, returning the first error
- status = status_audio;
- }
- ALOGV("onReleaseDrm: audio decoder ret: %d", status_audio);
- }
-
- // TODO change to ALOGV
- ALOGD("onReleaseDrm: mCrypto: %p", srcInfo->mCrypto.get());
- srcInfo->mCrypto.clear();
- } else { // srcInfo->mCrypto == NULL
- ALOGE("onReleaseDrm: Unexpected. There is no crypto.");
- status = INVALID_OPERATION;
- }
-
- return status;
-}
-// Modular DRM end
-////////////////////////////////////////////////////////////////////////////////
-
-sp<AMessage> NuPlayer2::Source::getFormat(bool audio) {
- sp<MetaData> meta = getFormatMeta(audio);
-
- if (meta == NULL) {
- return NULL;
- }
-
- sp<AMessage> msg = new AMessage;
-
- if (convertMetaDataToMessage(meta, &msg) == OK) {
- return msg;
- }
- return NULL;
-}
-
-void NuPlayer2::Source::notifyFlagsChanged(uint32_t flags) {
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatFlagsChanged);
- notify->setInt32("flags", flags);
- notify->post();
-}
-
-void NuPlayer2::Source::notifyVideoSizeChanged(const sp<AMessage> &format) {
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatVideoSizeChanged);
- notify->setMessage("format", format);
- notify->post();
-}
-
-void NuPlayer2::Source::notifyPrepared(status_t err) {
- ALOGV("Source::notifyPrepared %d", err);
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatPrepared);
- notify->setInt32("err", err);
- notify->post();
-}
-
-void NuPlayer2::Source::notifyDrmInfo(const sp<ABuffer> &drmInfoBuffer)
-{
- ALOGV("Source::notifyDrmInfo");
-
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatDrmInfo);
- notify->setBuffer("drmInfo", drmInfoBuffer);
-
- notify->post();
-}
-
-void NuPlayer2::Source::onMessageReceived(const sp<AMessage> & /* msg */) {
- TRESPASS();
-}
-
-NuPlayer2::SourceInfo::SourceInfo()
- : mDataSourceType(DATA_SOURCE_TYPE_NONE),
- mSrcId(0),
- mSourceFlags(0),
- mStartTimeUs(0),
- mEndTimeUs(DataSourceDesc::kMaxTimeUs) {
-}
-
-NuPlayer2::SourceInfo & NuPlayer2::SourceInfo::operator=(const NuPlayer2::SourceInfo &other) {
- mSource = other.mSource;
- mCrypto = other.mCrypto;
- mDataSourceType = (DATA_SOURCE_TYPE)other.mDataSourceType;
- mSrcId = other.mSrcId;
- mSourceFlags = other.mSourceFlags;
- mStartTimeUs = other.mStartTimeUs;
- mEndTimeUs = other.mEndTimeUs;
- mIsDrmProtected = other.mIsDrmProtected;
- return *this;
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.h b/media/libmediaplayer2/nuplayer2/NuPlayer2.h
deleted file mode 100644
index b8fb988..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.h
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NU_PLAYER2_H_
-
-#define NU_PLAYER2_H_
-
-#include <media/AudioResamplerPublic.h>
-#include <media/stagefright/foundation/AHandler.h>
-
-#include <mediaplayer2/MediaPlayer2Interface.h>
-#include <mediaplayer2/JObjectHolder.h>
-
-#include "mediaplayer2.pb.h"
-
-using android::media::MediaPlayer2Proto::PlayerMessage;
-
-namespace android {
-
-struct ABuffer;
-struct AMediaCryptoWrapper;
-struct AMessage;
-struct ANativeWindowWrapper;
-struct AudioPlaybackRate;
-struct AVSyncSettings;
-struct DataSourceDesc;
-struct MediaClock;
-struct MediaHTTPService;
-class MetaData;
-struct NuPlayer2Driver;
-
-struct NuPlayer2 : public AHandler {
- explicit NuPlayer2(pid_t pid, uid_t uid,
- const sp<MediaClock> &mediaClock, const sp<JObjectHolder> &context);
-
- void setDriver(const wp<NuPlayer2Driver> &driver);
-
- void setDataSourceAsync(const sp<DataSourceDesc> &dsd);
- void prepareNextDataSourceAsync(const sp<DataSourceDesc> &dsd);
- void playNextDataSource(int64_t srcId);
-
- status_t getBufferingSettings(BufferingSettings* buffering /* nonnull */);
- status_t setBufferingSettings(const BufferingSettings& buffering);
-
- void prepareAsync();
-
- void setVideoSurfaceTextureAsync(const sp<ANativeWindowWrapper> &nww);
-
- void setAudioSink(const sp<MediaPlayer2Interface::AudioSink> &sink);
- status_t setPlaybackSettings(const AudioPlaybackRate &rate);
- status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
- status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
- status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
-
- void start();
-
- void pause();
-
- // Will notify the driver through "notifyResetComplete" once finished.
- void resetAsync();
-
- // Request a notification when specified media time is reached.
- status_t notifyAt(int64_t mediaTimeUs);
-
- // Will notify the driver through "notifySeekComplete" once finished
- // and needNotify is true.
- void seekToAsync(
- int64_t seekTimeUs,
- MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC,
- bool needNotify = false);
- void rewind();
-
- status_t setVideoScalingMode(int32_t mode);
- status_t getTrackInfo(int64_t srcId, PlayerMessage* reply) const;
- status_t getSelectedTrack(int64_t srcId, int32_t type, PlayerMessage* reply) const;
- status_t selectTrack(int64_t srcId, size_t trackIndex, bool select, int64_t timeUs);
- status_t getCurrentPosition(int64_t *mediaUs);
- void getStats(Vector<sp<AMessage> > *mTrackStats);
-
- sp<MetaData> getFileMeta();
- float getFrameRate();
-
- // Modular DRM
- status_t prepareDrm(int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
- status_t releaseDrm(int64_t srcId);
-
- const char *getDataSourceType();
-
-protected:
- virtual ~NuPlayer2();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-public:
- struct StreamListener;
- struct Source;
-
-private:
- struct Decoder;
- struct DecoderBase;
- struct DecoderPassThrough;
- struct CCDecoder;
- struct GenericSource2;
- struct HTTPLiveSource2;
- struct Renderer;
- struct RTSPSource2;
- struct Action;
- struct SeekAction;
- struct SetSurfaceAction;
- struct ResumeDecoderAction;
- struct FlushDecoderAction;
- struct PostMessageAction;
- struct SimpleAction;
-
- enum {
- kWhatSetDataSource = '=DaS',
- kWhatPrepare = 'prep',
- kWhatPrepareNextDataSource = 'pNDS',
- kWhatPlayNextDataSource = 'plNS',
- kWhatSetVideoSurface = '=VSu',
- kWhatSetAudioSink = '=AuS',
- kWhatMoreDataQueued = 'more',
- kWhatConfigPlayback = 'cfPB',
- kWhatConfigSync = 'cfSy',
- kWhatGetPlaybackSettings = 'gPbS',
- kWhatGetSyncSettings = 'gSyS',
- kWhatStart = 'strt',
- kWhatScanSources = 'scan',
- kWhatVideoNotify = 'vidN',
- kWhatAudioNotify = 'audN',
- kWhatClosedCaptionNotify = 'capN',
- kWhatRendererNotify = 'renN',
- kWhatReset = 'rset',
- kWhatNotifyTime = 'nfyT',
- kWhatSeek = 'seek',
- kWhatPause = 'paus',
- kWhatResume = 'rsme',
- kWhatPollDuration = 'polD',
- kWhatSourceNotify = 'srcN',
- kWhatGetTrackInfo = 'gTrI',
- kWhatGetSelectedTrack = 'gSel',
- kWhatSelectTrack = 'selT',
- kWhatGetBufferingSettings = 'gBus',
- kWhatSetBufferingSettings = 'sBuS',
- kWhatPrepareDrm = 'pDrm',
- kWhatReleaseDrm = 'rDrm',
- kWhatRewind = 'reWd',
- kWhatEOSMonitor = 'eosM',
- };
-
- typedef enum {
- DATA_SOURCE_TYPE_NONE,
- DATA_SOURCE_TYPE_HTTP_LIVE,
- DATA_SOURCE_TYPE_RTSP,
- DATA_SOURCE_TYPE_GENERIC_URL,
- DATA_SOURCE_TYPE_GENERIC_FD,
- DATA_SOURCE_TYPE_MEDIA,
- } DATA_SOURCE_TYPE;
-
- struct SourceInfo {
- SourceInfo();
- SourceInfo &operator=(const SourceInfo &);
-
- sp<Source> mSource;
- std::atomic<DATA_SOURCE_TYPE> mDataSourceType;
- int64_t mSrcId;
- uint32_t mSourceFlags;
- int64_t mStartTimeUs;
- int64_t mEndTimeUs;
- // Modular DRM
- sp<AMediaCryptoWrapper> mCrypto;
- bool mIsDrmProtected = false;
- };
-
- wp<NuPlayer2Driver> mDriver;
- pid_t mPID;
- uid_t mUID;
- const sp<MediaClock> mMediaClock;
- Mutex mSourceLock; // guard |mSource|.
- SourceInfo mCurrentSourceInfo;
- SourceInfo mNextSourceInfo;
- sp<ANativeWindowWrapper> mNativeWindow;
- sp<MediaPlayer2Interface::AudioSink> mAudioSink;
- sp<DecoderBase> mVideoDecoder;
- bool mOffloadAudio;
- sp<DecoderBase> mAudioDecoder;
- Mutex mDecoderLock; // guard |mAudioDecoder| and |mVideoDecoder|.
- sp<CCDecoder> mCCDecoder;
- sp<Renderer> mRenderer;
- sp<ALooper> mRendererLooper;
- int32_t mAudioDecoderGeneration;
- int32_t mVideoDecoderGeneration;
- int32_t mRendererGeneration;
- int32_t mEOSMonitorGeneration;
-
- Mutex mPlayingTimeLock;
- int64_t mLastStartedPlayingTimeNs;
- void stopPlaybackTimer(const char *where);
- void startPlaybackTimer(const char *where);
-
- int64_t mLastStartedRebufferingTimeNs;
- void startRebufferingTimer();
- void stopRebufferingTimer(bool exitingPlayback);
-
- int64_t mPreviousSeekTimeUs;
-
- List<sp<Action> > mDeferredActions;
-
- bool mAudioEOS;
- bool mVideoEOS;
-
- bool mScanSourcesPending;
- int32_t mScanSourcesGeneration;
-
- int32_t mPollDurationGeneration;
- int32_t mTimedTextGeneration;
-
- enum FlushStatus {
- NONE,
- FLUSHING_DECODER,
- FLUSHING_DECODER_SHUTDOWN,
- SHUTTING_DOWN_DECODER,
- FLUSHED,
- SHUT_DOWN,
- };
-
- enum FlushCommand {
- FLUSH_CMD_NONE,
- FLUSH_CMD_FLUSH,
- FLUSH_CMD_SHUTDOWN,
- };
-
- // Status of flush responses from the decoder and renderer.
- bool mFlushComplete[2][2];
-
- FlushStatus mFlushingAudio;
- FlushStatus mFlushingVideo;
-
- // Whether a decoder resume is pending and still needs to be completed.
- bool mResumePending;
-
- int32_t mVideoScalingMode;
-
- AudioPlaybackRate mPlaybackSettings;
- AVSyncSettings mSyncSettings;
- float mVideoFpsHint;
- bool mStarted;
- bool mPrepared;
- bool mResetting;
- bool mSourceStarted;
- bool mAudioDecoderError;
- bool mVideoDecoderError;
-
- // Actual pause state, either as requested by client or due to buffering.
- bool mPaused;
-
- // Pause state as requested by client. Note that if mPausedByClient is
- // true, mPaused is always true; if mPausedByClient is false, mPaused could
- // still become true, when we pause internally due to buffering.
- bool mPausedByClient;
-
- // Pause state as requested by source (internally) due to buffering
- bool mPausedForBuffering;
-
- // Passed from the Java layer.
- const sp<JObjectHolder> mContext;
-
- inline const sp<DecoderBase> &getDecoder(bool audio) {
- return audio ? mAudioDecoder : mVideoDecoder;
- }
-
- inline void clearFlushComplete() {
- mFlushComplete[0][0] = false;
- mFlushComplete[0][1] = false;
- mFlushComplete[1][0] = false;
- mFlushComplete[1][1] = false;
- }
-
- void disconnectSource();
-
- status_t createNuPlayer2Source(const sp<DataSourceDesc> &dsd,
- sp<Source> *source,
- DATA_SOURCE_TYPE *dataSourceType);
-
- void tryOpenAudioSinkForOffload(
- const sp<AMessage> &format, const sp<MetaData> &audioMeta, bool hasVideo);
- void closeAudioSink();
- void restartAudio(
- int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder);
- void determineAudioModeChange(const sp<AMessage> &audioFormat);
-
- status_t instantiateDecoder(
- bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange = true);
-
- void updateVideoSize(
- int64_t srcId,
- const sp<AMessage> &inputFormat,
- const sp<AMessage> &outputFormat = NULL);
-
- void notifyListener(int64_t srcId, int msg, int ext1, int ext2, const PlayerMessage *in = NULL);
-
- void addEndTimeMonitor();
-
- void handleFlushComplete(bool audio, bool isDecoder);
- void finishFlushIfPossible();
-
- void onStart(bool play);
- void onResume();
- void onPause();
-
- bool audioDecoderStillNeeded();
-
- void flushDecoder(bool audio, bool needShutdown);
-
- void finishResume();
- void notifyDriverSeekComplete(int64_t srcId);
-
- void postScanSources();
-
- void schedulePollDuration();
- void cancelPollDuration();
-
- void processDeferredActions();
-
- void performSeek(int64_t seekTimeUs, MediaPlayer2SeekMode mode);
- void performDecoderFlush(FlushCommand audio, FlushCommand video);
- void performReset();
- void performPlayNextDataSource();
- void performScanSources();
- void performSetSurface(const sp<ANativeWindowWrapper> &nw);
- void performResumeDecoders(bool needNotify);
-
- void onSourceNotify(const sp<AMessage> &msg);
- void onClosedCaptionNotify(const sp<AMessage> &msg);
-
- void queueDecoderShutdown(
- bool audio, bool video, const sp<AMessage> &reply);
-
- void sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex);
- void sendTimedMetaData(const sp<ABuffer> &buffer);
- void sendTimedTextData(const sp<ABuffer> &buffer);
-
- void writeTrackInfo(PlayerMessage* reply, const sp<AMessage>& format) const;
-
- status_t onPrepareDrm(const sp<AMessage> &msg);
- status_t onReleaseDrm(const sp<AMessage> &msg);
-
- SourceInfo* getSourceInfoByIdInMsg(const sp<AMessage> &msg);
- void resetSourceInfo(SourceInfo &srcInfo);
-
- DISALLOW_EVIL_CONSTRUCTORS(NuPlayer2);
-};
-
-} // namespace android
-
-#endif // NU_PLAYER2_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
deleted file mode 100644
index 98c3403..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.cpp
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2CCDecoder"
-#include <utils/Log.h>
-#include <inttypes.h>
-
-#include "NuPlayer2CCDecoder.h"
-
-#include <media/NdkMediaFormat.h>
-#include <media/stagefright/foundation/ABitReader.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/avc_utils.h>
-#include <media/stagefright/MediaDefs.h>
-
-namespace android {
-
-// In CEA-708B, the maximum bandwidth of CC is set to 9600bps.
-static const size_t kMaxBandwithSizeBytes = 9600 / 8;
-
-struct CCData {
- CCData(uint8_t type, uint8_t data1, uint8_t data2)
- : mType(type), mData1(data1), mData2(data2) {
- }
- bool getChannel(size_t *channel) const {
- if (mData1 >= 0x10 && mData1 <= 0x1f) {
- *channel = (mData1 >= 0x18 ? 1 : 0) + (mType ? 2 : 0);
- return true;
- }
- return false;
- }
-
- uint8_t mType;
- uint8_t mData1;
- uint8_t mData2;
-};
-
-static bool isNullPad(CCData *cc) {
- return cc->mData1 < 0x10 && cc->mData2 < 0x10;
-}
-
-static void dumpBytePair(const sp<ABuffer> &ccBuf) __attribute__ ((unused));
-static void dumpBytePair(const sp<ABuffer> &ccBuf) {
- size_t offset = 0;
- AString out;
-
- while (offset < ccBuf->size()) {
- char tmp[128];
-
- CCData *cc = (CCData *) (ccBuf->data() + offset);
-
- if (isNullPad(cc)) {
- // 1 null pad or XDS metadata, ignore
- offset += sizeof(CCData);
- continue;
- }
-
- if (cc->mData1 >= 0x20 && cc->mData1 <= 0x7f) {
- // 2 basic chars
- snprintf(tmp, sizeof(tmp), "[%d]Basic: %c %c", cc->mType, cc->mData1, cc->mData2);
- } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
- && cc->mData2 >= 0x30 && cc->mData2 <= 0x3f) {
- // 1 special char
- snprintf(tmp, sizeof(tmp), "[%d]Special: %02x %02x", cc->mType, cc->mData1, cc->mData2);
- } else if ((cc->mData1 == 0x12 || cc->mData1 == 0x1A)
- && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f) {
- // 1 Spanish/French char
- snprintf(tmp, sizeof(tmp), "[%d]Spanish: %02x %02x", cc->mType, cc->mData1, cc->mData2);
- } else if ((cc->mData1 == 0x13 || cc->mData1 == 0x1B)
- && cc->mData2 >= 0x20 && cc->mData2 <= 0x3f) {
- // 1 Portuguese/German/Danish char
- snprintf(tmp, sizeof(tmp), "[%d]German: %02x %02x", cc->mType, cc->mData1, cc->mData2);
- } else if ((cc->mData1 == 0x11 || cc->mData1 == 0x19)
- && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f) {
- // Mid-Row Codes (Table 69)
- snprintf(tmp, sizeof(tmp), "[%d]Mid-row: %02x %02x", cc->mType, cc->mData1, cc->mData2);
- } else if (((cc->mData1 == 0x14 || cc->mData1 == 0x1c)
- && cc->mData2 >= 0x20 && cc->mData2 <= 0x2f)
- ||
- ((cc->mData1 == 0x17 || cc->mData1 == 0x1f)
- && cc->mData2 >= 0x21 && cc->mData2 <= 0x23)) {
- // Misc Control Codes (Table 70)
- snprintf(tmp, sizeof(tmp), "[%d]Ctrl: %02x %02x", cc->mType, cc->mData1, cc->mData2);
- } else if ((cc->mData1 & 0x70) == 0x10
- && (cc->mData2 & 0x40) == 0x40
- && ((cc->mData1 & 0x07) || !(cc->mData2 & 0x20))) {
- // Preamble Address Codes (Table 71)
- snprintf(tmp, sizeof(tmp), "[%d]PAC: %02x %02x", cc->mType, cc->mData1, cc->mData2);
- } else {
- snprintf(tmp, sizeof(tmp), "[%d]Invalid: %02x %02x", cc->mType, cc->mData1, cc->mData2);
- }
-
- if (out.size() > 0) {
- out.append(", ");
- }
-
- out.append(tmp);
-
- offset += sizeof(CCData);
- }
-
- ALOGI("%s", out.c_str());
-}
-
-NuPlayer2::CCDecoder::CCDecoder(const sp<AMessage> ¬ify)
- : mNotify(notify),
- mSelectedTrack(-1),
- mDTVCCPacket(new ABuffer(kMaxBandwithSizeBytes)) {
- mDTVCCPacket->setRange(0, 0);
-
- // In CEA-608, packets with cc_type 0 carry channels CC1 and CC2, and packets
- // with cc_type 1 carry channels CC3 and CC4.
- // The following array indicates the current transmitting channels for each value of cc_type.
- mLine21Channels[0] = 0; // CC1
- mLine21Channels[1] = 2; // CC3
-}
-
-size_t NuPlayer2::CCDecoder::getTrackCount() const {
- return mTracks.size();
-}
-
-sp<AMessage> NuPlayer2::CCDecoder::getTrackInfo(size_t index) const {
- if (!isTrackValid(index)) {
- return NULL;
- }
-
- sp<AMessage> format = new AMessage();
-
- CCTrack track = mTracks[index];
-
- format->setInt32("type", MEDIA_TRACK_TYPE_SUBTITLE);
- format->setString("language", "und");
-
- switch (track.mTrackType) {
- case kTrackTypeCEA608:
- format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_608);
- break;
- case kTrackTypeCEA708:
- format->setString("mime", MEDIA_MIMETYPE_TEXT_CEA_708);
- break;
- default:
- ALOGE("Unknown track type: %d", track.mTrackType);
- return NULL;
- }
-
- // For CEA-608 CC1, field 0 channel 0
- bool isDefaultAuto = track.mTrackType == kTrackTypeCEA608
- && track.mTrackChannel == 0;
- // For CEA-708, Primary Caption Service.
- bool isDefaultOnly = track.mTrackType == kTrackTypeCEA708
- && track.mTrackChannel == 1;
- format->setInt32("auto", isDefaultAuto);
- format->setInt32("default", isDefaultAuto || isDefaultOnly);
- format->setInt32("forced", 0);
-
- return format;
-}
-
-status_t NuPlayer2::CCDecoder::selectTrack(size_t index, bool select) {
- if (!isTrackValid(index)) {
- return BAD_VALUE;
- }
-
- if (select) {
- if (mSelectedTrack == (ssize_t)index) {
- ALOGE("track %zu already selected", index);
- return BAD_VALUE;
- }
- ALOGV("selected track %zu", index);
- mSelectedTrack = index;
- } else {
- if (mSelectedTrack != (ssize_t)index) {
- ALOGE("track %zu is not selected", index);
- return BAD_VALUE;
- }
- ALOGV("unselected track %zu", index);
- mSelectedTrack = -1;
- }
-
- // Clear the previous track payloads
- mCCMap.clear();
-
- return OK;
-}
-
-ssize_t NuPlayer2::CCDecoder::getSelectedTrack(media_track_type type) const {
- if (mSelectedTrack != -1) {
- CCTrack track = mTracks[mSelectedTrack];
- if (track.mTrackType == kTrackTypeCEA608 || track.mTrackType == kTrackTypeCEA708) {
- return (type == MEDIA_TRACK_TYPE_SUBTITLE ? mSelectedTrack : -1);
- }
- return (type == MEDIA_TRACK_TYPE_UNKNOWN ? mSelectedTrack : -1);
- }
-
- return -1;
-}
-
-bool NuPlayer2::CCDecoder::isSelected() const {
- return mSelectedTrack >= 0 && mSelectedTrack < (int32_t)getTrackCount();
-}
-
-bool NuPlayer2::CCDecoder::isTrackValid(size_t index) const {
- return index < getTrackCount();
-}
-
-// returns true if a new CC track is found
-bool NuPlayer2::CCDecoder::extractFromSEI(const sp<ABuffer> &accessUnit) {
- sp<ABuffer> sei;
- if (!accessUnit->meta()->findBuffer("sei", &sei) || sei == NULL) {
- return false;
- }
-
- int64_t timeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
- bool trackAdded = false;
-
- const NALPosition *nal = (NALPosition *)sei->data();
-
- for (size_t i = 0; i < sei->size() / sizeof(NALPosition); ++i, ++nal) {
- trackAdded |= parseSEINalUnit(
- timeUs, accessUnit->data() + nal->nalOffset, nal->nalSize);
- }
-
- return trackAdded;
-}
-
-// returns true if a new CC track is found
-bool NuPlayer2::CCDecoder::parseSEINalUnit(int64_t timeUs, const uint8_t *data, size_t size) {
- unsigned nalType = data[0] & 0x1f;
-
- // the buffer should only have SEI in it
- if (nalType != 6) {
- return false;
- }
-
- bool trackAdded = false;
- NALBitReader br(data + 1, size - 1);
-
- // sei_message()
- while (br.atLeastNumBitsLeft(16)) { // at least 16-bit for sei_message()
- uint32_t payload_type = 0;
- size_t payload_size = 0;
- uint8_t last_byte;
-
- do {
- last_byte = br.getBits(8);
- payload_type += last_byte;
- } while (last_byte == 0xFF);
-
- do {
- last_byte = br.getBits(8);
- payload_size += last_byte;
- } while (last_byte == 0xFF);
-
- if (payload_size > SIZE_MAX / 8
- || !br.atLeastNumBitsLeft(payload_size * 8)) {
- ALOGV("Malformed SEI payload");
- break;
- }
-
- // sei_payload()
- if (payload_type == 4) {
- bool isCC = false;
- if (payload_size > 1 + 2 + 4 + 1) {
- // user_data_registered_itu_t_t35()
-
- // ATSC A/72: 6.4.2
- uint8_t itu_t_t35_country_code = br.getBits(8);
- uint16_t itu_t_t35_provider_code = br.getBits(16);
- uint32_t user_identifier = br.getBits(32);
- uint8_t user_data_type_code = br.getBits(8);
-
- payload_size -= 1 + 2 + 4 + 1;
-
- isCC = itu_t_t35_country_code == 0xB5
- && itu_t_t35_provider_code == 0x0031
- && user_identifier == 'GA94'
- && user_data_type_code == 0x3;
- }
-
- if (isCC && payload_size > 2) {
- trackAdded |= parseMPEGCCData(timeUs, br.data(), br.numBitsLeft() / 8);
- } else {
- ALOGV("Malformed SEI payload type 4");
- }
- } else {
- ALOGV("Unsupported SEI payload type %d", payload_type);
- }
-
- // skipping remaining bits of this payload
- br.skipBits(payload_size * 8);
- }
-
- return trackAdded;
-}
-
-// returns true if a new CC track is found
-bool NuPlayer2::CCDecoder::extractFromMPEGUserData(const sp<ABuffer> &accessUnit) {
- sp<ABuffer> mpegUserData;
- if (!accessUnit->meta()->findBuffer(AMEDIAFORMAT_KEY_MPEG_USER_DATA, &mpegUserData)
- || mpegUserData == NULL) {
- return false;
- }
-
- int64_t timeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
- bool trackAdded = false;
-
- const size_t *userData = (size_t *)mpegUserData->data();
-
- for (size_t i = 0; i < mpegUserData->size() / sizeof(size_t); ++i) {
- trackAdded |= parseMPEGUserDataUnit(
- timeUs, accessUnit->data() + userData[i], accessUnit->size() - userData[i]);
- }
-
- return trackAdded;
-}
-
-// returns true if a new CC track is found
-bool NuPlayer2::CCDecoder::parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size) {
- ABitReader br(data + 4, 5);
-
- uint32_t user_identifier = br.getBits(32);
- uint8_t user_data_type = br.getBits(8);
-
- if (user_identifier == 'GA94' && user_data_type == 0x3) {
- return parseMPEGCCData(timeUs, data + 9, size - 9);
- }
-
- return false;
-}
-
-// returns true if a new CC track is found
-bool NuPlayer2::CCDecoder::parseMPEGCCData(int64_t timeUs, const uint8_t *data, size_t size) {
- bool trackAdded = false;
-
- // MPEG_cc_data()
- // ATSC A/53 Part 4: 6.2.3.1
- ABitReader br(data, size);
-
- if (br.numBitsLeft() <= 16) {
- return false;
- }
-
- br.skipBits(1);
- bool process_cc_data_flag = br.getBits(1);
- br.skipBits(1);
- size_t cc_count = br.getBits(5);
- br.skipBits(8);
-
- if (!process_cc_data_flag || 3 * 8 * cc_count >= br.numBitsLeft()) {
- return false;
- }
-
- sp<ABuffer> line21CCBuf = NULL;
-
- for (size_t i = 0; i < cc_count; ++i) {
- br.skipBits(5);
- bool cc_valid = br.getBits(1);
- uint8_t cc_type = br.getBits(2);
-
- if (cc_valid) {
- if (cc_type == 3) {
- if (mDTVCCPacket->size() > 0) {
- trackAdded |= parseDTVCCPacket(
- timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
- mDTVCCPacket->setRange(0, 0);
- }
- if (mDTVCCPacket->size() + 2 > mDTVCCPacket->capacity()) {
- return false;
- }
- memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
- mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
- br.skipBits(16);
- } else if (mDTVCCPacket->size() > 0 && cc_type == 2) {
- if (mDTVCCPacket->size() + 2 > mDTVCCPacket->capacity()) {
- return false;
- }
- memcpy(mDTVCCPacket->data() + mDTVCCPacket->size(), br.data(), 2);
- mDTVCCPacket->setRange(0, mDTVCCPacket->size() + 2);
- br.skipBits(16);
- } else if (cc_type == 0 || cc_type == 1) {
- uint8_t cc_data_1 = br.getBits(8) & 0x7f;
- uint8_t cc_data_2 = br.getBits(8) & 0x7f;
-
- CCData cc(cc_type, cc_data_1, cc_data_2);
-
- if (isNullPad(&cc)) {
- continue;
- }
-
- size_t channel;
- if (cc.getChannel(&channel)) {
- mLine21Channels[cc_type] = channel;
-
- // create a new track if it does not exist.
- getTrackIndex(kTrackTypeCEA608, channel, &trackAdded);
- }
-
- if (isSelected() && mTracks[mSelectedTrack].mTrackType == kTrackTypeCEA608
- && mTracks[mSelectedTrack].mTrackChannel == mLine21Channels[cc_type]) {
- if (line21CCBuf == NULL) {
- line21CCBuf = new ABuffer((cc_count - i) * sizeof(CCData));
- line21CCBuf->setRange(0, 0);
- }
- if (line21CCBuf->size() + sizeof(cc) > line21CCBuf->capacity()) {
- return false;
- }
- memcpy(line21CCBuf->data() + line21CCBuf->size(), &cc, sizeof(cc));
- line21CCBuf->setRange(0, line21CCBuf->size() + sizeof(CCData));
- }
- } else {
- br.skipBits(16);
- }
- } else {
- if ((cc_type == 3 || cc_type == 2) && mDTVCCPacket->size() > 0) {
- trackAdded |= parseDTVCCPacket(timeUs, mDTVCCPacket->data(), mDTVCCPacket->size());
- mDTVCCPacket->setRange(0, 0);
- }
- br.skipBits(16);
- }
- }
-
- if (isSelected() && mTracks[mSelectedTrack].mTrackType == kTrackTypeCEA608
- && line21CCBuf != NULL && line21CCBuf->size() > 0) {
- mCCMap.add(timeUs, line21CCBuf);
- }
-
- return trackAdded;
-}
-
-// returns true if a new CC track is found
-bool NuPlayer2::CCDecoder::parseDTVCCPacket(int64_t timeUs, const uint8_t *data, size_t size) {
- // CEA-708B 5 DTVCC Packet Layer.
- ABitReader br(data, size);
- br.skipBits(2);
-
- size_t packet_size = br.getBits(6);
- if (packet_size == 0) packet_size = 64;
- packet_size *= 2;
-
- if (size != packet_size) {
- return false;
- }
-
- bool trackAdded = false;
-
- while (br.numBitsLeft() >= 16) {
- // CEA-708B Figure 5 and 6.
- uint8_t service_number = br.getBits(3);
- size_t block_size = br.getBits(5);
-
- if (service_number == 7) { // a 3-bit value of 7 signals an extended service number
- br.skipBits(2);
- service_number = br.getBits(6);
-
- if (service_number < 7) { // extended service numbers are 7..63
- return trackAdded;
- }
- }
-
- if (br.numBitsLeft() < block_size * 8) {
- return trackAdded;
- }
-
- if (block_size > 0) {
- size_t trackIndex = getTrackIndex(kTrackTypeCEA708, service_number, &trackAdded);
- if (mSelectedTrack == (ssize_t)trackIndex) {
- sp<ABuffer> ccPacket = new ABuffer(block_size);
- if (ccPacket->capacity() == 0) {
- return false;
- }
- memcpy(ccPacket->data(), br.data(), block_size);
- mCCMap.add(timeUs, ccPacket);
- }
- }
- br.skipBits(block_size * 8);
- }
-
- return trackAdded;
-}
-
-// return the track index for a given type and channel.
-// if the track does not exist, creates a new one.
-size_t NuPlayer2::CCDecoder::getTrackIndex(
- int32_t trackType, size_t channel, bool *trackAdded) {
- CCTrack track(trackType, channel);
- ssize_t index = mTrackIndices.indexOfKey(track);
-
- if (index < 0) {
- // A new track is added.
- index = mTracks.size();
- mTrackIndices.add(track, index);
- mTracks.add(track);
- *trackAdded = true;
- return index;
- }
-
- return mTrackIndices.valueAt(index);
-}
-
-void NuPlayer2::CCDecoder::decode(const sp<ABuffer> &accessUnit) {
- if (extractFromMPEGUserData(accessUnit) || extractFromSEI(accessUnit)) {
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", kWhatTrackAdded);
- msg->post();
- }
- // TODO: extract CC from other sources
-}
-
-void NuPlayer2::CCDecoder::display(int64_t timeUs) {
- if (!isSelected()) {
- return;
- }
-
- ssize_t index = mCCMap.indexOfKey(timeUs);
- if (index < 0) {
- ALOGV("cc for timestamp %" PRId64 " not found", timeUs);
- return;
- }
-
- sp<ABuffer> ccBuf;
-
- if (index == 0) {
- ccBuf = mCCMap.valueAt(index);
- } else {
- size_t size = 0;
-
- for (ssize_t i = 0; i <= index; ++i) {
- size += mCCMap.valueAt(i)->size();
- }
-
- ccBuf = new ABuffer(size);
- ccBuf->setRange(0, 0);
-
- if (ccBuf->capacity() > 0) {
- for (ssize_t i = 0; i <= index; ++i) {
- sp<ABuffer> buf = mCCMap.valueAt(i);
- memcpy(ccBuf->data() + ccBuf->size(), buf->data(), buf->size());
- ccBuf->setRange(0, ccBuf->size() + buf->size());
- }
- }
- }
-
- if (ccBuf->size() > 0) {
-#if 0
- dumpBytePair(ccBuf);
-#endif
-
- ccBuf->meta()->setInt32(AMEDIAFORMAT_KEY_TRACK_INDEX, mSelectedTrack);
- ccBuf->meta()->setInt64("timeUs", timeUs);
- ccBuf->meta()->setInt64("durationUs", 0LL);
-
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", kWhatClosedCaptionData);
- msg->setBuffer("buffer", ccBuf);
- msg->post();
- }
-
- // remove all entries before timeUs
- mCCMap.removeItemsAt(0, index + 1);
-}
-
-void NuPlayer2::CCDecoder::flush() {
- mCCMap.clear();
- mDTVCCPacket->setRange(0, 0);
-}
-
-int32_t NuPlayer2::CCDecoder::CCTrack::compare(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
- int32_t cmp = mTrackType - rhs.mTrackType;
- if (cmp != 0) return cmp;
- return mTrackChannel - rhs.mTrackChannel;
-}
-
-bool NuPlayer2::CCDecoder::CCTrack::operator<(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
- return compare(rhs) < 0;
-}
-
-bool NuPlayer2::CCDecoder::CCTrack::operator==(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
- return compare(rhs) == 0;
-}
-
-bool NuPlayer2::CCDecoder::CCTrack::operator!=(const NuPlayer2::CCDecoder::CCTrack& rhs) const {
- return compare(rhs) != 0;
-}
-
-} // namespace android
-
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h b/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h
deleted file mode 100644
index 97834d1..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2CCDecoder.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NUPLAYER2_CCDECODER_H_
-
-#define NUPLAYER2_CCDECODER_H_
-
-#include "NuPlayer2.h"
-
-namespace android {
-
-struct NuPlayer2::CCDecoder : public RefBase {
- enum {
- kWhatClosedCaptionData,
- kWhatTrackAdded,
- };
-
- enum {
- kTrackTypeCEA608,
- kTrackTypeCEA708,
- };
-
- explicit CCDecoder(const sp<AMessage> ¬ify);
-
- size_t getTrackCount() const;
- sp<AMessage> getTrackInfo(size_t index) const;
- status_t selectTrack(size_t index, bool select);
- ssize_t getSelectedTrack(media_track_type type) const;
- bool isSelected() const;
- void decode(const sp<ABuffer> &accessUnit);
- void display(int64_t timeUs);
- void flush();
-
-private:
- // CC track identifier.
- struct CCTrack {
- CCTrack() : mTrackType(0), mTrackChannel(0) { }
-
- CCTrack(const int32_t trackType, const size_t trackChannel)
- : mTrackType(trackType), mTrackChannel(trackChannel) { }
-
- int32_t mTrackType;
- size_t mTrackChannel;
-
- // CCTracks are ordered so they can be used as keys in a map from track to index,
- // which is needed to look up the index of the matching CCTrack when CC data arrives.
- int compare(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
- inline bool operator<(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
- inline bool operator==(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
- inline bool operator!=(const NuPlayer2::CCDecoder::CCTrack& rhs) const;
- };
-
- sp<AMessage> mNotify;
- KeyedVector<int64_t, sp<ABuffer> > mCCMap;
- ssize_t mSelectedTrack;
- KeyedVector<CCTrack, size_t> mTrackIndices;
- Vector<CCTrack> mTracks;
-
- // CEA-608 closed caption
- size_t mLine21Channels[2]; // The current channels of NTSC_CC_FIELD_{1, 2}
-
- // CEA-708 closed caption
- sp<ABuffer> mDTVCCPacket;
-
- bool isTrackValid(size_t index) const;
- size_t getTrackIndex(int32_t trackType, size_t channel, bool *trackAdded);
-
- // Extract from H.264 SEIs
- bool extractFromSEI(const sp<ABuffer> &accessUnit);
- bool parseSEINalUnit(int64_t timeUs, const uint8_t *data, size_t size);
-
- // Extract from MPEG user data
- bool extractFromMPEGUserData(const sp<ABuffer> &accessUnit);
- bool parseMPEGUserDataUnit(int64_t timeUs, const uint8_t *data, size_t size);
-
- // Extract CC tracks from MPEG_cc_data
- bool parseMPEGCCData(int64_t timeUs, const uint8_t *data, size_t size);
- bool parseDTVCCPacket(int64_t timeUs, const uint8_t *data, size_t size);
-
- DISALLOW_EVIL_CONSTRUCTORS(CCDecoder);
-};
-
-} // namespace android
-
-#endif // NUPLAYER2_CCDECODER_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
deleted file mode 100644
index 66bfae5..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.cpp
+++ /dev/null
@@ -1,1315 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2Decoder"
-#include <utils/Log.h>
-#include <inttypes.h>
-
-#include <algorithm>
-
-#include "NuPlayer2CCDecoder.h"
-#include "NuPlayer2Decoder.h"
-#include "NuPlayer2Drm.h"
-#include "NuPlayer2Renderer.h"
-#include "NuPlayer2Source.h"
-
-#include <cutils/properties.h>
-#include <media/MediaBufferHolder.h>
-#include <media/MediaCodecBuffer.h>
-#include <media/NdkMediaCodec.h>
-#include <media/NdkWrapper.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/avc_utils.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/SurfaceUtils.h>
-
-#include <system/window.h>
-#include "ATSParser.h"
-
-namespace android {
-
-static float kDisplayRefreshingRate = 60.f; // TODO: get this from the display
-
-// The default total video frame rate of a stream when that info is not available from
-// the source.
-static float kDefaultVideoFrameRateTotal = 30.f;
-
-static inline bool getAudioDeepBufferSetting() {
- return property_get_bool("media.stagefright.audio.deep", false /* default_value */);
-}
-
-NuPlayer2::Decoder::Decoder(
- const sp<AMessage> ¬ify,
- const sp<Source> &source,
- pid_t pid,
- uid_t uid,
- const sp<Renderer> &renderer,
- const sp<ANativeWindowWrapper> &nww,
- const sp<CCDecoder> &ccDecoder)
- : DecoderBase(notify),
- mNativeWindow(nww),
- mSource(source),
- mRenderer(renderer),
- mCCDecoder(ccDecoder),
- mPid(pid),
- mUid(uid),
- mSkipRenderingUntilMediaTimeUs(-1LL),
- mNumFramesTotal(0LL),
- mNumInputFramesDropped(0LL),
- mNumOutputFramesDropped(0LL),
- mVideoWidth(0),
- mVideoHeight(0),
- mIsAudio(true),
- mIsVideoAVC(false),
- mIsSecure(false),
- mIsEncrypted(false),
- mIsEncryptedObservedEarlier(false),
- mFormatChangePending(false),
- mTimeChangePending(false),
- mFrameRateTotal(kDefaultVideoFrameRateTotal),
- mPlaybackSpeed(1.0f),
- mNumVideoTemporalLayerTotal(1), // decode all layers
- mNumVideoTemporalLayerAllowed(1),
- mCurrentMaxVideoTemporalLayerId(0),
- mResumePending(false),
- mComponentName("decoder") {
- mVideoTemporalLayerAggregateFps[0] = mFrameRateTotal;
-}
-
-NuPlayer2::Decoder::~Decoder() {
- // Need to stop looper first since mCodec could be accessed on the mDecoderLooper.
- stopLooper();
- if (mCodec != NULL) {
- mCodec->release();
- }
- releaseAndResetMediaBuffers();
-}
-
-sp<AMessage> NuPlayer2::Decoder::getStats() const {
- mStats->setInt64("frames-total", mNumFramesTotal);
- mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
- mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
- mStats->setFloat("frame-rate-total", mFrameRateTotal);
-
- // We are holding the lock here, so make our own copy
- // to avoid being affected by any later changes.
- sp<AMessage> copiedStats = mStats->dup();
- return copiedStats;
-}
-
-status_t NuPlayer2::Decoder::setVideoSurface(const sp<ANativeWindowWrapper> &nww) {
- if (nww == NULL || nww->getANativeWindow() == NULL
- || ADebug::isExperimentEnabled("legacy-setsurface")) {
- return BAD_VALUE;
- }
-
- sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
-
- msg->setObject("surface", nww);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
- return err;
-}
-
-void NuPlayer2::Decoder::onMessageReceived(const sp<AMessage> &msg) {
- ALOGV("[%s] onMessage: %s", mComponentName.c_str(), msg->debugString().c_str());
-
- switch (msg->what()) {
- case kWhatCodecNotify:
- {
- int32_t cbID;
- CHECK(msg->findInt32("callbackID", &cbID));
-
- ALOGV("[%s] kWhatCodecNotify: cbID = %d, paused = %d",
- mIsAudio ? "audio" : "video", cbID, mPaused);
-
- if (mPaused) {
- break;
- }
-
- switch (cbID) {
- case AMediaCodecWrapper::CB_INPUT_AVAILABLE:
- {
- int32_t index;
- CHECK(msg->findInt32("index", &index));
-
- handleAnInputBuffer(index);
- break;
- }
-
- case AMediaCodecWrapper::CB_OUTPUT_AVAILABLE:
- {
- int32_t index;
- size_t offset;
- size_t size;
- int64_t timeUs;
- int32_t flags;
-
- CHECK(msg->findInt32("index", &index));
- CHECK(msg->findSize("offset", &offset));
- CHECK(msg->findSize("size", &size));
- CHECK(msg->findInt64("timeUs", &timeUs));
- CHECK(msg->findInt32("flags", &flags));
-
- handleAnOutputBuffer(index, offset, size, timeUs, flags);
- break;
- }
-
- case AMediaCodecWrapper::CB_OUTPUT_FORMAT_CHANGED:
- {
- sp<AMessage> format;
- CHECK(msg->findMessage("format", &format));
-
- handleOutputFormatChange(format);
- break;
- }
-
- case AMediaCodecWrapper::CB_ERROR:
- {
- status_t err;
- CHECK(msg->findInt32("err", &err));
- ALOGE("Decoder (%s) reported error : 0x%x",
- mIsAudio ? "audio" : "video", err);
-
- handleError(err);
- break;
- }
-
- default:
- {
- TRESPASS();
- break;
- }
- }
-
- break;
- }
-
- case kWhatRenderBuffer:
- {
- if (!isStaleReply(msg)) {
- onRenderBuffer(msg);
- }
- break;
- }
-
- case kWhatAudioOutputFormatChanged:
- {
- if (!isStaleReply(msg)) {
- status_t err;
- if (msg->findInt32("err", &err) && err != OK) {
- ALOGE("Renderer reported 0x%x when changing audio output format", err);
- handleError(err);
- }
- }
- break;
- }
-
- case kWhatSetVideoSurface:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- sp<RefBase> obj;
- CHECK(msg->findObject("surface", &obj));
- sp<ANativeWindowWrapper> nww =
- static_cast<ANativeWindowWrapper *>(obj.get()); // non-null
- if (nww == NULL || nww->getANativeWindow() == NULL) {
- break;
- }
- int32_t err = INVALID_OPERATION;
- // NOTE: in practice mNativeWindow is always non-null,
- // but checking here for completeness
- if (mCodec != NULL
- && mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
- // TODO: once AwesomePlayer is removed, remove this automatic connecting
- // to the surface by MediaPlayerService.
- //
- // at this point MediaPlayer2Manager::client has already connected to the
- // surface, which MediaCodec does not expect
- err = native_window_api_disconnect(nww->getANativeWindow(),
- NATIVE_WINDOW_API_MEDIA);
- if (err == OK) {
- err = mCodec->setOutputSurface(nww);
- ALOGI_IF(err, "codec setOutputSurface returned: %d", err);
- if (err == OK) {
- // reconnect to the old surface as MPS::Client will expect to
- // be able to disconnect from it.
- (void)native_window_api_connect(mNativeWindow->getANativeWindow(),
- NATIVE_WINDOW_API_MEDIA);
-
- mNativeWindow = nww;
- }
- }
- if (err != OK) {
- // reconnect to the new surface on error as MPS::Client will expect to
- // be able to disconnect from it.
- (void)native_window_api_connect(nww->getANativeWindow(),
- NATIVE_WINDOW_API_MEDIA);
- }
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatDrmReleaseCrypto:
- {
- ALOGV("kWhatDrmReleaseCrypto");
- onReleaseCrypto(msg);
- break;
- }
-
- default:
- DecoderBase::onMessageReceived(msg);
- break;
- }
-}
-
-void NuPlayer2::Decoder::onConfigure(const sp<AMessage> &format) {
- ALOGV("[%s] onConfigure (format=%s)", mComponentName.c_str(), format->debugString().c_str());
- CHECK(mCodec == NULL);
-
- mFormatChangePending = false;
- mTimeChangePending = false;
-
- ++mBufferGeneration;
-
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- mIsAudio = !strncasecmp("audio/", mime.c_str(), 6);
- mIsVideoAVC = !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str());
-
- mComponentName = mime;
- mComponentName.append(" decoder");
- ALOGV("[%s] onConfigure (nww=%p)", mComponentName.c_str(),
- (mNativeWindow == NULL ? NULL : mNativeWindow->getANativeWindow()));
-
- mCodec = AMediaCodecWrapper::CreateDecoderByType(mime);
- int32_t secure = 0;
- if (format->findInt32("secure", &secure) && secure != 0) {
- if (mCodec != NULL) {
- if (mCodec->getName(&mComponentName) == OK) {
- mComponentName.append(".secure");
- mCodec->release();
- ALOGI("[%s] creating", mComponentName.c_str());
- mCodec = AMediaCodecWrapper::CreateCodecByName(mComponentName);
- } else {
- mCodec = NULL;
- }
- }
- }
- if (mCodec == NULL) {
- ALOGE("Failed to create %s%s decoder",
- (secure ? "secure " : ""), mime.c_str());
- handleError(NO_INIT);
- return;
- }
- mIsSecure = secure;
-
- mCodec->getName(&mComponentName);
-
- status_t err;
- if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
- // disconnect from surface as MediaCodec will reconnect
- err = native_window_api_disconnect(mNativeWindow->getANativeWindow(),
- NATIVE_WINDOW_API_MEDIA);
- // We treat this as a warning, as this is a preparatory step.
- // Codec will try to connect to the surface, which is where
- // any error signaling will occur.
- ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
- }
-
- // Modular DRM
- sp<RefBase> objCrypto;
- format->findObject("crypto", &objCrypto);
- sp<AMediaCryptoWrapper> crypto = static_cast<AMediaCryptoWrapper *>(objCrypto.get());
- // non-encrypted source won't have a crypto
- mIsEncrypted = (crypto != NULL);
- // configure is called once; still using OR in case the behavior changes.
- mIsEncryptedObservedEarlier = mIsEncryptedObservedEarlier || mIsEncrypted;
- ALOGV("onConfigure mCrypto: %p, mIsSecure: %d", crypto.get(), mIsSecure);
-
- err = mCodec->configure(
- AMediaFormatWrapper::Create(format),
- mNativeWindow,
- crypto,
- 0 /* flags */);
-
- if (err != OK) {
- ALOGE("Failed to configure [%s] decoder (err=%d)", mComponentName.c_str(), err);
- mCodec->release();
- mCodec.clear();
- handleError(err);
- return;
- }
- rememberCodecSpecificData(format);
-
- // the following should work in configured state
- sp<AMediaFormatWrapper> outputFormat = mCodec->getOutputFormat();
- if (outputFormat == NULL) {
- handleError(INVALID_OPERATION);
- return;
- }
- mInputFormat = mCodec->getInputFormat();
- if (mInputFormat == NULL) {
- handleError(INVALID_OPERATION);
- return;
- }
-
- mStats->setString("mime", mime.c_str());
- mStats->setString("component-name", mComponentName.c_str());
-
- if (!mIsAudio) {
- int32_t width, height;
- if (outputFormat->getInt32("width", &width)
- && outputFormat->getInt32("height", &height)) {
- mStats->setInt32("width", width);
- mStats->setInt32("height", height);
- }
- }
-
- sp<AMessage> reply = new AMessage(kWhatCodecNotify, this);
- mCodec->setCallback(reply);
-
- err = mCodec->start();
- if (err != OK) {
- ALOGE("Failed to start [%s] decoder (err=%d)", mComponentName.c_str(), err);
- mCodec->release();
- mCodec.clear();
- handleError(err);
- return;
- }
-
- releaseAndResetMediaBuffers();
-
- mPaused = false;
- mResumePending = false;
-}
-
-void NuPlayer2::Decoder::onSetParameters(const sp<AMessage> ¶ms) {
- bool needAdjustLayers = false;
- float frameRateTotal;
- if (params->findFloat("frame-rate-total", &frameRateTotal)
- && mFrameRateTotal != frameRateTotal) {
- needAdjustLayers = true;
- mFrameRateTotal = frameRateTotal;
- }
-
- int32_t numVideoTemporalLayerTotal;
- if (params->findInt32("temporal-layer-count", &numVideoTemporalLayerTotal)
- && numVideoTemporalLayerTotal >= 0
- && numVideoTemporalLayerTotal <= kMaxNumVideoTemporalLayers
- && mNumVideoTemporalLayerTotal != numVideoTemporalLayerTotal) {
- needAdjustLayers = true;
- mNumVideoTemporalLayerTotal = std::max(numVideoTemporalLayerTotal, 1);
- }
-
- if (needAdjustLayers && mNumVideoTemporalLayerTotal > 1) {
- // TODO: For now, layer fps is calculated for some specific architectures.
- // But it really should be extracted from the stream.
- mVideoTemporalLayerAggregateFps[0] =
- mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - 1));
- for (int32_t i = 1; i < mNumVideoTemporalLayerTotal; ++i) {
- mVideoTemporalLayerAggregateFps[i] =
- mFrameRateTotal / (float)(1LL << (mNumVideoTemporalLayerTotal - i))
- + mVideoTemporalLayerAggregateFps[i - 1];
- }
- }
-
- float playbackSpeed;
- if (params->findFloat("playback-speed", &playbackSpeed)
- && mPlaybackSpeed != playbackSpeed) {
- needAdjustLayers = true;
- mPlaybackSpeed = playbackSpeed;
- }
-
- if (needAdjustLayers) {
- float decodeFrameRate = mFrameRateTotal;
- // enable temporal layering optimization only if we know the layering depth
- if (mNumVideoTemporalLayerTotal > 1) {
- int32_t layerId;
- for (layerId = 0; layerId < mNumVideoTemporalLayerTotal - 1; ++layerId) {
- if (mVideoTemporalLayerAggregateFps[layerId] * mPlaybackSpeed
- >= kDisplayRefreshingRate * 0.9) {
- break;
- }
- }
- mNumVideoTemporalLayerAllowed = layerId + 1;
- decodeFrameRate = mVideoTemporalLayerAggregateFps[layerId];
- }
- ALOGV("onSetParameters: allowed layers=%d, decodeFps=%g",
- mNumVideoTemporalLayerAllowed, decodeFrameRate);
-
- if (mCodec == NULL) {
- ALOGW("onSetParameters called before codec is created.");
- return;
- }
-
- sp<AMediaFormatWrapper> codecParams = new AMediaFormatWrapper();
- codecParams->setFloat("operating-rate", decodeFrameRate * mPlaybackSpeed);
- mCodec->setParameters(codecParams);
- }
-}
-
-void NuPlayer2::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
- mRenderer = renderer;
-}
-
-void NuPlayer2::Decoder::onResume(bool notifyComplete) {
- mPaused = false;
-
- if (notifyComplete) {
- mResumePending = true;
- }
-
- if (mCodec == NULL) {
- ALOGE("[%s] onResume without a valid codec", mComponentName.c_str());
- handleError(NO_INIT);
- return;
- }
- mCodec->start();
-}
-
-void NuPlayer2::Decoder::doFlush(bool notifyComplete) {
- if (mCCDecoder != NULL) {
- mCCDecoder->flush();
- }
-
- if (mRenderer != NULL) {
- mRenderer->flush(mIsAudio, notifyComplete);
- mRenderer->signalTimeDiscontinuity();
- }
-
- status_t err = OK;
- if (mCodec != NULL) {
- err = mCodec->flush();
- mCSDsToSubmit = mCSDsForCurrentFormat; // copy operator
- ++mBufferGeneration;
- }
-
- if (err != OK) {
- ALOGE("failed to flush [%s] (err=%d)", mComponentName.c_str(), err);
- handleError(err);
- // finish with posting kWhatFlushCompleted.
- // we attempt to release the buffers even if flush fails.
- }
- releaseAndResetMediaBuffers();
- mPaused = true;
-}
-
-
-void NuPlayer2::Decoder::onFlush() {
- doFlush(true);
-
- if (isDiscontinuityPending()) {
- // This could happen if the client starts seeking/shutdown
- // after we queued an EOS for discontinuities.
- // We can consider discontinuity handled.
- finishHandleDiscontinuity(false /* flushOnTimeChange */);
- }
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatFlushCompleted);
- notify->post();
-}
-
-void NuPlayer2::Decoder::onShutdown(bool notifyComplete) {
- status_t err = OK;
-
- // if there is a pending resume request, notify complete now
- notifyResumeCompleteIfNecessary();
-
- if (mCodec != NULL) {
- err = mCodec->release();
- mCodec = NULL;
- ++mBufferGeneration;
-
- if (mNativeWindow != NULL && mNativeWindow->getANativeWindow() != NULL) {
- // reconnect to surface as MediaCodec disconnected from it
- status_t error = native_window_api_connect(mNativeWindow->getANativeWindow(),
- NATIVE_WINDOW_API_MEDIA);
- ALOGW_IF(error != NO_ERROR,
- "[%s] failed to connect to native window, error=%d",
- mComponentName.c_str(), error);
- }
- mComponentName = "decoder";
- }
-
- releaseAndResetMediaBuffers();
-
- if (err != OK) {
- ALOGE("failed to release [%s] (err=%d)", mComponentName.c_str(), err);
- handleError(err);
- // finish with posting kWhatShutdownCompleted.
- }
-
- if (notifyComplete) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatShutdownCompleted);
- notify->post();
- mPaused = true;
- }
-}
-
-/*
- * returns true if we should request more data
- */
-bool NuPlayer2::Decoder::doRequestBuffers() {
- if (isDiscontinuityPending()) {
- return false;
- }
- status_t err = OK;
- while (err == OK && !mDequeuedInputBuffers.empty()) {
- size_t bufferIx = *mDequeuedInputBuffers.begin();
- sp<AMessage> msg = new AMessage();
- msg->setSize("buffer-ix", bufferIx);
- err = fetchInputData(msg);
- if (err != OK && err != ERROR_END_OF_STREAM) {
- // if EOS, need to queue EOS buffer
- break;
- }
- mDequeuedInputBuffers.erase(mDequeuedInputBuffers.begin());
-
- if (!mPendingInputMessages.empty()
- || !onInputBufferFetched(msg)) {
- mPendingInputMessages.push_back(msg);
- }
- }
-
- return err == -EWOULDBLOCK
- && mSource->feedMoreTSData() == OK;
-}
-
-void NuPlayer2::Decoder::handleError(int32_t err)
-{
- // We cannot immediately release the codec due to buffers still outstanding
- // in the renderer. We signal to the player the error so it can shutdown/release the
- // decoder after flushing and increment the generation to discard unnecessary messages.
-
- ++mBufferGeneration;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatError);
- notify->setInt32("err", err);
- notify->post();
-}
-
-status_t NuPlayer2::Decoder::releaseCrypto()
-{
- ALOGV("releaseCrypto");
-
- sp<AMessage> msg = new AMessage(kWhatDrmReleaseCrypto, this);
-
- sp<AMessage> response;
- status_t status = msg->postAndAwaitResponse(&response);
- if (status == OK && response != NULL) {
- CHECK(response->findInt32("status", &status));
- ALOGV("releaseCrypto ret: %d ", status);
- } else {
- ALOGE("releaseCrypto err: %d", status);
- }
-
- return status;
-}
-
-void NuPlayer2::Decoder::onReleaseCrypto(const sp<AMessage>& msg)
-{
- status_t status = INVALID_OPERATION;
- if (mCodec != NULL) {
- status = mCodec->releaseCrypto();
- } else {
- // returning OK if the codec has already been released
- status = OK;
- ALOGE("onReleaseCrypto No mCodec. err: %d", status);
- }
-
- sp<AMessage> response = new AMessage;
- response->setInt32("status", status);
- // Clearing the state as it's tied to crypto. mIsEncryptedObservedEarlier is sticky though
- // and lasts for the lifetime of this codec. See its use in fetchInputData.
- mIsEncrypted = false;
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-bool NuPlayer2::Decoder::handleAnInputBuffer(size_t index) {
- if (isDiscontinuityPending()) {
- return false;
- }
-
- if (mCodec == NULL) {
- ALOGE("[%s] handleAnInputBuffer without a valid codec", mComponentName.c_str());
- handleError(NO_INIT);
- return false;
- }
-
- size_t bufferSize = 0;
- uint8_t *bufferBase = mCodec->getInputBuffer(index, &bufferSize);
-
- if (bufferBase == NULL) {
- ALOGE("[%s] handleAnInputBuffer, failed to get input buffer", mComponentName.c_str());
- handleError(UNKNOWN_ERROR);
- return false;
- }
-
- sp<MediaCodecBuffer> buffer =
- new MediaCodecBuffer(NULL /* format */, new ABuffer(bufferBase, bufferSize));
-
- if (index >= mInputBuffers.size()) {
- for (size_t i = mInputBuffers.size(); i <= index; ++i) {
- mInputBuffers.add();
- mMediaBuffers.add();
- mInputBufferIsDequeued.add();
- mMediaBuffers.editItemAt(i) = NULL;
- mInputBufferIsDequeued.editItemAt(i) = false;
- }
- }
- mInputBuffers.editItemAt(index) = buffer;
-
- //CHECK_LT(bufferIx, mInputBuffers.size());
-
- if (mMediaBuffers[index] != NULL) {
- mMediaBuffers[index]->release();
- mMediaBuffers.editItemAt(index) = NULL;
- }
- mInputBufferIsDequeued.editItemAt(index) = true;
-
- if (!mCSDsToSubmit.isEmpty()) {
- sp<AMessage> msg = new AMessage();
- msg->setSize("buffer-ix", index);
-
- sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
- ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
- msg->setBuffer("buffer", buffer);
- mCSDsToSubmit.removeAt(0);
- if (!onInputBufferFetched(msg)) {
- handleError(UNKNOWN_ERROR);
- return false;
- }
- return true;
- }
-
- while (!mPendingInputMessages.empty()) {
- sp<AMessage> msg = *mPendingInputMessages.begin();
- if (!onInputBufferFetched(msg)) {
- break;
- }
- mPendingInputMessages.erase(mPendingInputMessages.begin());
- }
-
- if (!mInputBufferIsDequeued.editItemAt(index)) {
- return true;
- }
-
- mDequeuedInputBuffers.push_back(index);
-
- onRequestInputBuffers();
- return true;
-}
-
-bool NuPlayer2::Decoder::handleAnOutputBuffer(
- size_t index,
- size_t offset,
- size_t size,
- int64_t timeUs,
- int32_t flags) {
- if (mCodec == NULL) {
- ALOGE("[%s] handleAnOutputBuffer without a valid codec", mComponentName.c_str());
- handleError(NO_INIT);
- return false;
- }
-
-// CHECK_LT(bufferIx, mOutputBuffers.size());
-
- size_t bufferSize = 0;
- uint8_t *bufferBase = mCodec->getOutputBuffer(index, &bufferSize);
-
- if (bufferBase == NULL) {
- ALOGE("[%s] handleAnOutputBuffer, failed to get output buffer", mComponentName.c_str());
- handleError(UNKNOWN_ERROR);
- return false;
- }
-
- sp<MediaCodecBuffer> buffer =
- new MediaCodecBuffer(NULL /* format */, new ABuffer(bufferBase, bufferSize));
-
- if (index >= mOutputBuffers.size()) {
- for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
- mOutputBuffers.add();
- }
- }
-
- mOutputBuffers.editItemAt(index) = buffer;
-
- buffer->setRange(offset, size);
- buffer->meta()->clear();
- buffer->meta()->setInt64("timeUs", timeUs);
-
- bool eos = flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
- // we do not expect CODECCONFIG or SYNCFRAME for decoder
-
- sp<AMessage> reply = new AMessage(kWhatRenderBuffer, this);
- reply->setSize("buffer-ix", index);
- reply->setInt32("generation", mBufferGeneration);
-
- if (eos) {
- ALOGI("[%s] saw output EOS", mIsAudio ? "audio" : "video");
-
- buffer->meta()->setInt32("eos", true);
- reply->setInt32("eos", true);
- }
-
- mNumFramesTotal += !mIsAudio;
-
- if (mSkipRenderingUntilMediaTimeUs >= 0) {
- if (timeUs < mSkipRenderingUntilMediaTimeUs) {
- ALOGV("[%s] dropping buffer at time %lld as requested.",
- mComponentName.c_str(), (long long)timeUs);
-
- reply->post();
- if (eos) {
- notifyResumeCompleteIfNecessary();
- if (mRenderer != NULL && !isDiscontinuityPending()) {
- mRenderer->queueEOS(mIsAudio, ERROR_END_OF_STREAM);
- }
- }
- return true;
- }
-
- mSkipRenderingUntilMediaTimeUs = -1;
- }
-
- // wait until 1st frame comes out to signal resume complete
- notifyResumeCompleteIfNecessary();
-
- if (mRenderer != NULL) {
- // send the buffer to renderer.
- mRenderer->queueBuffer(mIsAudio, buffer, reply);
- if (eos && !isDiscontinuityPending()) {
- mRenderer->queueEOS(mIsAudio, ERROR_END_OF_STREAM);
- }
- }
-
- return true;
-}
-
-void NuPlayer2::Decoder::handleOutputFormatChange(const sp<AMessage> &format) {
- if (!mIsAudio) {
- int32_t width, height;
- if (format->findInt32("width", &width)
- && format->findInt32("height", &height)) {
- mStats->setInt32("width", width);
- mStats->setInt32("height", height);
- }
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatVideoSizeChanged);
- notify->setMessage("format", format);
- notify->post();
- } else if (mRenderer != NULL) {
- uint32_t flags;
- int64_t durationUs;
- bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
- if (getAudioDeepBufferSetting() // override regardless of source duration
- || (mSource->getDuration(&durationUs) == OK
- && durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
- flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
- } else {
- flags = AUDIO_OUTPUT_FLAG_NONE;
- }
-
- sp<AMessage> reply = new AMessage(kWhatAudioOutputFormatChanged, this);
- reply->setInt32("generation", mBufferGeneration);
- mRenderer->changeAudioFormat(
- format, false /* offloadOnly */, hasVideo,
- flags, mSource->isStreaming(), reply);
- }
-}
-
-void NuPlayer2::Decoder::releaseAndResetMediaBuffers() {
- for (size_t i = 0; i < mMediaBuffers.size(); i++) {
- if (mMediaBuffers[i] != NULL) {
- mMediaBuffers[i]->release();
- mMediaBuffers.editItemAt(i) = NULL;
- }
- }
- mMediaBuffers.resize(mInputBuffers.size());
- for (size_t i = 0; i < mMediaBuffers.size(); i++) {
- mMediaBuffers.editItemAt(i) = NULL;
- }
- mInputBufferIsDequeued.clear();
- mInputBufferIsDequeued.resize(mInputBuffers.size());
- for (size_t i = 0; i < mInputBufferIsDequeued.size(); i++) {
- mInputBufferIsDequeued.editItemAt(i) = false;
- }
-
- mPendingInputMessages.clear();
- mDequeuedInputBuffers.clear();
- mSkipRenderingUntilMediaTimeUs = -1;
-}
-
-bool NuPlayer2::Decoder::isStaleReply(const sp<AMessage> &msg) {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- return generation != mBufferGeneration;
-}
-
-status_t NuPlayer2::Decoder::fetchInputData(sp<AMessage> &reply) {
- sp<ABuffer> accessUnit;
- bool dropAccessUnit = true;
- do {
- status_t err = mSource->dequeueAccessUnit(mIsAudio, &accessUnit);
-
- if (err == -EWOULDBLOCK) {
- return err;
- } else if (err != OK) {
- if (err == INFO_DISCONTINUITY) {
- int32_t type;
- CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
-
- bool formatChange =
- (mIsAudio &&
- (type & ATSParser::DISCONTINUITY_AUDIO_FORMAT))
- || (!mIsAudio &&
- (type & ATSParser::DISCONTINUITY_VIDEO_FORMAT));
-
- bool timeChange = (type & ATSParser::DISCONTINUITY_TIME) != 0;
-
- ALOGI("%s discontinuity (format=%d, time=%d)",
- mIsAudio ? "audio" : "video", formatChange, timeChange);
-
- bool seamlessFormatChange = false;
- sp<AMessage> newFormat = mSource->getFormat(mIsAudio);
- if (formatChange) {
- seamlessFormatChange =
- supportsSeamlessFormatChange(newFormat);
- // treat seamless format change separately
- formatChange = !seamlessFormatChange;
- }
-
- // For format or time change, return EOS to queue EOS input,
- // then wait for EOS on output.
- if (formatChange /* not seamless */) {
- mFormatChangePending = true;
- err = ERROR_END_OF_STREAM;
- } else if (timeChange) {
- rememberCodecSpecificData(newFormat);
- mTimeChangePending = true;
- err = ERROR_END_OF_STREAM;
- } else if (seamlessFormatChange) {
- // reuse existing decoder and don't flush
- rememberCodecSpecificData(newFormat);
- continue;
- } else {
- // This stream is unaffected by the discontinuity
- return -EWOULDBLOCK;
- }
- }
-
- // reply should only be returned without a buffer set
- // when there is an error (including EOS)
- CHECK(err != OK);
-
- reply->setInt32("err", err);
- return ERROR_END_OF_STREAM;
- }
-
- dropAccessUnit = false;
- if (!mIsAudio && !mIsEncrypted) {
- // Extra safeguard if higher-level behavior changes. Otherwise, not required now.
- // Preventing the buffer from being processed (and sent to codec) if this is a later
- // round of playback but this time without prepareDrm. Or if there is a race between
- // stop (which is not blocking) and releaseDrm allowing buffers being processed after
- // Crypto has been released (GenericSource currently prevents this race though).
- // Particularly doing this check before IsAVCReferenceFrame call to prevent parsing
- // of encrypted data.
- if (mIsEncryptedObservedEarlier) {
- ALOGE("fetchInputData: mismatched mIsEncrypted/mIsEncryptedObservedEarlier (0/1)");
-
- return INVALID_OPERATION;
- }
-
- int32_t layerId = 0;
- bool haveLayerId = accessUnit->meta()->findInt32("temporal-layer-id", &layerId);
- if (mRenderer->getVideoLateByUs() > 100000LL
- && mIsVideoAVC
- && !IsAVCReferenceFrame(accessUnit)) {
- dropAccessUnit = true;
- } else if (haveLayerId && mNumVideoTemporalLayerTotal > 1) {
- // Add only one layer each time.
- if (layerId > mCurrentMaxVideoTemporalLayerId + 1
- || layerId >= mNumVideoTemporalLayerAllowed) {
- dropAccessUnit = true;
- ALOGV("dropping layer(%d), speed=%g, allowed layer count=%d, max layerId=%d",
- layerId, mPlaybackSpeed, mNumVideoTemporalLayerAllowed,
- mCurrentMaxVideoTemporalLayerId);
- } else if (layerId > mCurrentMaxVideoTemporalLayerId) {
- mCurrentMaxVideoTemporalLayerId = layerId;
- } else if (layerId == 0 && mNumVideoTemporalLayerTotal > 1
- && IsIDR(accessUnit->data(), accessUnit->size())) {
- mCurrentMaxVideoTemporalLayerId = mNumVideoTemporalLayerTotal - 1;
- }
- }
- if (dropAccessUnit) {
- if (layerId <= mCurrentMaxVideoTemporalLayerId && layerId > 0) {
- mCurrentMaxVideoTemporalLayerId = layerId - 1;
- }
- ++mNumInputFramesDropped;
- }
- }
- } while (dropAccessUnit);
-
- // ALOGV("returned a valid buffer of %s data", mIsAudio ? "mIsAudio" : "video");
-#if 0
- int64_t mediaTimeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &mediaTimeUs));
- ALOGV("[%s] feeding input buffer at media time %.3f",
- mIsAudio ? "audio" : "video",
- mediaTimeUs / 1E6);
-#endif
-
- if (mCCDecoder != NULL) {
- mCCDecoder->decode(accessUnit);
- }
-
- reply->setBuffer("buffer", accessUnit);
-
- return OK;
-}
-
-bool NuPlayer2::Decoder::onInputBufferFetched(const sp<AMessage> &msg) {
- if (mCodec == NULL) {
- ALOGE("[%s] onInputBufferFetched without a valid codec", mComponentName.c_str());
- handleError(NO_INIT);
- return false;
- }
-
- size_t bufferIx;
- CHECK(msg->findSize("buffer-ix", &bufferIx));
- CHECK_LT(bufferIx, mInputBuffers.size());
- sp<MediaCodecBuffer> codecBuffer = mInputBuffers[bufferIx];
-
- sp<ABuffer> buffer;
- bool hasBuffer = msg->findBuffer("buffer", &buffer);
- bool needsCopy = true;
-
- if (buffer == NULL /* includes !hasBuffer */) {
- int32_t streamErr = ERROR_END_OF_STREAM;
- CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
-
- CHECK(streamErr != OK);
-
- // attempt to queue EOS
- status_t err = mCodec->queueInputBuffer(
- bufferIx,
- 0,
- 0,
- 0,
- AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
- if (err == OK) {
- mInputBufferIsDequeued.editItemAt(bufferIx) = false;
- } else if (streamErr == ERROR_END_OF_STREAM) {
- streamErr = err;
- // err will not be ERROR_END_OF_STREAM
- }
-
- if (streamErr != ERROR_END_OF_STREAM) {
- ALOGE("Stream error for [%s] (err=%d), EOS %s queued",
- mComponentName.c_str(),
- streamErr,
- err == OK ? "successfully" : "unsuccessfully");
- handleError(streamErr);
- }
- } else {
- sp<AMessage> extra;
- if (buffer->meta()->findMessage("extra", &extra) && extra != NULL) {
- int64_t resumeAtMediaTimeUs;
- if (extra->findInt64(
- "resume-at-mediaTimeUs", &resumeAtMediaTimeUs)) {
- ALOGI("[%s] suppressing rendering until %lld us",
- mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
- mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
- }
- }
-
- int64_t timeUs = 0;
- uint32_t flags = 0;
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-
- int32_t eos, csd;
- // we do not expect SYNCFRAME for decoder
- if (buffer->meta()->findInt32("eos", &eos) && eos) {
- flags |= AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
- } else if (buffer->meta()->findInt32("csd", &csd) && csd) {
- flags |= AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG;
- }
-
- // Modular DRM
- MediaBufferBase *mediaBuf = NULL;
- sp<AMediaCodecCryptoInfoWrapper> cryptInfo;
-
- // copy into codec buffer
- if (needsCopy) {
- if (buffer->size() > codecBuffer->capacity()) {
- handleError(ERROR_BUFFER_TOO_SMALL);
- mDequeuedInputBuffers.push_back(bufferIx);
- return false;
- }
-
- if (buffer->data() != NULL) {
- codecBuffer->setRange(0, buffer->size());
- memcpy(codecBuffer->data(), buffer->data(), buffer->size());
- } else { // No buffer->data()
- //Modular DRM
- sp<RefBase> holder;
- if (buffer->meta()->findObject("mediaBufferHolder", &holder)) {
- mediaBuf = (holder != nullptr) ?
- static_cast<MediaBufferHolder*>(holder.get())->mediaBuffer() : nullptr;
- }
- if (mediaBuf != NULL) {
- if (mediaBuf->size() > codecBuffer->capacity()) {
- handleError(ERROR_BUFFER_TOO_SMALL);
- mDequeuedInputBuffers.push_back(bufferIx);
- return false;
- }
-
- codecBuffer->setRange(0, mediaBuf->size());
- memcpy(codecBuffer->data(), mediaBuf->data(), mediaBuf->size());
-
- MetaDataBase &meta_data = mediaBuf->meta_data();
- cryptInfo = AMediaCodecCryptoInfoWrapper::Create(meta_data);
- } else { // No mediaBuf
- ALOGE("onInputBufferFetched: buffer->data()/mediaBuf are NULL for %p",
- buffer.get());
- handleError(UNKNOWN_ERROR);
- return false;
- }
- } // buffer->data()
- } // needsCopy
-
- sp<RefBase> cryptInfoObj;
- if (buffer->meta()->findObject("cryptInfo", &cryptInfoObj)) {
- cryptInfo = static_cast<AMediaCodecCryptoInfoWrapper *>(cryptInfoObj.get());
- }
-
- status_t err;
- if (cryptInfo != NULL) {
- err = mCodec->queueSecureInputBuffer(
- bufferIx,
- codecBuffer->offset(),
- cryptInfo,
- timeUs,
- flags);
- // synchronous call so done with cryptInfo here
- } else {
- err = mCodec->queueInputBuffer(
- bufferIx,
- codecBuffer->offset(),
- codecBuffer->size(),
- timeUs,
- flags);
- } // no cryptInfo
-
- if (err != OK) {
- ALOGE("onInputBufferFetched: queue%sInputBuffer failed for [%s] (err=%d)",
- (cryptInfo != NULL ? "Secure" : ""),
- mComponentName.c_str(), err);
- handleError(err);
- } else {
- mInputBufferIsDequeued.editItemAt(bufferIx) = false;
- }
-
- } // buffer != NULL
- return true;
-}
-
-void NuPlayer2::Decoder::onRenderBuffer(const sp<AMessage> &msg) {
- status_t err;
- int32_t render;
- size_t bufferIx;
- int32_t eos;
- CHECK(msg->findSize("buffer-ix", &bufferIx));
-
- if (!mIsAudio) {
- int64_t timeUs;
- sp<MediaCodecBuffer> buffer = mOutputBuffers[bufferIx];
- buffer->meta()->findInt64("timeUs", &timeUs);
-
- if (mCCDecoder != NULL && mCCDecoder->isSelected()) {
- mCCDecoder->display(timeUs);
- }
- }
-
- if (mCodec == NULL) {
- err = NO_INIT;
- } else if (msg->findInt32("render", &render) && render) {
- int64_t timestampNs;
-        CHECK(msg->findInt64("timestampNs", &timestampNs));
- err = mCodec->releaseOutputBufferAtTime(bufferIx, timestampNs);
- } else {
- mNumOutputFramesDropped += !mIsAudio;
- err = mCodec->releaseOutputBuffer(bufferIx, false /* render */);
- }
- if (err != OK) {
- ALOGE("failed to release output buffer for [%s] (err=%d)",
- mComponentName.c_str(), err);
- handleError(err);
- }
- if (msg->findInt32("eos", &eos) && eos
- && isDiscontinuityPending()) {
- finishHandleDiscontinuity(true /* flushOnTimeChange */);
- }
-}
-
-bool NuPlayer2::Decoder::isDiscontinuityPending() const {
- return mFormatChangePending || mTimeChangePending;
-}
-
-void NuPlayer2::Decoder::finishHandleDiscontinuity(bool flushOnTimeChange) {
- ALOGV("finishHandleDiscontinuity: format %d, time %d, flush %d",
- mFormatChangePending, mTimeChangePending, flushOnTimeChange);
-
- // If we have format change, pause and wait to be killed;
- // If we have time change only, flush and restart fetching.
-
- if (mFormatChangePending) {
- mPaused = true;
- } else if (mTimeChangePending) {
- if (flushOnTimeChange) {
- doFlush(false /* notifyComplete */);
- signalResume(false /* notifyComplete */);
- }
- }
-
- // Notify NuPlayer2 to either shutdown decoder, or rescan sources
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", kWhatInputDiscontinuity);
- msg->setInt32("formatChange", mFormatChangePending);
- msg->post();
-
- mFormatChangePending = false;
- mTimeChangePending = false;
-}
-
-bool NuPlayer2::Decoder::supportsSeamlessAudioFormatChange(
- const sp<AMessage> &targetFormat) const {
- if (targetFormat == NULL) {
- return true;
- }
-
- AString mime;
- if (!targetFormat->findString("mime", &mime)) {
- return false;
- }
-
- if (!strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_AAC)) {
- // field-by-field comparison
- const char * keys[] = { "channel-count", "sample-rate", "is-adts" };
- for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
- int32_t oldVal, newVal;
- if (!mInputFormat->getInt32(keys[i], &oldVal) ||
- !targetFormat->findInt32(keys[i], &newVal) ||
- oldVal != newVal) {
- return false;
- }
- }
-
- sp<ABuffer> newBuf;
- uint8_t *oldBufData = NULL;
- size_t oldBufSize = 0;
- if (mInputFormat->getBuffer("csd-0", (void**)&oldBufData, &oldBufSize) &&
- targetFormat->findBuffer("csd-0", &newBuf)) {
- if (oldBufSize != newBuf->size()) {
- return false;
- }
- return !memcmp(oldBufData, newBuf->data(), oldBufSize);
- }
- }
- return false;
-}
-
-bool NuPlayer2::Decoder::supportsSeamlessFormatChange(const sp<AMessage> &targetFormat) const {
- if (mInputFormat == NULL) {
- return false;
- }
-
- if (targetFormat == NULL) {
- return true;
- }
-
- AString oldMime, newMime;
- if (!mInputFormat->getString("mime", &oldMime)
- || !targetFormat->findString("mime", &newMime)
- || !(oldMime == newMime)) {
- return false;
- }
-
- bool audio = !strncasecmp(oldMime.c_str(), "audio/", strlen("audio/"));
- bool seamless;
- if (audio) {
- seamless = supportsSeamlessAudioFormatChange(targetFormat);
- } else {
- int32_t isAdaptive;
- seamless = (mCodec != NULL &&
- mInputFormat->getInt32("adaptive-playback", &isAdaptive) &&
- isAdaptive);
- }
-
- ALOGV("%s seamless support for %s", seamless ? "yes" : "no", oldMime.c_str());
- return seamless;
-}
-
-void NuPlayer2::Decoder::rememberCodecSpecificData(const sp<AMessage> &format) {
- if (format == NULL) {
- return;
- }
- mCSDsForCurrentFormat.clear();
- for (int32_t i = 0; ; ++i) {
- AString tag = "csd-";
- tag.append(i);
- sp<ABuffer> buffer;
- if (!format->findBuffer(tag.c_str(), &buffer)) {
- break;
- }
- mCSDsForCurrentFormat.push(buffer);
- }
-}
-
-void NuPlayer2::Decoder::notifyResumeCompleteIfNecessary() {
- if (mResumePending) {
- mResumePending = false;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatResumeCompleted);
- notify->post();
- }
-}
-
-} // namespace android
-
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.h
deleted file mode 100644
index fdfb10e..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Decoder.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NUPLAYER2_DECODER_H_
-#define NUPLAYER2_DECODER_H_
-
-#include "NuPlayer2.h"
-
-#include "NuPlayer2DecoderBase.h"
-
-namespace android {
-
-class MediaCodecBuffer;
-
-struct AMediaCodecWrapper;
-struct AMediaFormatWrapper;
-
-struct NuPlayer2::Decoder : public DecoderBase {
-    Decoder(const sp<AMessage> &notify,
- const sp<Source> &source,
- pid_t pid,
- uid_t uid,
- const sp<Renderer> &renderer = NULL,
- const sp<ANativeWindowWrapper> &nww = NULL,
- const sp<CCDecoder> &ccDecoder = NULL);
-
- virtual sp<AMessage> getStats() const;
-
- // sets the output surface of video decoders.
- virtual status_t setVideoSurface(const sp<ANativeWindowWrapper> &nww);
-
- virtual status_t releaseCrypto();
-
-protected:
- virtual ~Decoder();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
- virtual void onConfigure(const sp<AMessage> &format);
-    virtual void onSetParameters(const sp<AMessage> &params);
- virtual void onSetRenderer(const sp<Renderer> &renderer);
- virtual void onResume(bool notifyComplete);
- virtual void onFlush();
- virtual void onShutdown(bool notifyComplete);
- virtual bool doRequestBuffers();
-
-private:
- enum {
- kWhatCodecNotify = 'cdcN',
- kWhatRenderBuffer = 'rndr',
- kWhatSetVideoSurface = 'sSur',
- kWhatAudioOutputFormatChanged = 'aofc',
- kWhatDrmReleaseCrypto = 'rDrm',
- };
-
- enum {
- kMaxNumVideoTemporalLayers = 32,
- };
-
- sp<ANativeWindowWrapper> mNativeWindow;
-
- sp<Source> mSource;
- sp<Renderer> mRenderer;
- sp<CCDecoder> mCCDecoder;
-
- sp<AMediaFormatWrapper> mInputFormat;
- sp<AMediaCodecWrapper> mCodec;
-
- List<sp<AMessage> > mPendingInputMessages;
-
- Vector<sp<MediaCodecBuffer> > mInputBuffers;
- Vector<sp<MediaCodecBuffer> > mOutputBuffers;
- Vector<sp<ABuffer> > mCSDsForCurrentFormat;
- Vector<sp<ABuffer> > mCSDsToSubmit;
- Vector<bool> mInputBufferIsDequeued;
- Vector<MediaBuffer *> mMediaBuffers;
- Vector<size_t> mDequeuedInputBuffers;
-
- const pid_t mPid;
- const uid_t mUid;
- int64_t mSkipRenderingUntilMediaTimeUs;
- int64_t mNumFramesTotal;
- int64_t mNumInputFramesDropped;
- int64_t mNumOutputFramesDropped;
- int32_t mVideoWidth;
- int32_t mVideoHeight;
- bool mIsAudio;
- bool mIsVideoAVC;
- bool mIsSecure;
- bool mIsEncrypted;
- bool mIsEncryptedObservedEarlier;
- bool mFormatChangePending;
- bool mTimeChangePending;
- float mFrameRateTotal;
- float mPlaybackSpeed;
- int32_t mNumVideoTemporalLayerTotal;
- int32_t mNumVideoTemporalLayerAllowed;
- int32_t mCurrentMaxVideoTemporalLayerId;
- float mVideoTemporalLayerAggregateFps[kMaxNumVideoTemporalLayers];
-
- bool mResumePending;
- AString mComponentName;
-
- void handleError(int32_t err);
- bool handleAnInputBuffer(size_t index);
- bool handleAnOutputBuffer(
- size_t index,
- size_t offset,
- size_t size,
- int64_t timeUs,
- int32_t flags);
- void handleOutputFormatChange(const sp<AMessage> &format);
-
- void releaseAndResetMediaBuffers();
- bool isStaleReply(const sp<AMessage> &msg);
-
- void doFlush(bool notifyComplete);
- status_t fetchInputData(sp<AMessage> &reply);
- bool onInputBufferFetched(const sp<AMessage> &msg);
- void onRenderBuffer(const sp<AMessage> &msg);
-
- bool supportsSeamlessFormatChange(const sp<AMessage> &to) const;
- bool supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const;
- void rememberCodecSpecificData(const sp<AMessage> &format);
- bool isDiscontinuityPending() const;
- void finishHandleDiscontinuity(bool flushOnTimeChange);
-
- void notifyResumeCompleteIfNecessary();
-
- void onReleaseCrypto(const sp<AMessage>& msg);
-
- DISALLOW_EVIL_CONSTRUCTORS(Decoder);
-};
-
-} // namespace android
-
-#endif // NUPLAYER2_DECODER_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
deleted file mode 100644
index 914f29f..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.cpp
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2DecoderBase"
-#include <utils/Log.h>
-#include <inttypes.h>
-
-#include "NuPlayer2DecoderBase.h"
-
-#include "NuPlayer2Renderer.h"
-
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-namespace android {
-
-NuPlayer2::DecoderBase::DecoderBase(const sp<AMessage> &notify)
- : mNotify(notify),
- mBufferGeneration(0),
- mPaused(false),
- mStats(new AMessage),
- mRequestInputBuffersPending(false) {
- // Every decoder has its own looper because MediaCodec operations
- // are blocking, but NuPlayer2 needs asynchronous operations.
- mDecoderLooper = new ALooper;
- mDecoderLooper->setName("NP2Decoder");
- mDecoderLooper->start(false, /* runOnCallingThread */
- true, /* canCallJava */
- ANDROID_PRIORITY_AUDIO);
-}
-
-NuPlayer2::DecoderBase::~DecoderBase() {
- stopLooper();
-}
-
-static
-status_t PostAndAwaitResponse(
- const sp<AMessage> &msg, sp<AMessage> *response) {
- status_t err = msg->postAndAwaitResponse(response);
-
- if (err != OK) {
- return err;
- }
-
- if (!(*response)->findInt32("err", &err)) {
- err = OK;
- }
-
- return err;
-}
-
-void NuPlayer2::DecoderBase::configure(const sp<AMessage> &format) {
- sp<AMessage> msg = new AMessage(kWhatConfigure, this);
- msg->setMessage("format", format);
- msg->post();
-}
-
-void NuPlayer2::DecoderBase::init() {
- mDecoderLooper->registerHandler(this);
-}
-
-void NuPlayer2::DecoderBase::stopLooper() {
- mDecoderLooper->unregisterHandler(id());
- mDecoderLooper->stop();
-}
-
-void NuPlayer2::DecoderBase::setParameters(const sp<AMessage> &params) {
- sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
- msg->setMessage("params", params);
- msg->post();
-}
-
-void NuPlayer2::DecoderBase::setRenderer(const sp<Renderer> &renderer) {
- sp<AMessage> msg = new AMessage(kWhatSetRenderer, this);
- msg->setObject("renderer", renderer);
- msg->post();
-}
-
-void NuPlayer2::DecoderBase::pause() {
- sp<AMessage> msg = new AMessage(kWhatPause, this);
-
- sp<AMessage> response;
- PostAndAwaitResponse(msg, &response);
-}
-
-void NuPlayer2::DecoderBase::signalFlush() {
- (new AMessage(kWhatFlush, this))->post();
-}
-
-void NuPlayer2::DecoderBase::signalResume(bool notifyComplete) {
- sp<AMessage> msg = new AMessage(kWhatResume, this);
- msg->setInt32("notifyComplete", notifyComplete);
- msg->post();
-}
-
-void NuPlayer2::DecoderBase::initiateShutdown() {
- (new AMessage(kWhatShutdown, this))->post();
-}
-
-void NuPlayer2::DecoderBase::onRequestInputBuffers() {
- if (mRequestInputBuffersPending) {
- return;
- }
-
-    // doRequestBuffers() returns true if we should request more data
- if (doRequestBuffers()) {
- mRequestInputBuffersPending = true;
-
- sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
- msg->post(10 * 1000LL);
- }
-}
-
-void NuPlayer2::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {
-
- switch (msg->what()) {
- case kWhatConfigure:
- {
- sp<AMessage> format;
- CHECK(msg->findMessage("format", &format));
- onConfigure(format);
- break;
- }
-
- case kWhatSetParameters:
- {
- sp<AMessage> params;
-            CHECK(msg->findMessage("params", &params));
- onSetParameters(params);
- break;
- }
-
- case kWhatSetRenderer:
- {
- sp<RefBase> obj;
- CHECK(msg->findObject("renderer", &obj));
- onSetRenderer(static_cast<Renderer *>(obj.get()));
- break;
- }
-
- case kWhatPause:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- mPaused = true;
-
- (new AMessage)->postReply(replyID);
- break;
- }
-
- case kWhatRequestInputBuffers:
- {
- mRequestInputBuffersPending = false;
- onRequestInputBuffers();
- break;
- }
-
- case kWhatFlush:
- {
- onFlush();
- break;
- }
-
- case kWhatResume:
- {
- int32_t notifyComplete;
-            CHECK(msg->findInt32("notifyComplete", &notifyComplete));
-
- onResume(notifyComplete);
- break;
- }
-
- case kWhatShutdown:
- {
- onShutdown(true);
- break;
- }
-
- default:
- TRESPASS();
- break;
- }
-}
-
-void NuPlayer2::DecoderBase::handleError(int32_t err)
-{
- // We cannot immediately release the codec due to buffers still outstanding
- // in the renderer. We signal to the player the error so it can shutdown/release the
- // decoder after flushing and increment the generation to discard unnecessary messages.
-
- ++mBufferGeneration;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatError);
- notify->setInt32("err", err);
- notify->post();
-}
-
-} // namespace android
-
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.h b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.h
deleted file mode 100644
index 1e57f0d..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderBase.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NUPLAYER2_DECODER_BASE_H_
-
-#define NUPLAYER2_DECODER_BASE_H_
-
-#include "NuPlayer2.h"
-
-#include <media/stagefright/foundation/AHandler.h>
-
-namespace android {
-
-struct ABuffer;
-struct ANativeWindowWrapper;
-struct MediaCodec;
-class MediaBuffer;
-class MediaCodecBuffer;
-
-struct NuPlayer2::DecoderBase : public AHandler {
-    explicit DecoderBase(const sp<AMessage> &notify);
-
- void configure(const sp<AMessage> &format);
- void init();
-    void setParameters(const sp<AMessage> &params);
-
- // Synchronous call to ensure decoder will not request or send out data.
- void pause();
-
- void setRenderer(const sp<Renderer> &renderer);
- virtual status_t setVideoSurface(const sp<ANativeWindowWrapper> &) { return INVALID_OPERATION; }
-
- void signalFlush();
- void signalResume(bool notifyComplete);
- void initiateShutdown();
-
- virtual sp<AMessage> getStats() const {
- return mStats;
- }
-
- virtual status_t releaseCrypto() {
- return INVALID_OPERATION;
- }
-
- enum {
- kWhatInputDiscontinuity = 'inDi',
- kWhatVideoSizeChanged = 'viSC',
- kWhatFlushCompleted = 'flsC',
- kWhatShutdownCompleted = 'shDC',
- kWhatResumeCompleted = 'resC',
- kWhatEOS = 'eos ',
- kWhatError = 'err ',
- };
-
-protected:
-
- virtual ~DecoderBase();
-
- void stopLooper();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
- virtual void onConfigure(const sp<AMessage> &format) = 0;
-    virtual void onSetParameters(const sp<AMessage> &params) = 0;
- virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
- virtual void onResume(bool notifyComplete) = 0;
- virtual void onFlush() = 0;
- virtual void onShutdown(bool notifyComplete) = 0;
-
- void onRequestInputBuffers();
- virtual bool doRequestBuffers() = 0;
- virtual void handleError(int32_t err);
-
- sp<AMessage> mNotify;
- int32_t mBufferGeneration;
- bool mPaused;
- sp<AMessage> mStats;
-
-private:
- enum {
- kWhatConfigure = 'conf',
- kWhatSetParameters = 'setP',
- kWhatSetRenderer = 'setR',
- kWhatPause = 'paus',
- kWhatRequestInputBuffers = 'reqB',
- kWhatFlush = 'flus',
- kWhatShutdown = 'shuD',
- };
-
- sp<ALooper> mDecoderLooper;
- bool mRequestInputBuffersPending;
-
- DISALLOW_EVIL_CONSTRUCTORS(DecoderBase);
-};
-
-} // namespace android
-
-#endif // NUPLAYER2_DECODER_BASE_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
deleted file mode 100644
index 0514e88..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.cpp
+++ /dev/null
@@ -1,434 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2DecoderPassThrough"
-#include <utils/Log.h>
-#include <inttypes.h>
-
-#include "NuPlayer2DecoderPassThrough.h"
-
-#include "NuPlayer2Renderer.h"
-#include "NuPlayer2Source.h"
-
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaErrors.h>
-
-#include "ATSParser.h"
-
-namespace android {
-
-// TODO optimize buffer size for power consumption
-// The offload read buffer size is 32 KB but 24 KB uses less power.
-static const size_t kAggregateBufferSizeBytes = 24 * 1024;
-static const size_t kMaxCachedBytes = 200000;
-
-NuPlayer2::DecoderPassThrough::DecoderPassThrough(
-        const sp<AMessage> &notify,
- const sp<Source> &source,
- const sp<Renderer> &renderer)
- : DecoderBase(notify),
- mSource(source),
- mRenderer(renderer),
- mSkipRenderingUntilMediaTimeUs(-1LL),
- mReachedEOS(true),
- mPendingAudioErr(OK),
- mPendingBuffersToDrain(0),
- mCachedBytes(0),
- mComponentName("pass through decoder") {
- ALOGW_IF(renderer == NULL, "expect a non-NULL renderer");
-}
-
-NuPlayer2::DecoderPassThrough::~DecoderPassThrough() {
-}
-
-void NuPlayer2::DecoderPassThrough::onConfigure(const sp<AMessage> &format) {
- ALOGV("[%s] onConfigure", mComponentName.c_str());
- mCachedBytes = 0;
- mPendingBuffersToDrain = 0;
- mReachedEOS = false;
- ++mBufferGeneration;
-
- onRequestInputBuffers();
-
- int32_t hasVideo = 0;
- format->findInt32("has-video", &hasVideo);
-
- // The audio sink is already opened before the PassThrough decoder is created.
- // Opening again might be relevant if decoder is instantiated after shutdown and
- // format is different.
- status_t err = mRenderer->openAudioSink(
- format, true /* offloadOnly */, hasVideo,
- AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */, mSource->isStreaming());
- if (err != OK) {
- handleError(err);
- }
-}
-
-void NuPlayer2::DecoderPassThrough::onSetParameters(const sp<AMessage> &/*params*/) {
- ALOGW("onSetParameters() called unexpectedly");
-}
-
-void NuPlayer2::DecoderPassThrough::onSetRenderer(
- const sp<Renderer> &renderer) {
- // renderer can't be changed during offloading
- ALOGW_IF(renderer != mRenderer,
- "ignoring request to change renderer");
-}
-
-bool NuPlayer2::DecoderPassThrough::isStaleReply(const sp<AMessage> &msg) {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- return generation != mBufferGeneration;
-}
-
-bool NuPlayer2::DecoderPassThrough::isDoneFetching() const {
- ALOGV("[%s] mCachedBytes = %zu, mReachedEOS = %d mPaused = %d",
- mComponentName.c_str(), mCachedBytes, mReachedEOS, mPaused);
-
- return mCachedBytes >= kMaxCachedBytes || mReachedEOS || mPaused;
-}
-
-/*
- * returns true if we should request more data
- */
-bool NuPlayer2::DecoderPassThrough::doRequestBuffers() {
- status_t err = OK;
- while (!isDoneFetching()) {
- sp<AMessage> msg = new AMessage();
-
- err = fetchInputData(msg);
- if (err != OK) {
- break;
- }
-
- onInputBufferFetched(msg);
- }
-
- return err == -EWOULDBLOCK
- && mSource->feedMoreTSData() == OK;
-}
-
-status_t NuPlayer2::DecoderPassThrough::dequeueAccessUnit(sp<ABuffer> *accessUnit) {
- status_t err;
-
- // Did we save an accessUnit earlier because of a discontinuity?
- if (mPendingAudioAccessUnit != NULL) {
- *accessUnit = mPendingAudioAccessUnit;
- mPendingAudioAccessUnit.clear();
- err = mPendingAudioErr;
- ALOGV("feedDecoderInputData() use mPendingAudioAccessUnit");
- } else {
- err = mSource->dequeueAccessUnit(true /* audio */, accessUnit);
- }
-
- if (err == INFO_DISCONTINUITY || err == ERROR_END_OF_STREAM) {
- if (mAggregateBuffer != NULL) {
- // We already have some data so save this for later.
- mPendingAudioErr = err;
- mPendingAudioAccessUnit = *accessUnit;
- (*accessUnit).clear();
- ALOGD("return aggregated buffer and save err(=%d) for later", err);
- err = OK;
- }
- }
-
- return err;
-}
-
-sp<ABuffer> NuPlayer2::DecoderPassThrough::aggregateBuffer(
- const sp<ABuffer> &accessUnit) {
- sp<ABuffer> aggregate;
-
- if (accessUnit == NULL) {
- // accessUnit is saved to mPendingAudioAccessUnit
- // return current mAggregateBuffer
- aggregate = mAggregateBuffer;
- mAggregateBuffer.clear();
- return aggregate;
- }
-
- size_t smallSize = accessUnit->size();
- if ((mAggregateBuffer == NULL)
- // Don't bother if only room for a few small buffers.
- && (smallSize < (kAggregateBufferSizeBytes / 3))) {
- // Create a larger buffer for combining smaller buffers from the extractor.
- mAggregateBuffer = new ABuffer(kAggregateBufferSizeBytes);
- mAggregateBuffer->setRange(0, 0); // start empty
- }
-
- if (mAggregateBuffer != NULL) {
- int64_t timeUs;
- int64_t dummy;
- bool smallTimestampValid = accessUnit->meta()->findInt64("timeUs", &timeUs);
- bool bigTimestampValid = mAggregateBuffer->meta()->findInt64("timeUs", &dummy);
- // Will the smaller buffer fit?
- size_t bigSize = mAggregateBuffer->size();
- size_t roomLeft = mAggregateBuffer->capacity() - bigSize;
- // Should we save this small buffer for the next big buffer?
- // If the first small buffer did not have a timestamp then save
- // any buffer that does have a timestamp until the next big buffer.
- if ((smallSize > roomLeft)
- || (!bigTimestampValid && (bigSize > 0) && smallTimestampValid)) {
- mPendingAudioErr = OK;
- mPendingAudioAccessUnit = accessUnit;
- aggregate = mAggregateBuffer;
- mAggregateBuffer.clear();
- } else {
- // Grab time from first small buffer if available.
- if ((bigSize == 0) && smallTimestampValid) {
- mAggregateBuffer->meta()->setInt64("timeUs", timeUs);
- }
- // Append small buffer to the bigger buffer.
- memcpy(mAggregateBuffer->base() + bigSize, accessUnit->data(), smallSize);
- bigSize += smallSize;
- mAggregateBuffer->setRange(0, bigSize);
-
- ALOGV("feedDecoderInputData() smallSize = %zu, bigSize = %zu, capacity = %zu",
- smallSize, bigSize, mAggregateBuffer->capacity());
- }
- } else {
- // decided not to aggregate
- aggregate = accessUnit;
- }
-
- return aggregate;
-}
-
-status_t NuPlayer2::DecoderPassThrough::fetchInputData(sp<AMessage> &reply) {
- sp<ABuffer> accessUnit;
-
- do {
- status_t err = dequeueAccessUnit(&accessUnit);
-
- if (err == -EWOULDBLOCK) {
- // Flush out the aggregate buffer to try to avoid underrun.
- accessUnit = aggregateBuffer(NULL /* accessUnit */);
- if (accessUnit != NULL) {
- break;
- }
- return err;
- } else if (err != OK) {
- if (err == INFO_DISCONTINUITY) {
- int32_t type;
- CHECK(accessUnit->meta()->findInt32("discontinuity", &type));
-
- bool formatChange =
- (type & ATSParser::DISCONTINUITY_AUDIO_FORMAT) != 0;
-
- bool timeChange =
- (type & ATSParser::DISCONTINUITY_TIME) != 0;
-
- ALOGI("audio discontinuity (formatChange=%d, time=%d)",
- formatChange, timeChange);
-
- if (formatChange || timeChange) {
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", kWhatInputDiscontinuity);
- // will perform seamless format change,
- // only notify NuPlayer2 to scan sources
- msg->setInt32("formatChange", false);
- msg->post();
- }
-
- if (timeChange) {
- doFlush(false /* notifyComplete */);
- err = OK;
- } else if (formatChange) {
- // do seamless format change
- err = OK;
- } else {
- // This stream is unaffected by the discontinuity
- return -EWOULDBLOCK;
- }
- }
-
- reply->setInt32("err", err);
- return OK;
- }
-
- accessUnit = aggregateBuffer(accessUnit);
- } while (accessUnit == NULL);
-
-#if 0
- int64_t mediaTimeUs;
- CHECK(accessUnit->meta()->findInt64("timeUs", &mediaTimeUs));
- ALOGV("feeding audio input buffer at media time %.2f secs",
- mediaTimeUs / 1E6);
-#endif
-
- reply->setBuffer("buffer", accessUnit);
-
- return OK;
-}
-
-void NuPlayer2::DecoderPassThrough::onInputBufferFetched(
- const sp<AMessage> &msg) {
- if (mReachedEOS) {
- return;
- }
-
- sp<ABuffer> buffer;
- bool hasBuffer = msg->findBuffer("buffer", &buffer);
- if (buffer == NULL) {
- int32_t streamErr = ERROR_END_OF_STREAM;
- CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
- if (streamErr == OK) {
- return;
- }
-
- if (streamErr != ERROR_END_OF_STREAM) {
- handleError(streamErr);
- }
- mReachedEOS = true;
- if (mRenderer != NULL) {
- mRenderer->queueEOS(true /* audio */, ERROR_END_OF_STREAM);
- }
- return;
- }
-
- sp<AMessage> extra;
- if (buffer->meta()->findMessage("extra", &extra) && extra != NULL) {
- int64_t resumeAtMediaTimeUs;
- if (extra->findInt64(
- "resume-at-mediatimeUs", &resumeAtMediaTimeUs)) {
- ALOGI("[%s] suppressing rendering until %lld us",
- mComponentName.c_str(), (long long)resumeAtMediaTimeUs);
- mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
- }
- }
-
- int32_t bufferSize = buffer->size();
- mCachedBytes += bufferSize;
-
- int64_t timeUs = 0;
- CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
- if (mSkipRenderingUntilMediaTimeUs >= 0) {
- if (timeUs < mSkipRenderingUntilMediaTimeUs) {
- ALOGV("[%s] dropping buffer at time %lld as requested.",
- mComponentName.c_str(), (long long)timeUs);
-
- onBufferConsumed(bufferSize);
- return;
- }
-
- mSkipRenderingUntilMediaTimeUs = -1;
- }
-
- if (mRenderer == NULL) {
- onBufferConsumed(bufferSize);
- return;
- }
-
- sp<AMessage> reply = new AMessage(kWhatBufferConsumed, this);
- reply->setInt32("generation", mBufferGeneration);
- reply->setInt32("size", bufferSize);
-
- sp<MediaCodecBuffer> mcBuffer = new MediaCodecBuffer(nullptr, buffer);
- mcBuffer->meta()->setInt64("timeUs", timeUs);
-
- mRenderer->queueBuffer(true /* audio */, mcBuffer, reply);
-
- ++mPendingBuffersToDrain;
- ALOGV("onInputBufferFilled: #ToDrain = %zu, cachedBytes = %zu",
- mPendingBuffersToDrain, mCachedBytes);
-}
-
-void NuPlayer2::DecoderPassThrough::onBufferConsumed(int32_t size) {
- --mPendingBuffersToDrain;
- mCachedBytes -= size;
- ALOGV("onBufferConsumed: #ToDrain = %zu, cachedBytes = %zu",
- mPendingBuffersToDrain, mCachedBytes);
- onRequestInputBuffers();
-}
-
-void NuPlayer2::DecoderPassThrough::onResume(bool notifyComplete) {
- mPaused = false;
-
- onRequestInputBuffers();
-
- if (notifyComplete) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatResumeCompleted);
- notify->post();
- }
-}
-
-void NuPlayer2::DecoderPassThrough::doFlush(bool notifyComplete) {
- ++mBufferGeneration;
- mSkipRenderingUntilMediaTimeUs = -1;
- mPendingAudioAccessUnit.clear();
- mPendingAudioErr = OK;
- mAggregateBuffer.clear();
-
- if (mRenderer != NULL) {
- mRenderer->flush(true /* audio */, notifyComplete);
- mRenderer->signalTimeDiscontinuity();
- }
-
- mPendingBuffersToDrain = 0;
- mCachedBytes = 0;
- mReachedEOS = false;
-}
-
-void NuPlayer2::DecoderPassThrough::onFlush() {
- doFlush(true /* notifyComplete */);
-
- mPaused = true;
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatFlushCompleted);
- notify->post();
-
-}
-
-void NuPlayer2::DecoderPassThrough::onShutdown(bool notifyComplete) {
- ++mBufferGeneration;
- mSkipRenderingUntilMediaTimeUs = -1;
-
- if (notifyComplete) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatShutdownCompleted);
- notify->post();
- }
-
- mReachedEOS = true;
-}
-
-void NuPlayer2::DecoderPassThrough::onMessageReceived(const sp<AMessage> &msg) {
- ALOGV("[%s] onMessage: %s", mComponentName.c_str(),
- msg->debugString().c_str());
-
- switch (msg->what()) {
- case kWhatBufferConsumed:
- {
- if (!isStaleReply(msg)) {
- int32_t size;
- CHECK(msg->findInt32("size", &size));
- onBufferConsumed(size);
- }
- break;
- }
-
- default:
- DecoderBase::onMessageReceived(msg);
- break;
- }
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.h b/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.h
deleted file mode 100644
index 838c60a..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2DecoderPassThrough.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NUPLAYER2_DECODER_PASS_THROUGH_H_
-
-#define NUPLAYER2_DECODER_PASS_THROUGH_H_
-
-#include "NuPlayer2.h"
-
-#include "NuPlayer2DecoderBase.h"
-
-namespace android {
-
-struct NuPlayer2::DecoderPassThrough : public DecoderBase {
-    DecoderPassThrough(const sp<AMessage> &notify,
- const sp<Source> &source,
- const sp<Renderer> &renderer);
-
-protected:
-
- virtual ~DecoderPassThrough();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
- virtual void onConfigure(const sp<AMessage> &format);
-    virtual void onSetParameters(const sp<AMessage> &params);
- virtual void onSetRenderer(const sp<Renderer> &renderer);
- virtual void onResume(bool notifyComplete);
- virtual void onFlush();
- virtual void onShutdown(bool notifyComplete);
- virtual bool doRequestBuffers();
-
-private:
- enum {
- kWhatBufferConsumed = 'bufC',
- };
-
- sp<Source> mSource;
- sp<Renderer> mRenderer;
- int64_t mSkipRenderingUntilMediaTimeUs;
-
- bool mReachedEOS;
-
- // Used by feedDecoderInputData to aggregate small buffers into
- // one large buffer.
- sp<ABuffer> mPendingAudioAccessUnit;
- status_t mPendingAudioErr;
- sp<ABuffer> mAggregateBuffer;
-
- // mPendingBuffersToDrain are only for debugging. It can be removed
- // when the power investigation is done.
- size_t mPendingBuffersToDrain;
- size_t mCachedBytes;
- AString mComponentName;
-
- bool isStaleReply(const sp<AMessage> &msg);
- bool isDoneFetching() const;
-
- status_t dequeueAccessUnit(sp<ABuffer> *accessUnit);
- sp<ABuffer> aggregateBuffer(const sp<ABuffer> &accessUnit);
- status_t fetchInputData(sp<AMessage> &reply);
- void doFlush(bool notifyComplete);
-
- void onInputBufferFetched(const sp<AMessage> &msg);
- void onBufferConsumed(int32_t size);
-
- DISALLOW_EVIL_CONSTRUCTORS(DecoderPassThrough);
-};
-
-} // namespace android
-
-#endif // NUPLAYER2_DECODER_PASS_THROUGH_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
deleted file mode 100644
index 1876496..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.cpp
+++ /dev/null
@@ -1,1010 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2Driver"
-#include <inttypes.h>
-#include <android-base/macros.h>
-#include <utils/Log.h>
-#include <cutils/properties.h>
-
-#include "NuPlayer2Driver.h"
-
-#include "NuPlayer2.h"
-#include "NuPlayer2Source.h"
-
-#include <media/DataSourceDesc.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/foundation/ByteUtils.h>
-#include <media/stagefright/MediaClock.h>
-#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
-#include <media/IMediaAnalyticsService.h>
-
-using google::protobuf::RepeatedPtrField;
-using android::media::MediaPlayer2Proto::Value;
-
-static const int kDumpLockRetries = 50;
-static const int kDumpLockSleepUs = 20000;
-
-namespace android {
-
-struct PlayerMessageWrapper : public RefBase {
- static sp<PlayerMessageWrapper> Create(const PlayerMessage *p) {
- if (p != NULL) {
- sp<PlayerMessageWrapper> pw = new PlayerMessageWrapper();
- pw->copyFrom(p);
- return pw;
- }
- return NULL;
- }
-
- const PlayerMessage *getPlayerMessage() {
- return mPlayerMessage;
- }
-
-protected:
- virtual ~PlayerMessageWrapper() {
- if (mPlayerMessage != NULL) {
- delete mPlayerMessage;
- }
- }
-
-private:
- PlayerMessageWrapper()
- : mPlayerMessage(NULL) { }
-
- void copyFrom(const PlayerMessage *p) {
- if (mPlayerMessage == NULL) {
- mPlayerMessage = new PlayerMessage;
- }
- mPlayerMessage->CopyFrom(*p);
- }
-
- PlayerMessage *mPlayerMessage;
-};
-
-// key for media statistics
-static const char *kKeyPlayer = "nuplayer2";
-// attrs for media statistics
- // NB: these are matched with public Java API constants defined
- // in frameworks/base/media/java/android/media/MediaPlayer2.java
- // These must be kept synchronized with the constants there.
-static const char *kPlayerVMime = "android.media.mediaplayer.video.mime";
-static const char *kPlayerVCodec = "android.media.mediaplayer.video.codec";
-static const char *kPlayerWidth = "android.media.mediaplayer.width";
-static const char *kPlayerHeight = "android.media.mediaplayer.height";
-static const char *kPlayerFrames = "android.media.mediaplayer.frames";
-static const char *kPlayerFramesDropped = "android.media.mediaplayer.dropped";
-static const char *kPlayerFrameRate = "android.media.mediaplayer.fps";
-static const char *kPlayerAMime = "android.media.mediaplayer.audio.mime";
-static const char *kPlayerACodec = "android.media.mediaplayer.audio.codec";
-static const char *kPlayerDuration = "android.media.mediaplayer.durationMs";
-static const char *kPlayerPlaying = "android.media.mediaplayer.playingMs";
-static const char *kPlayerError = "android.media.mediaplayer.err";
-static const char *kPlayerErrorCode = "android.media.mediaplayer.errcode";
-
-// NB: These are not yet exposed as public Java API constants.
-static const char *kPlayerErrorState = "android.media.mediaplayer.errstate";
-static const char *kPlayerDataSourceType = "android.media.mediaplayer.dataSource";
-//
-static const char *kPlayerRebuffering = "android.media.mediaplayer.rebufferingMs";
-static const char *kPlayerRebufferingCount = "android.media.mediaplayer.rebuffers";
-static const char *kPlayerRebufferingAtExit = "android.media.mediaplayer.rebufferExit";
-
-static const char *kPlayerVersion = "android.media.mediaplayer.version";
-
-
-NuPlayer2Driver::NuPlayer2Driver(pid_t pid, uid_t uid, const sp<JObjectHolder> &context)
- : mState(STATE_IDLE),
- mAsyncResult(UNKNOWN_ERROR),
- mSrcId(0),
- mSetSurfaceInProgress(false),
- mDurationUs(-1),
- mPositionUs(-1),
- mSeekInProgress(false),
- mPlayingTimeUs(0),
- mRebufferingTimeUs(0),
- mRebufferingEvents(0),
- mRebufferingAtExit(false),
- mLooper(new ALooper),
- mNuPlayer2Looper(new ALooper),
- mMediaClock(new MediaClock),
- mPlayer(new NuPlayer2(pid, uid, mMediaClock, context)),
- mPlayerFlags(0),
- mMetricsHandle(0),
- mPlayerVersion(0),
- mClientUid(uid),
- mAtEOS(false),
- mLooping(false),
- mAutoLoop(false) {
- ALOGD("NuPlayer2Driver(%p) created, clientPid(%d)", this, pid);
- mLooper->setName("NuPlayer2Driver Looper");
- mNuPlayer2Looper->setName("NuPlayer2 Looper");
-
- mMediaClock->init();
-
- // XXX: what version are we?
- // Ideally, this ticks with the apk version info for the APEX packaging
-
- // set up media metrics record
- mMetricsHandle = mediametrics_create(kKeyPlayer);
- mediametrics_setUid(mMetricsHandle, mClientUid);
- mediametrics_setInt64(mMetricsHandle, kPlayerVersion, mPlayerVersion);
-
- mNuPlayer2Looper->start(
- false, /* runOnCallingThread */
- true, /* canCallJava */
- PRIORITY_AUDIO);
-
- mNuPlayer2Looper->registerHandler(mPlayer);
-
- mPlayer->setDriver(this);
-}
-
-NuPlayer2Driver::~NuPlayer2Driver() {
- ALOGV("~NuPlayer2Driver(%p)", this);
- mNuPlayer2Looper->stop();
- mLooper->stop();
-
- // finalize any pending metrics, usually a no-op.
- updateMetrics("destructor");
- logMetrics("destructor");
-
- mediametrics_delete(mMetricsHandle);
-}
-
-status_t NuPlayer2Driver::initCheck() {
- mLooper->start(
- false, /* runOnCallingThread */
- true, /* canCallJava */
- PRIORITY_AUDIO);
-
- mLooper->registerHandler(this);
- return OK;
-}
-
-status_t NuPlayer2Driver::setDataSource(const sp<DataSourceDesc> &dsd) {
- ALOGV("setDataSource(%p)", this);
- Mutex::Autolock autoLock(mLock);
-
- if (mState != STATE_IDLE) {
- return INVALID_OPERATION;
- }
-
- mSrcId = dsd->mId;
- mState = STATE_SET_DATASOURCE_PENDING;
-
- mPlayer->setDataSourceAsync(dsd);
-
- while (mState == STATE_SET_DATASOURCE_PENDING) {
- mCondition.wait(mLock);
- }
-
- return mAsyncResult;
-}
-
-status_t NuPlayer2Driver::prepareNextDataSource(const sp<DataSourceDesc> &dsd) {
- ALOGV("prepareNextDataSource(%p)", this);
- Mutex::Autolock autoLock(mLock);
-
- mPlayer->prepareNextDataSourceAsync(dsd);
-
- return OK;
-}
-
-status_t NuPlayer2Driver::playNextDataSource(int64_t srcId) {
- ALOGV("playNextDataSource(%p)", this);
- Mutex::Autolock autoLock(mLock);
-
- mSrcId = srcId;
- mPlayer->playNextDataSource(srcId);
-
- return OK;
-}
-
-status_t NuPlayer2Driver::setVideoSurfaceTexture(const sp<ANativeWindowWrapper> &nww) {
- ALOGV("setVideoSurfaceTexture(%p)", this);
- Mutex::Autolock autoLock(mLock);
-
- if (mSetSurfaceInProgress) {
- return INVALID_OPERATION;
- }
-
- switch (mState) {
- case STATE_SET_DATASOURCE_PENDING:
- case STATE_RESET_IN_PROGRESS:
- return INVALID_OPERATION;
-
- default:
- break;
- }
-
- mSetSurfaceInProgress = true;
-
- mPlayer->setVideoSurfaceTextureAsync(nww);
-
- while (mSetSurfaceInProgress) {
- mCondition.wait(mLock);
- }
-
- return OK;
-}
-
-status_t NuPlayer2Driver::getBufferingSettings(BufferingSettings* buffering) {
- ALOGV("getBufferingSettings(%p)", this);
- {
- Mutex::Autolock autoLock(mLock);
- if (mState == STATE_IDLE) {
- return INVALID_OPERATION;
- }
- }
-
- return mPlayer->getBufferingSettings(buffering);
-}
-
-status_t NuPlayer2Driver::setBufferingSettings(const BufferingSettings& buffering) {
- ALOGV("setBufferingSettings(%p)", this);
- {
- Mutex::Autolock autoLock(mLock);
- if (mState == STATE_IDLE) {
- return INVALID_OPERATION;
- }
- }
-
- return mPlayer->setBufferingSettings(buffering);
-}
-
-status_t NuPlayer2Driver::prepareAsync() {
- ALOGV("prepareAsync(%p)", this);
- Mutex::Autolock autoLock(mLock);
-
- switch (mState) {
- case STATE_UNPREPARED:
- mState = STATE_PREPARING;
- mPlayer->prepareAsync();
- return OK;
- default:
- return INVALID_OPERATION;
- };
-}
-
-status_t NuPlayer2Driver::start() {
- ALOGD("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
- Mutex::Autolock autoLock(mLock);
- return start_l();
-}
-
-status_t NuPlayer2Driver::start_l() {
- switch (mState) {
- case STATE_PAUSED:
- case STATE_PREPARED:
- {
- mPlayer->start();
- FALLTHROUGH_INTENDED;
- }
-
- case STATE_RUNNING:
- {
- if (mAtEOS) {
- mPlayer->rewind();
- mAtEOS = false;
- mPositionUs = -1;
- }
- break;
- }
-
- default:
- return INVALID_OPERATION;
- }
-
- mState = STATE_RUNNING;
-
- return OK;
-}
-
-status_t NuPlayer2Driver::pause() {
- ALOGD("pause(%p)", this);
- // The NuPlayerRenderer may get flushed if pause for long enough, e.g. the pause timeout tear
- // down for audio offload mode. If that happens, the NuPlayerRenderer will no longer know the
- // current position. So similar to seekTo, update |mPositionUs| to the pause position by calling
- // getCurrentPosition here.
- int64_t unused;
- getCurrentPosition(&unused);
-
- Mutex::Autolock autoLock(mLock);
-
- switch (mState) {
- case STATE_PAUSED:
- return OK;
-
- case STATE_PREPARED:
- case STATE_RUNNING:
- mState = STATE_PAUSED;
- mPlayer->pause();
- break;
-
- default:
- return INVALID_OPERATION;
- }
-
- return OK;
-}
-
-bool NuPlayer2Driver::isPlaying() {
- return mState == STATE_RUNNING && !mAtEOS;
-}
-
-status_t NuPlayer2Driver::setPlaybackSettings(const AudioPlaybackRate &rate) {
- status_t err = mPlayer->setPlaybackSettings(rate);
- if (err == OK) {
- // try to update position
- int64_t unused;
- getCurrentPosition(&unused);
- }
- return err;
-}
-
-status_t NuPlayer2Driver::getPlaybackSettings(AudioPlaybackRate *rate) {
- return mPlayer->getPlaybackSettings(rate);
-}
-
-status_t NuPlayer2Driver::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
- return mPlayer->setSyncSettings(sync, videoFpsHint);
-}
-
-status_t NuPlayer2Driver::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
- return mPlayer->getSyncSettings(sync, videoFps);
-}
-
-status_t NuPlayer2Driver::seekTo(int64_t msec, MediaPlayer2SeekMode mode) {
- ALOGD("seekTo(%p) (%lld ms, %d) at state %d", this, (long long)msec, mode, mState);
- Mutex::Autolock autoLock(mLock);
-
- int64_t seekTimeUs = msec * 1000LL;
-
- switch (mState) {
- case STATE_PREPARED:
- case STATE_PAUSED:
- case STATE_RUNNING:
- {
- mAtEOS = false;
- mSeekInProgress = true;
- mPlayer->seekToAsync(seekTimeUs, mode, true /* needNotify */);
- break;
- }
-
- default:
- return INVALID_OPERATION;
- }
-
- mPositionUs = seekTimeUs;
- return OK;
-}
-
-status_t NuPlayer2Driver::getCurrentPosition(int64_t *msec) {
- int64_t tempUs = 0;
- {
- Mutex::Autolock autoLock(mLock);
- if (mSeekInProgress || (mState == STATE_PAUSED && !mAtEOS)) {
- tempUs = (mPositionUs <= 0) ? 0 : mPositionUs;
- *msec = divRound(tempUs, (int64_t)(1000));
- return OK;
- }
- }
-
- status_t ret = mPlayer->getCurrentPosition(&tempUs);
-
- Mutex::Autolock autoLock(mLock);
- // We need to check mSeekInProgress here because mPlayer->seekToAsync is an async call, which
- // means getCurrentPosition can be called before seek is completed. Iow, renderer may return a
- // position value that's different the seek to position.
- if (ret != OK) {
- tempUs = (mPositionUs <= 0) ? 0 : mPositionUs;
- } else {
- mPositionUs = tempUs;
- }
- *msec = divRound(tempUs, (int64_t)(1000));
- return OK;
-}
-
-status_t NuPlayer2Driver::getDuration(int64_t *msec) {
- Mutex::Autolock autoLock(mLock);
-
- if (mDurationUs < 0) {
- return UNKNOWN_ERROR;
- }
-
- *msec = (mDurationUs + 500LL) / 1000;
-
- return OK;
-}
-
-void NuPlayer2Driver::updateMetrics(const char *where) {
- if (where == NULL) {
- where = "unknown";
- }
- ALOGV("updateMetrics(%p) from %s at state %d", this, where, mState);
-
- // gather the final stats for this record
- Vector<sp<AMessage>> trackStats;
- mPlayer->getStats(&trackStats);
-
- if (trackStats.size() > 0) {
- for (size_t i = 0; i < trackStats.size(); ++i) {
- const sp<AMessage> &stats = trackStats.itemAt(i);
-
- AString mime;
- stats->findString("mime", &mime);
-
- AString name;
- stats->findString("component-name", &name);
-
- if (mime.startsWith("video/")) {
- int32_t width, height;
- mediametrics_setCString(mMetricsHandle, kPlayerVMime, mime.c_str());
- if (!name.empty()) {
- mediametrics_setCString(mMetricsHandle, kPlayerVCodec, name.c_str());
- }
-
- if (stats->findInt32("width", &width)
- && stats->findInt32("height", &height)) {
- mediametrics_setInt32(mMetricsHandle, kPlayerWidth, width);
- mediametrics_setInt32(mMetricsHandle, kPlayerHeight, height);
- }
-
- int64_t numFramesTotal = 0;
- int64_t numFramesDropped = 0;
- stats->findInt64("frames-total", &numFramesTotal);
- stats->findInt64("frames-dropped-output", &numFramesDropped);
-
- mediametrics_setInt64(mMetricsHandle, kPlayerFrames, numFramesTotal);
- mediametrics_setInt64(mMetricsHandle, kPlayerFramesDropped, numFramesDropped);
-
- float frameRate = 0;
- if (stats->findFloat("frame-rate-output", &frameRate)) {
- mediametrics_setInt64(mMetricsHandle, kPlayerFrameRate, frameRate);
- }
-
- } else if (mime.startsWith("audio/")) {
- mediametrics_setCString(mMetricsHandle, kPlayerAMime, mime.c_str());
- if (!name.empty()) {
- mediametrics_setCString(mMetricsHandle, kPlayerACodec, name.c_str());
- }
- }
- }
- }
-
- // always provide duration and playing time, even if they have 0/unknown values.
-
- // getDuration() uses mLock for mutex -- careful where we use it.
- int64_t duration_ms = -1;
- getDuration(&duration_ms);
- mediametrics_setInt64(mMetricsHandle, kPlayerDuration, duration_ms);
-
- mediametrics_setInt64(mMetricsHandle, kPlayerPlaying, (mPlayingTimeUs+500)/1000 );
-
- if (mRebufferingEvents != 0) {
- mediametrics_setInt64(mMetricsHandle, kPlayerRebuffering, (mRebufferingTimeUs+500)/1000 );
- mediametrics_setInt32(mMetricsHandle, kPlayerRebufferingCount, mRebufferingEvents);
- mediametrics_setInt32(mMetricsHandle, kPlayerRebufferingAtExit, mRebufferingAtExit);
- }
-
- mediametrics_setCString(mMetricsHandle, kPlayerDataSourceType, mPlayer->getDataSourceType());
-}
-
-
-void NuPlayer2Driver::logMetrics(const char *where) {
- if (where == NULL) {
- where = "unknown";
- }
- ALOGV("logMetrics(%p) from %s at state %d", this, where, mState);
-
- if (mMetricsHandle == 0 || mediametrics_isEnabled() == false) {
- return;
- }
-
- // log only non-empty records
- // we always updateMetrics() before we get here
- // and that always injects 3 fields (duration, playing time, and
- // datasource) into the record.
- // So the canonical "empty" record has 3 elements in it.
- if (mediametrics_count(mMetricsHandle) > 3) {
- mediametrics_selfRecord(mMetricsHandle);
- // re-init in case we prepare() and start() again.
- mediametrics_delete(mMetricsHandle);
- mMetricsHandle = mediametrics_create(kKeyPlayer);
- mediametrics_setUid(mMetricsHandle, mClientUid);
- mediametrics_setInt64(mMetricsHandle, kPlayerVersion, mPlayerVersion);
- } else {
- ALOGV("did not have anything to record");
- }
-}
-
-status_t NuPlayer2Driver::reset() {
- ALOGD("reset(%p) at state %d", this, mState);
-
- updateMetrics("reset");
- logMetrics("reset");
-
- Mutex::Autolock autoLock(mLock);
-
- switch (mState) {
- case STATE_IDLE:
- return OK;
-
- case STATE_SET_DATASOURCE_PENDING:
- case STATE_RESET_IN_PROGRESS:
- return INVALID_OPERATION;
-
- case STATE_PREPARING:
- {
- notifyListener_l(mSrcId, MEDIA2_PREPARED);
- break;
- }
-
- default:
- break;
- }
-
- mState = STATE_RESET_IN_PROGRESS;
- mPlayer->resetAsync();
-
- while (mState == STATE_RESET_IN_PROGRESS) {
- mCondition.wait(mLock);
- }
-
- mDurationUs = -1;
- mPositionUs = -1;
- mLooping = false;
- mPlayingTimeUs = 0;
- mRebufferingTimeUs = 0;
- mRebufferingEvents = 0;
- mRebufferingAtExit = false;
-
- return OK;
-}
-
-status_t NuPlayer2Driver::notifyAt(int64_t mediaTimeUs) {
- ALOGV("notifyAt(%p), time:%lld", this, (long long)mediaTimeUs);
- return mPlayer->notifyAt(mediaTimeUs);
-}
-
-status_t NuPlayer2Driver::setLooping(int loop) {
- mLooping = loop != 0;
- return OK;
-}
-
-status_t NuPlayer2Driver::invoke(const PlayerMessage &request, PlayerMessage *response) {
- if (response == NULL) {
- ALOGE("reply is a NULL pointer");
- return BAD_VALUE;
- }
-
- RepeatedPtrField<const Value>::const_iterator it = request.values().cbegin();
- int32_t methodId = (it++)->int32_value();
-
- switch (methodId) {
- case MEDIA_PLAYER2_INVOKE_ID_SET_VIDEO_SCALING_MODE:
- {
- int mode = (it++)->int32_value();
- return mPlayer->setVideoScalingMode(mode);
- }
-
- case MEDIA_PLAYER2_INVOKE_ID_GET_TRACK_INFO:
- {
- int64_t srcId = (it++)->int64_value();
- return mPlayer->getTrackInfo(srcId, response);
- }
-
- case MEDIA_PLAYER2_INVOKE_ID_SELECT_TRACK:
- {
- int64_t srcId = (it++)->int64_value();
- int trackIndex = (it++)->int32_value();
- int64_t msec = 0;
- // getCurrentPosition should always return OK
- getCurrentPosition(&msec);
- return mPlayer->selectTrack(srcId, trackIndex, true /* select */, msec * 1000LL);
- }
-
- case MEDIA_PLAYER2_INVOKE_ID_UNSELECT_TRACK:
- {
- int64_t srcId = (it++)->int64_value();
- int trackIndex = (it++)->int32_value();
- return mPlayer->selectTrack(
- srcId, trackIndex, false /* select */, 0xdeadbeef /* not used */);
- }
-
- case MEDIA_PLAYER2_INVOKE_ID_GET_SELECTED_TRACK:
- {
- int64_t srcId = (it++)->int64_value();
- int32_t type = (it++)->int32_value();
- return mPlayer->getSelectedTrack(srcId, type, response);
- }
-
- default:
- {
- return INVALID_OPERATION;
- }
- }
-}
-
-void NuPlayer2Driver::setAudioSink(const sp<AudioSink> &audioSink) {
- mPlayer->setAudioSink(audioSink);
- mAudioSink = audioSink;
-}
-
-status_t NuPlayer2Driver::setParameter(
- int /* key */, const Parcel & /* request */) {
- return INVALID_OPERATION;
-}
-
-status_t NuPlayer2Driver::getParameter(int key __unused, Parcel *reply __unused) {
- return INVALID_OPERATION;
-}
-
-status_t NuPlayer2Driver::getMetrics(char **buffer, size_t *length) {
- updateMetrics("api");
- if (mediametrics_getAttributes(mMetricsHandle, buffer, length))
- return OK;
- else
- return FAILED_TRANSACTION;
-}
-
-void NuPlayer2Driver::notifyResetComplete(int64_t /* srcId */) {
- ALOGD("notifyResetComplete(%p)", this);
- Mutex::Autolock autoLock(mLock);
-
- CHECK_EQ(mState, STATE_RESET_IN_PROGRESS);
- mState = STATE_IDLE;
- mCondition.broadcast();
-}
-
-void NuPlayer2Driver::notifySetSurfaceComplete(int64_t /* srcId */) {
- ALOGV("notifySetSurfaceComplete(%p)", this);
- Mutex::Autolock autoLock(mLock);
-
- CHECK(mSetSurfaceInProgress);
- mSetSurfaceInProgress = false;
-
- mCondition.broadcast();
-}
-
-void NuPlayer2Driver::notifyDuration(int64_t /* srcId */, int64_t durationUs) {
- Mutex::Autolock autoLock(mLock);
- mDurationUs = durationUs;
-}
-
-void NuPlayer2Driver::notifyMorePlayingTimeUs(int64_t /* srcId */, int64_t playingUs) {
- Mutex::Autolock autoLock(mLock);
- mPlayingTimeUs += playingUs;
-}
-
-void NuPlayer2Driver::notifyMoreRebufferingTimeUs(int64_t /* srcId */, int64_t rebufferingUs) {
- Mutex::Autolock autoLock(mLock);
- mRebufferingTimeUs += rebufferingUs;
- mRebufferingEvents++;
-}
-
-void NuPlayer2Driver::notifyRebufferingWhenExit(int64_t /* srcId */, bool status) {
- Mutex::Autolock autoLock(mLock);
- mRebufferingAtExit = status;
-}
-
-void NuPlayer2Driver::notifySeekComplete(int64_t srcId) {
- ALOGV("notifySeekComplete(%p)", this);
- Mutex::Autolock autoLock(mLock);
- mSeekInProgress = false;
- notifyListener_l(srcId, MEDIA2_SEEK_COMPLETE);
-}
-
-status_t NuPlayer2Driver::dump(
- int fd, const Vector<String16> & /* args */) const {
-
- Vector<sp<AMessage> > trackStats;
- mPlayer->getStats(&trackStats);
-
- AString logString(" NuPlayer2\n");
- char buf[256] = {0};
-
- bool locked = false;
- for (int i = 0; i < kDumpLockRetries; ++i) {
- if (mLock.tryLock() == NO_ERROR) {
- locked = true;
- break;
- }
- usleep(kDumpLockSleepUs);
- }
-
- if (locked) {
- snprintf(buf, sizeof(buf), " state(%d), atEOS(%d), looping(%d), autoLoop(%d)\n",
- mState, mAtEOS, mLooping, mAutoLoop);
- mLock.unlock();
- } else {
- snprintf(buf, sizeof(buf), " NPD(%p) lock is taken\n", this);
- }
- logString.append(buf);
-
- for (size_t i = 0; i < trackStats.size(); ++i) {
- const sp<AMessage> &stats = trackStats.itemAt(i);
-
- AString mime;
- if (stats->findString("mime", &mime)) {
- snprintf(buf, sizeof(buf), " mime(%s)\n", mime.c_str());
- logString.append(buf);
- }
-
- AString name;
- if (stats->findString("component-name", &name)) {
- snprintf(buf, sizeof(buf), " decoder(%s)\n", name.c_str());
- logString.append(buf);
- }
-
- if (mime.startsWith("video/")) {
- int32_t width, height;
- if (stats->findInt32("width", &width)
- && stats->findInt32("height", &height)) {
- snprintf(buf, sizeof(buf), " resolution(%d x %d)\n", width, height);
- logString.append(buf);
- }
-
- int64_t numFramesTotal = 0;
- int64_t numFramesDropped = 0;
-
- stats->findInt64("frames-total", &numFramesTotal);
- stats->findInt64("frames-dropped-output", &numFramesDropped);
- snprintf(buf, sizeof(buf), " numFramesTotal(%lld), numFramesDropped(%lld), "
- "percentageDropped(%.2f%%)\n",
- (long long)numFramesTotal,
- (long long)numFramesDropped,
- numFramesTotal == 0
- ? 0.0 : (double)(numFramesDropped * 100) / numFramesTotal);
- logString.append(buf);
- }
- }
-
- ALOGI("%s", logString.c_str());
-
- if (fd >= 0) {
- FILE *out = fdopen(dup(fd), "w");
- fprintf(out, "%s", logString.c_str());
- fclose(out);
- out = NULL;
- }
-
- return OK;
-}
-
-void NuPlayer2Driver::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatNotifyListener: {
- int64_t srcId;
- int32_t msgId;
- int32_t ext1 = 0;
- int32_t ext2 = 0;
- CHECK(msg->findInt64("srcId", &srcId));
- CHECK(msg->findInt32("messageId", &msgId));
- msg->findInt32("ext1", &ext1);
- msg->findInt32("ext2", &ext2);
- sp<PlayerMessageWrapper> in;
- sp<RefBase> obj;
- if (msg->findObject("obj", &obj) && obj != NULL) {
- in = static_cast<PlayerMessageWrapper *>(obj.get());
- }
- sendEvent(srcId, msgId, ext1, ext2, (in == NULL ? NULL : in->getPlayerMessage()));
- break;
- }
- default:
- break;
- }
-}
-
-void NuPlayer2Driver::notifyListener(
- int64_t srcId, int msg, int ext1, int ext2, const PlayerMessage *in) {
- Mutex::Autolock autoLock(mLock);
- notifyListener_l(srcId, msg, ext1, ext2, in);
-}
-
-void NuPlayer2Driver::notifyListener_l(
- int64_t srcId, int msg, int ext1, int ext2, const PlayerMessage *in) {
- ALOGD("notifyListener_l(%p), (%lld, %d, %d, %d, %d), loop setting(%d, %d)",
- this, (long long)srcId, msg, ext1, ext2,
- (in == NULL ? -1 : (int)in->ByteSize()), mAutoLoop, mLooping);
- if (srcId == mSrcId) {
- switch (msg) {
- case MEDIA2_PLAYBACK_COMPLETE:
- {
- if (mState != STATE_RESET_IN_PROGRESS) {
- if (mAutoLoop) {
- audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
- if (mAudioSink != NULL) {
- streamType = mAudioSink->getAudioStreamType();
- }
- if (streamType == AUDIO_STREAM_NOTIFICATION) {
- ALOGW("disabling auto-loop for notification");
- mAutoLoop = false;
- }
- }
- if (mLooping || mAutoLoop) {
- mPlayer->rewind();
- if (mAudioSink != NULL) {
- // The renderer has stopped the sink at the end in order to play out
- // the last little bit of audio. In looping mode, we need to restart it.
- mAudioSink->start();
- }
-
- sp<AMessage> notify = new AMessage(kWhatNotifyListener, this);
- notify->setInt64("srcId", srcId);
- notify->setInt32("messageId", MEDIA2_INFO);
- notify->setInt32("ext1", MEDIA2_INFO_DATA_SOURCE_REPEAT);
- notify->post();
- return;
- }
- if (property_get_bool("persist.debug.sf.stats", false)) {
- Vector<String16> args;
- dump(-1, args);
- }
- mPlayer->pause();
- mState = STATE_PAUSED;
- }
- FALLTHROUGH_INTENDED;
- }
-
- case MEDIA2_ERROR:
- {
- // when we have an error, add it to the analytics for this playback.
- // ext1 is our primary 'error type' value. Only add ext2 when non-zero.
-                    // [the test against msg is needed because of the fall-through from the previous case]
- if (msg == MEDIA2_ERROR) {
- mediametrics_setInt32(mMetricsHandle, kPlayerError, ext1);
- if (ext2 != 0) {
- mediametrics_setInt32(mMetricsHandle, kPlayerErrorCode, ext2);
- }
- mediametrics_setCString(mMetricsHandle, kPlayerErrorState, stateString(mState).c_str());
- }
- mAtEOS = true;
- break;
- }
-
- default:
- break;
- }
- }
-
- sp<AMessage> notify = new AMessage(kWhatNotifyListener, this);
- notify->setInt64("srcId", srcId);
- notify->setInt32("messageId", msg);
- notify->setInt32("ext1", ext1);
- notify->setInt32("ext2", ext2);
- notify->setObject("obj", PlayerMessageWrapper::Create((PlayerMessage*)in));
- notify->post();
-}
-
-void NuPlayer2Driver::notifySetDataSourceCompleted(int64_t /* srcId */, status_t err) {
- Mutex::Autolock autoLock(mLock);
-
- CHECK_EQ(mState, STATE_SET_DATASOURCE_PENDING);
-
- mAsyncResult = err;
- mState = (err == OK) ? STATE_UNPREPARED : STATE_IDLE;
- mCondition.broadcast();
-}
-
-void NuPlayer2Driver::notifyPrepareCompleted(int64_t srcId, status_t err) {
- ALOGV("notifyPrepareCompleted %d", err);
-
- Mutex::Autolock autoLock(mLock);
-
- if (srcId != mSrcId) {
- if (err == OK) {
- notifyListener_l(srcId, MEDIA2_PREPARED);
- } else {
- notifyListener_l(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
- }
- return;
- }
-
- if (mState != STATE_PREPARING) {
- // We were preparing asynchronously when the client called
- // reset(), we sent a premature "prepared" notification and
- // then initiated the reset. This notification is stale.
- CHECK(mState == STATE_RESET_IN_PROGRESS || mState == STATE_IDLE);
- return;
- }
-
- CHECK_EQ(mState, STATE_PREPARING);
-
- mAsyncResult = err;
-
- if (err == OK) {
- // update state before notifying client, so that if client calls back into NuPlayer2Driver
- // in response, NuPlayer2Driver has the right state
- mState = STATE_PREPARED;
- notifyListener_l(srcId, MEDIA2_PREPARED);
- } else {
- mState = STATE_UNPREPARED;
- notifyListener_l(srcId, MEDIA2_ERROR, MEDIA2_ERROR_UNKNOWN, err);
- }
-
- sp<MetaData> meta = mPlayer->getFileMeta();
- int32_t loop;
- if (meta != NULL
- && meta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
- mAutoLoop = true;
- }
-
- mCondition.broadcast();
-}
-
-void NuPlayer2Driver::notifyFlagsChanged(int64_t /* srcId */, uint32_t flags) {
- Mutex::Autolock autoLock(mLock);
-
- mPlayerFlags = flags;
-}
-
-// Modular DRM
-status_t NuPlayer2Driver::prepareDrm(
- int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId)
-{
- ALOGV("prepareDrm(%p) state: %d", this, mState);
-
- // leaving the state verification for mediaplayer.cpp
- status_t ret = mPlayer->prepareDrm(srcId, uuid, drmSessionId);
-
- ALOGV("prepareDrm ret: %d", ret);
-
- return ret;
-}
-
-status_t NuPlayer2Driver::releaseDrm(int64_t srcId)
-{
- ALOGV("releaseDrm(%p) state: %d", this, mState);
-
- // leaving the state verification for mediaplayer.cpp
- status_t ret = mPlayer->releaseDrm(srcId);
-
- ALOGV("releaseDrm ret: %d", ret);
-
- return ret;
-}
-
-std::string NuPlayer2Driver::stateString(State state) {
- const char *rval = NULL;
- char rawbuffer[16]; // allows "%d"
-
- switch (state) {
- case STATE_IDLE: rval = "IDLE"; break;
- case STATE_SET_DATASOURCE_PENDING: rval = "SET_DATASOURCE_PENDING"; break;
- case STATE_UNPREPARED: rval = "UNPREPARED"; break;
- case STATE_PREPARING: rval = "PREPARING"; break;
- case STATE_PREPARED: rval = "PREPARED"; break;
- case STATE_RUNNING: rval = "RUNNING"; break;
- case STATE_PAUSED: rval = "PAUSED"; break;
- case STATE_RESET_IN_PROGRESS: rval = "RESET_IN_PROGRESS"; break;
- default:
- // yes, this buffer is shared and vulnerable to races
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", state);
- rval = rawbuffer;
- break;
- }
-
- return rval;
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
deleted file mode 100644
index c97e247..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Driver.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <mediaplayer2/MediaPlayer2Interface.h>
-
-#include <media/MediaMetrics.h>
-#include <media/stagefright/foundation/ABase.h>
-#include <mediaplayer2/JObjectHolder.h>
-
-namespace android {
-
-struct ALooper;
-struct MediaClock;
-struct NuPlayer2;
-
-struct NuPlayer2Driver : public MediaPlayer2Interface {
- explicit NuPlayer2Driver(pid_t pid, uid_t uid, const sp<JObjectHolder> &context);
-
- virtual status_t initCheck() override;
-
- virtual status_t setDataSource(const sp<DataSourceDesc> &dsd) override;
- virtual status_t prepareNextDataSource(const sp<DataSourceDesc> &dsd) override;
- virtual status_t playNextDataSource(int64_t srcId) override;
-
- virtual status_t setVideoSurfaceTexture(const sp<ANativeWindowWrapper> &nww) override;
-
- virtual status_t getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) override;
- virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
-
- virtual status_t prepareAsync() override;
- virtual status_t start() override;
- virtual status_t pause() override;
- virtual bool isPlaying() override;
- virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate) override;
- virtual status_t getPlaybackSettings(AudioPlaybackRate *rate) override;
- virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) override;
- virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps) override;
- virtual status_t seekTo(
- int64_t msec,
- MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
- virtual status_t getCurrentPosition(int64_t *msec) override;
- virtual status_t getDuration(int64_t *msec) override;
- virtual status_t reset() override;
- virtual status_t notifyAt(int64_t mediaTimeUs) override;
- virtual status_t setLooping(int loop) override;
- virtual status_t invoke(const PlayerMessage &request, PlayerMessage *response) override;
- virtual void setAudioSink(const sp<AudioSink> &audioSink) override;
- virtual status_t setParameter(int key, const Parcel &request) override;
- virtual status_t getParameter(int key, Parcel *reply) override;
- virtual status_t getMetrics(char **buf, size_t *length) override;
-
- virtual status_t dump(int fd, const Vector<String16> &args) const override;
-
- virtual void onMessageReceived(const sp<AMessage> &msg) override;
-
- void notifySetDataSourceCompleted(int64_t srcId, status_t err);
- void notifyPrepareCompleted(int64_t srcId, status_t err);
- void notifyResetComplete(int64_t srcId);
- void notifySetSurfaceComplete(int64_t srcId);
- void notifyDuration(int64_t srcId, int64_t durationUs);
- void notifyMorePlayingTimeUs(int64_t srcId, int64_t timeUs);
- void notifyMoreRebufferingTimeUs(int64_t srcId, int64_t timeUs);
- void notifyRebufferingWhenExit(int64_t srcId, bool status);
- void notifySeekComplete(int64_t srcId);
- void notifyListener(int64_t srcId, int msg, int ext1 = 0, int ext2 = 0,
- const PlayerMessage *in = NULL);
- void notifyFlagsChanged(int64_t srcId, uint32_t flags);
-
- // Modular DRM
- virtual status_t prepareDrm(
- int64_t srcId, const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId);
- virtual status_t releaseDrm(int64_t srcId);
-
-protected:
- virtual ~NuPlayer2Driver();
-
-private:
- enum State {
- STATE_IDLE,
- STATE_SET_DATASOURCE_PENDING,
- STATE_UNPREPARED,
- STATE_PREPARING,
- STATE_PREPARED,
- STATE_RUNNING,
- STATE_PAUSED,
- STATE_RESET_IN_PROGRESS,
- };
-
- std::string stateString(State state);
-
- enum {
- kWhatNotifyListener,
- };
-
- mutable Mutex mLock;
- Condition mCondition;
-
- State mState;
-
- status_t mAsyncResult;
-
- // The following are protected through "mLock"
- // >>>
- int64_t mSrcId;
- bool mSetSurfaceInProgress;
- int64_t mDurationUs;
- int64_t mPositionUs;
- bool mSeekInProgress;
- int64_t mPlayingTimeUs;
- int64_t mRebufferingTimeUs;
- int32_t mRebufferingEvents;
- bool mRebufferingAtExit;
- // <<<
-
- sp<ALooper> mLooper;
- sp<ALooper> mNuPlayer2Looper;
- const sp<MediaClock> mMediaClock;
- const sp<NuPlayer2> mPlayer;
- sp<AudioSink> mAudioSink;
- uint32_t mPlayerFlags;
-
- mediametrics_handle_t mMetricsHandle;
- int64_t mPlayerVersion;
- uid_t mClientUid;
-
- bool mAtEOS;
- bool mLooping;
- bool mAutoLoop;
-
- void updateMetrics(const char *where);
- void logMetrics(const char *where);
-
- status_t start_l();
- void notifyListener_l(int64_t srcId, int msg, int ext1 = 0, int ext2 = 0,
- const PlayerMessage *in = NULL);
-
- DISALLOW_EVIL_CONSTRUCTORS(NuPlayer2Driver);
-};
-
-} // namespace android
-
-
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.cpp
deleted file mode 100644
index f41a431..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.cpp
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2Drm"
-
-#include "NuPlayer2Drm.h"
-
-#include <media/NdkWrapper.h>
-#include <utils/Log.h>
-#include <sstream>
-
-namespace android {
-
-Vector<DrmUUID> NuPlayer2Drm::parsePSSH(const void *pssh, size_t psshsize)
-{
- Vector<DrmUUID> drmSchemes, empty;
- const int DATALEN_SIZE = 4;
-
- // the format of the buffer is 1 or more of:
- // {
- // 16 byte uuid
- // 4 byte data length N
- // N bytes of data
- // }
- // Determine the number of entries in the source data.
- // Since we got the data from stagefright, we trust it is valid and properly formatted.
-
- const uint8_t *data = (const uint8_t*)pssh;
- size_t len = psshsize;
- size_t numentries = 0;
- while (len > 0) {
- if (len < DrmUUID::UUID_SIZE) {
- ALOGE("ParsePSSH: invalid PSSH data");
- return empty;
- }
-
- const uint8_t *uuidPtr = data;
-
- // skip uuid
- data += DrmUUID::UUID_SIZE;
- len -= DrmUUID::UUID_SIZE;
-
- // get data length
- if (len < DATALEN_SIZE) {
- ALOGE("ParsePSSH: invalid PSSH data");
- return empty;
- }
-
- uint32_t datalen = *((uint32_t*)data);
- data += DATALEN_SIZE;
- len -= DATALEN_SIZE;
-
- if (len < datalen) {
- ALOGE("ParsePSSH: invalid PSSH data");
- return empty;
- }
-
- // skip the data
- data += datalen;
- len -= datalen;
-
- DrmUUID _uuid(uuidPtr);
- drmSchemes.add(_uuid);
-
- ALOGV("ParsePSSH[%zu]: %s: %s", numentries,
- _uuid.toHexString().string(),
- DrmUUID::arrayToHex(data, datalen).string()
- );
-
- numentries++;
- }
-
- return drmSchemes;
-}
-
-Vector<DrmUUID> NuPlayer2Drm::getSupportedDrmSchemes(const void *pssh, size_t psshsize)
-{
- Vector<DrmUUID> psshDRMs = parsePSSH(pssh, psshsize);
-
- Vector<DrmUUID> supportedDRMs;
- for (size_t i = 0; i < psshDRMs.size(); i++) {
- DrmUUID uuid = psshDRMs[i];
- if (AMediaDrmWrapper::isCryptoSchemeSupported(uuid.ptr(), NULL)) {
- supportedDRMs.add(uuid);
- }
- }
-
- ALOGV("getSupportedDrmSchemes: psshDRMs: %zu supportedDRMs: %zu",
- psshDRMs.size(), supportedDRMs.size());
-
- return supportedDRMs;
-}
-
-sp<ABuffer> NuPlayer2Drm::retrieveDrmInfo(const void *pssh, uint32_t psshsize)
-{
- std::ostringstream buf;
-
- // 1) PSSH bytes
- buf.write(reinterpret_cast<const char *>(&psshsize), sizeof(psshsize));
- buf.write(reinterpret_cast<const char *>(pssh), psshsize);
-
- ALOGV("retrieveDrmInfo: MEDIA2_DRM_INFO PSSH: size: %u %s", psshsize,
- DrmUUID::arrayToHex((uint8_t*)pssh, psshsize).string());
-
- // 2) supportedDRMs
- Vector<DrmUUID> supportedDRMs = getSupportedDrmSchemes(pssh, psshsize);
- uint32_t n = supportedDRMs.size();
- buf.write(reinterpret_cast<char *>(&n), sizeof(n));
- for (size_t i = 0; i < n; i++) {
- DrmUUID uuid = supportedDRMs[i];
- buf.write(reinterpret_cast<const char *>(&n), sizeof(n));
- buf.write(reinterpret_cast<const char *>(uuid.ptr()), DrmUUID::UUID_SIZE);
-
- ALOGV("retrieveDrmInfo: MEDIA2_DRM_INFO supportedScheme[%zu] %s", i,
- uuid.toHexString().string());
- }
-
- sp<ABuffer> drmInfoBuffer = ABuffer::CreateAsCopy(buf.str().c_str(), buf.tellp());
- return drmInfoBuffer;
-}
-
-status_t NuPlayer2Drm::retrieveDrmInfo(PsshInfo *psshInfo, PlayerMessage *playerMsg)
-{
- std::ostringstream pssh, drmInfo;
-
- // 0) Generate PSSH bytes
- for (size_t i = 0; i < psshInfo->numentries; i++) {
- PsshEntry *entry = &psshInfo->entries[i];
- uint32_t datalen = entry->datalen;
- pssh.write(reinterpret_cast<const char *>(&entry->uuid), sizeof(entry->uuid));
- pssh.write(reinterpret_cast<const char *>(&datalen), sizeof(datalen));
- pssh.write(reinterpret_cast<const char *>(entry->data), datalen);
- }
-
- uint32_t psshSize = pssh.tellp();
- std::string psshBase = pssh.str();
- const auto* psshPtr = reinterpret_cast<const uint8_t*>(psshBase.c_str());
- ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO PSSH: size: %u %s", psshSize,
- DrmUUID::arrayToHex(psshPtr, psshSize).string());
-
- // 1) Write PSSH bytes
- playerMsg->add_values()->set_bytes_value(
- reinterpret_cast<const char *>(pssh.str().c_str()), psshSize);
-
- // 2) Write supportedDRMs
- uint32_t numentries = psshInfo->numentries;
- playerMsg->add_values()->set_int32_value(numentries);
- for (size_t i = 0; i < numentries; i++) {
- PsshEntry *entry = &psshInfo->entries[i];
- playerMsg->add_values()->set_bytes_value(
- reinterpret_cast<const char *>(&entry->uuid), sizeof(entry->uuid));
- ALOGV("retrieveDrmInfo: MEDIA_DRM_INFO supportedScheme[%zu] %s", i,
- DrmUUID::arrayToHex((const uint8_t*)&entry->uuid, sizeof(AMediaUUID)).string());
- }
- return OK;
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.h
deleted file mode 100644
index 968d1be..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Drm.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NUPLAYER2_DRM_H_
-#define NUPLAYER2_DRM_H_
-
-#include <media/NdkMediaExtractor.h>
-#include <media/stagefright/foundation/ABuffer.h>
-
-#include <utils/String8.h>
-#include <utils/Vector.h>
-
-#include "mediaplayer2.pb.h"
-
-using android::media::MediaPlayer2Proto::PlayerMessage;
-
-namespace android {
-
- struct DrmUUID {
- static const int UUID_SIZE = 16;
-
- DrmUUID() {
- memset(this->uuid, 0, sizeof(uuid));
- }
-
- // to allow defining Vector/KeyedVector of UUID type
- DrmUUID(const DrmUUID &a) {
- memcpy(this->uuid, a.uuid, sizeof(uuid));
- }
-
- // to allow defining Vector/KeyedVector of UUID type
- DrmUUID(const uint8_t uuid_in[UUID_SIZE]) {
- memcpy(this->uuid, uuid_in, sizeof(uuid));
- }
-
- const uint8_t *ptr() const {
- return uuid;
- }
-
- String8 toHexString() const {
- return arrayToHex(uuid, UUID_SIZE);
- }
-
- static String8 toHexString(const uint8_t uuid_in[UUID_SIZE]) {
- return arrayToHex(uuid_in, UUID_SIZE);
- }
-
- static String8 arrayToHex(const uint8_t *array, int bytes) {
- String8 result;
- for (int i = 0; i < bytes; i++) {
- result.appendFormat("%02x", array[i]);
- }
-
- return result;
- }
-
- protected:
- uint8_t uuid[UUID_SIZE];
- };
-
-
- struct NuPlayer2Drm {
-
- // static helpers - internal
-
- protected:
- static Vector<DrmUUID> parsePSSH(const void *pssh, size_t psshsize);
- static Vector<DrmUUID> getSupportedDrmSchemes(const void *pssh, size_t psshsize);
-
- // static helpers - public
-
- public:
- static sp<ABuffer> retrieveDrmInfo(const void *pssh, uint32_t psshsize);
- static status_t retrieveDrmInfo(PsshInfo *, PlayerMessage *);
-
- }; // NuPlayer2Drm
-
-} // android
-
-#endif //NUPLAYER2_DRM_H_
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
deleted file mode 100644
index fd459df..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ /dev/null
@@ -1,2096 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "NuPlayer2Renderer"
-#include <utils/Log.h>
-
-#include "JWakeLock.h"
-#include "NuPlayer2Renderer.h"
-#include <algorithm>
-#include <cutils/properties.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/AUtils.h>
-#include <media/stagefright/MediaClock.h>
-#include <media/stagefright/MediaCodecConstants.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/VideoFrameScheduler2.h>
-#include <media/MediaCodecBuffer.h>
-
-#include <inttypes.h>
-
-namespace android {
-
-/*
- * Example of common configuration settings in shell script form
-
- #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
- adb shell setprop audio.offload.disable 1
-
- #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
- adb shell setprop audio.offload.video 1
-
- #Use audio callbacks for PCM data
- adb shell setprop media.stagefright.audio.cbk 1
-
- #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
- adb shell setprop media.stagefright.audio.deep 1
-
- #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
- adb shell setprop media.stagefright.audio.sink 1000
-
- * These configurations take effect for the next track played (not the current track).
- */
-
-static inline bool getUseAudioCallbackSetting() {
- return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
-}
-
-static inline int32_t getAudioSinkPcmMsSetting() {
- return property_get_int32(
- "media.stagefright.audio.sink", 500 /* default_value */);
-}
-
-// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
-// is closed to allow the audio DSP to power down.
-static const int64_t kOffloadPauseMaxUs = 10000000LL;
-
-// Maximum allowed delay from AudioSink, 1.5 seconds.
-static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000LL;
-
-static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
-
-// Default video frame display duration when only video exists.
-// Used to set max media time in MediaClock.
-static const int64_t kDefaultVideoFrameIntervalUs = 100000LL;
-
-// static
-const NuPlayer2::Renderer::PcmInfo NuPlayer2::Renderer::AUDIO_PCMINFO_INITIALIZER = {
- AUDIO_CHANNEL_NONE,
- AUDIO_OUTPUT_FLAG_NONE,
- AUDIO_FORMAT_INVALID,
- 0, // mNumChannels
- 0 // mSampleRate
-};
-
-// static
-const int64_t NuPlayer2::Renderer::kMinPositionUpdateDelayUs = 100000LL;
-
-static audio_format_t constexpr audioFormatFromEncoding(int32_t pcmEncoding) {
- switch (pcmEncoding) {
- case kAudioEncodingPcmFloat:
- return AUDIO_FORMAT_PCM_FLOAT;
- case kAudioEncodingPcm16bit:
- return AUDIO_FORMAT_PCM_16_BIT;
- case kAudioEncodingPcm8bit:
- return AUDIO_FORMAT_PCM_8_BIT; // TODO: do we want to support this?
- default:
- ALOGE("%s: Invalid encoding: %d", __func__, pcmEncoding);
- return AUDIO_FORMAT_INVALID;
- }
-}
-
-NuPlayer2::Renderer::Renderer(
- const sp<MediaPlayer2Interface::AudioSink> &sink,
- const sp<MediaClock> &mediaClock,
- const sp<AMessage> ¬ify,
- const sp<JObjectHolder> &context,
- uint32_t flags)
- : mAudioSink(sink),
- mUseVirtualAudioSink(false),
- mNotify(notify),
- mFlags(flags),
- mNumFramesWritten(0),
- mDrainAudioQueuePending(false),
- mDrainVideoQueuePending(false),
- mAudioQueueGeneration(0),
- mVideoQueueGeneration(0),
- mAudioDrainGeneration(0),
- mVideoDrainGeneration(0),
- mAudioEOSGeneration(0),
- mMediaClock(mediaClock),
- mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
- mAudioFirstAnchorTimeMediaUs(-1),
- mAnchorTimeMediaUs(-1),
- mAnchorNumFramesWritten(-1),
- mVideoLateByUs(0LL),
- mNextVideoTimeMediaUs(-1),
- mHasAudio(false),
- mHasVideo(false),
- mNotifyCompleteAudio(false),
- mNotifyCompleteVideo(false),
- mSyncQueues(false),
- mPaused(true),
- mPauseDrainAudioAllowedUs(0),
- mVideoSampleReceived(false),
- mVideoRenderingStarted(false),
- mVideoRenderingStartGeneration(0),
- mAudioRenderingStartGeneration(0),
- mRenderingDataDelivered(false),
- mNextAudioClockUpdateTimeUs(-1),
- mLastAudioMediaTimeUs(-1),
- mAudioOffloadPauseTimeoutGeneration(0),
- mAudioTornDown(false),
- mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
- mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
- mTotalBuffersQueued(0),
- mLastAudioBufferDrained(0),
- mUseAudioCallback(false),
- mWakeLock(new JWakeLock(context)) {
- CHECK(mediaClock != NULL);
- mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
-}
-
-NuPlayer2::Renderer::~Renderer() {
- if (offloadingAudio()) {
- mAudioSink->stop();
- mAudioSink->flush();
- mAudioSink->close();
- }
-
-    // Try to avoid a race condition in case the callback is still active.
- Mutex::Autolock autoLock(mLock);
- if (mUseAudioCallback) {
- flushQueue(&mAudioQueue);
- flushQueue(&mVideoQueue);
- }
- mWakeLock.clear();
- mVideoScheduler.clear();
- mNotify.clear();
- mAudioSink.clear();
-}
-
-void NuPlayer2::Renderer::queueBuffer(
- bool audio,
- const sp<MediaCodecBuffer> &buffer,
- const sp<AMessage> ¬ifyConsumed) {
- sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
- msg->setInt32("queueGeneration", getQueueGeneration(audio));
- msg->setInt32("audio", static_cast<int32_t>(audio));
- msg->setObject("buffer", buffer);
- msg->setMessage("notifyConsumed", notifyConsumed);
- msg->post();
-}
-
-void NuPlayer2::Renderer::queueEOS(bool audio, status_t finalResult) {
- CHECK_NE(finalResult, (status_t)OK);
-
- sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
- msg->setInt32("queueGeneration", getQueueGeneration(audio));
- msg->setInt32("audio", static_cast<int32_t>(audio));
- msg->setInt32("finalResult", finalResult);
- msg->post();
-}
-
-status_t NuPlayer2::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
- sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
- writeToAMessage(msg, rate);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
- return err;
-}
-
-status_t NuPlayer2::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
- if (rate.mSpeed <= 0.f) {
- ALOGW("playback rate cannot be %f", rate.mSpeed);
- return BAD_VALUE;
- }
-
- if (mAudioSink != NULL && mAudioSink->ready()) {
- status_t err = mAudioSink->setPlaybackRate(rate);
- if (err != OK) {
- ALOGW("failed to get playback rate from audio sink, err(%d)", err);
- return err;
- }
- }
- mPlaybackSettings = rate;
- mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
- return OK;
-}
-
-status_t NuPlayer2::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
- sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- if (err == OK) {
- readFromAMessage(response, rate);
- }
- }
- return err;
-}
-
-status_t NuPlayer2::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
- if (mAudioSink != NULL && mAudioSink->ready()) {
- status_t err = mAudioSink->getPlaybackRate(rate);
- if (err == OK) {
- if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
- ALOGW("correcting mismatch in internal/external playback rate, %f vs %f",
- rate->mSpeed, mPlaybackSettings.mSpeed);
- }
-            // Get the playback settings actually used by the audio sink, as they may be
-            // slightly off because the audio sink does not apply small changes.
- mPlaybackSettings = *rate;
- }
- return err;
- }
- *rate = mPlaybackSettings;
- return OK;
-}
-
-status_t NuPlayer2::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
- sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
- writeToAMessage(msg, sync, videoFpsHint);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
- return err;
-}
-
-status_t NuPlayer2::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
- if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
- return BAD_VALUE;
- }
- // TODO: support sync sources
- return INVALID_OPERATION;
-}
-
-status_t NuPlayer2::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
- sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- if (err == OK) {
- readFromAMessage(response, sync, videoFps);
- }
- }
- return err;
-}
-
-status_t NuPlayer2::Renderer::onGetSyncSettings(
- AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
- *sync = mSyncSettings;
- *videoFps = -1.f;
- return OK;
-}
-
-void NuPlayer2::Renderer::flush(bool audio, bool notifyComplete) {
- {
- Mutex::Autolock autoLock(mLock);
- if (audio) {
- mNotifyCompleteAudio |= notifyComplete;
- clearAudioFirstAnchorTime_l();
- ++mAudioQueueGeneration;
- ++mAudioDrainGeneration;
- } else {
- mNotifyCompleteVideo |= notifyComplete;
- ++mVideoQueueGeneration;
- ++mVideoDrainGeneration;
- mNextVideoTimeMediaUs = -1;
- }
-
- mMediaClock->clearAnchor();
- mVideoLateByUs = 0;
- mSyncQueues = false;
- }
-
- sp<AMessage> msg = new AMessage(kWhatFlush, this);
- msg->setInt32("audio", static_cast<int32_t>(audio));
- msg->post();
-}
-
-void NuPlayer2::Renderer::signalTimeDiscontinuity() {
-}
-
-void NuPlayer2::Renderer::signalDisableOffloadAudio() {
- (new AMessage(kWhatDisableOffloadAudio, this))->post();
-}
-
-void NuPlayer2::Renderer::signalEnableOffloadAudio() {
- (new AMessage(kWhatEnableOffloadAudio, this))->post();
-}
-
-void NuPlayer2::Renderer::pause() {
- (new AMessage(kWhatPause, this))->post();
-}
-
-void NuPlayer2::Renderer::resume() {
- (new AMessage(kWhatResume, this))->post();
-}
-
-void NuPlayer2::Renderer::setVideoFrameRate(float fps) {
- sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
- msg->setFloat("frame-rate", fps);
- msg->post();
-}
-
-// Called on any threads without mLock acquired.
-status_t NuPlayer2::Renderer::getCurrentPosition(int64_t *mediaUs) {
- status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
- if (result == OK) {
- return result;
- }
-
- // MediaClock has not started yet. Try to start it if possible.
- {
- Mutex::Autolock autoLock(mLock);
- if (mAudioFirstAnchorTimeMediaUs == -1) {
- return result;
- }
-
- AudioTimestamp ts;
- status_t res = mAudioSink->getTimestamp(ts);
- if (res != OK) {
- return result;
- }
-
- // AudioSink has rendered some frames.
- int64_t nowUs = ALooper::GetNowUs();
- int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
- + mAudioFirstAnchorTimeMediaUs;
- mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
- }
-
- return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
-}
-
-void NuPlayer2::Renderer::clearAudioFirstAnchorTime_l() {
- mAudioFirstAnchorTimeMediaUs = -1;
- mMediaClock->setStartingTimeMedia(-1);
-}
-
-void NuPlayer2::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
- if (mAudioFirstAnchorTimeMediaUs == -1) {
- mAudioFirstAnchorTimeMediaUs = mediaUs;
- mMediaClock->setStartingTimeMedia(mediaUs);
- }
-}
-
-// Called on renderer looper.
-void NuPlayer2::Renderer::clearAnchorTime() {
- mMediaClock->clearAnchor();
- mAnchorTimeMediaUs = -1;
- mAnchorNumFramesWritten = -1;
-}
-
-void NuPlayer2::Renderer::setVideoLateByUs(int64_t lateUs) {
- Mutex::Autolock autoLock(mLock);
- mVideoLateByUs = lateUs;
-}
-
-int64_t NuPlayer2::Renderer::getVideoLateByUs() {
- Mutex::Autolock autoLock(mLock);
- return mVideoLateByUs;
-}
-
-status_t NuPlayer2::Renderer::openAudioSink(
- const sp<AMessage> &format,
- bool offloadOnly,
- bool hasVideo,
- uint32_t flags,
- bool *isOffloaded,
- bool isStreaming) {
- sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
- msg->setMessage("format", format);
- msg->setInt32("offload-only", offloadOnly);
- msg->setInt32("has-video", hasVideo);
- msg->setInt32("flags", flags);
- msg->setInt32("isStreaming", isStreaming);
-
- sp<AMessage> response;
- status_t postStatus = msg->postAndAwaitResponse(&response);
-
- int32_t err;
- if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
- err = INVALID_OPERATION;
- } else if (err == OK && isOffloaded != NULL) {
- int32_t offload;
- CHECK(response->findInt32("offload", &offload));
- *isOffloaded = (offload != 0);
- }
- return err;
-}
-
-void NuPlayer2::Renderer::closeAudioSink() {
- sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
-
- sp<AMessage> response;
- msg->postAndAwaitResponse(&response);
-}
-
-void NuPlayer2::Renderer::changeAudioFormat(
- const sp<AMessage> &format,
- bool offloadOnly,
- bool hasVideo,
- uint32_t flags,
- bool isStreaming,
- const sp<AMessage> ¬ify) {
- sp<AMessage> meta = new AMessage;
- meta->setMessage("format", format);
- meta->setInt32("offload-only", offloadOnly);
- meta->setInt32("has-video", hasVideo);
- meta->setInt32("flags", flags);
- meta->setInt32("isStreaming", isStreaming);
-
- sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
- msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
- msg->setMessage("notify", notify);
- msg->setMessage("meta", meta);
- msg->post();
-}
-
-void NuPlayer2::Renderer::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatOpenAudioSink:
- {
- sp<AMessage> format;
- CHECK(msg->findMessage("format", &format));
-
- int32_t offloadOnly;
- CHECK(msg->findInt32("offload-only", &offloadOnly));
-
- int32_t hasVideo;
- CHECK(msg->findInt32("has-video", &hasVideo));
-
- uint32_t flags;
- CHECK(msg->findInt32("flags", (int32_t *)&flags));
-
- uint32_t isStreaming;
- CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
-
- status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
-
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->setInt32("offload", offloadingAudio());
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-
- break;
- }
-
- case kWhatCloseAudioSink:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- onCloseAudioSink();
-
- sp<AMessage> response = new AMessage;
- response->postReply(replyID);
- break;
- }
-
- case kWhatStopAudioSink:
- {
- mAudioSink->stop();
- break;
- }
-
- case kWhatChangeAudioFormat:
- {
- int32_t queueGeneration;
- CHECK(msg->findInt32("queueGeneration", &queueGeneration));
-
- sp<AMessage> notify;
- CHECK(msg->findMessage("notify", ¬ify));
-
- if (offloadingAudio()) {
- ALOGW("changeAudioFormat should NOT be called in offload mode");
- notify->setInt32("err", INVALID_OPERATION);
- notify->post();
- break;
- }
-
- sp<AMessage> meta;
- CHECK(msg->findMessage("meta", &meta));
-
- if (queueGeneration != getQueueGeneration(true /* audio */)
- || mAudioQueue.empty()) {
- onChangeAudioFormat(meta, notify);
- break;
- }
-
- QueueEntry entry;
- entry.mNotifyConsumed = notify;
- entry.mMeta = meta;
-
- Mutex::Autolock autoLock(mLock);
- mAudioQueue.push_back(entry);
- postDrainAudioQueue_l();
-
- break;
- }
-
- case kWhatDrainAudioQueue:
- {
- mDrainAudioQueuePending = false;
-
- int32_t generation;
- CHECK(msg->findInt32("drainGeneration", &generation));
- if (generation != getDrainGeneration(true /* audio */)) {
- break;
- }
-
- if (onDrainAudioQueue()) {
- uint32_t numFramesPlayed;
- CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
- (status_t)OK);
-
- // Handle AudioTrack race when start is immediately called after flush.
- uint32_t numFramesPendingPlayout =
- (mNumFramesWritten > numFramesPlayed ?
- mNumFramesWritten - numFramesPlayed : 0);
-
- // This is how long the audio sink will have data to
- // play back.
- int64_t delayUs =
- mAudioSink->msecsPerFrame()
- * numFramesPendingPlayout * 1000ll;
- if (mPlaybackSettings.mSpeed > 1.0f) {
- delayUs /= mPlaybackSettings.mSpeed;
- }
-
- // Let's give it more data after about half that time
- // has elapsed.
- delayUs /= 2;
- // check the buffer size to estimate maximum delay permitted.
- const int64_t maxDrainDelayUs = std::max(
- mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
- ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
- (long long)delayUs, (long long)maxDrainDelayUs);
- Mutex::Autolock autoLock(mLock);
- postDrainAudioQueue_l(delayUs);
- }
- break;
- }
-
- case kWhatDrainVideoQueue:
- {
- int32_t generation;
- CHECK(msg->findInt32("drainGeneration", &generation));
- if (generation != getDrainGeneration(false /* audio */)) {
- break;
- }
-
- mDrainVideoQueuePending = false;
-
- onDrainVideoQueue();
-
- postDrainVideoQueue();
- break;
- }
-
- case kWhatPostDrainVideoQueue:
- {
- int32_t generation;
- CHECK(msg->findInt32("drainGeneration", &generation));
- if (generation != getDrainGeneration(false /* audio */)) {
- break;
- }
-
- mDrainVideoQueuePending = false;
- postDrainVideoQueue();
- break;
- }
-
- case kWhatQueueBuffer:
- {
- onQueueBuffer(msg);
- break;
- }
-
- case kWhatQueueEOS:
- {
- onQueueEOS(msg);
- break;
- }
-
- case kWhatEOS:
- {
- int32_t generation;
- CHECK(msg->findInt32("audioEOSGeneration", &generation));
- if (generation != mAudioEOSGeneration) {
- break;
- }
- status_t finalResult;
- CHECK(msg->findInt32("finalResult", &finalResult));
- notifyEOS(true /* audio */, finalResult);
- break;
- }
-
- case kWhatConfigPlayback:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- AudioPlaybackRate rate;
- readFromAMessage(msg, &rate);
- status_t err = onConfigPlayback(rate);
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatGetPlaybackSettings:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
- status_t err = onGetPlaybackSettings(&rate);
- sp<AMessage> response = new AMessage;
- if (err == OK) {
- writeToAMessage(response, rate);
- }
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatConfigSync:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- AVSyncSettings sync;
- float videoFpsHint;
- readFromAMessage(msg, &sync, &videoFpsHint);
- status_t err = onConfigSync(sync, videoFpsHint);
- sp<AMessage> response = new AMessage;
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatGetSyncSettings:
- {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- ALOGV("kWhatGetSyncSettings");
- AVSyncSettings sync;
- float videoFps = -1.f;
- status_t err = onGetSyncSettings(&sync, &videoFps);
- sp<AMessage> response = new AMessage;
- if (err == OK) {
- writeToAMessage(response, sync, videoFps);
- }
- response->setInt32("err", err);
- response->postReply(replyID);
- break;
- }
-
- case kWhatFlush:
- {
- onFlush(msg);
- break;
- }
-
- case kWhatDisableOffloadAudio:
- {
- onDisableOffloadAudio();
- break;
- }
-
- case kWhatEnableOffloadAudio:
- {
- onEnableOffloadAudio();
- break;
- }
-
- case kWhatPause:
- {
- onPause();
- break;
- }
-
- case kWhatResume:
- {
- onResume();
- break;
- }
-
- case kWhatSetVideoFrameRate:
- {
- float fps;
- CHECK(msg->findFloat("frame-rate", &fps));
- onSetVideoFrameRate(fps);
- break;
- }
-
- case kWhatAudioTearDown:
- {
- int32_t reason;
- CHECK(msg->findInt32("reason", &reason));
-
- onAudioTearDown((AudioTearDownReason)reason);
- break;
- }
-
- case kWhatAudioOffloadPauseTimeout:
- {
- int32_t generation;
- CHECK(msg->findInt32("drainGeneration", &generation));
- if (generation != mAudioOffloadPauseTimeoutGeneration) {
- break;
- }
- ALOGV("Audio Offload tear down due to pause timeout.");
- onAudioTearDown(kDueToTimeout);
- mWakeLock->release();
- break;
- }
-
- default:
- TRESPASS();
- break;
- }
-}
-
-void NuPlayer2::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
- if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
- return;
- }
-
- if (mAudioQueue.empty()) {
- return;
- }
-
- // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
- if (mPaused) {
- const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
- if (diffUs > delayUs) {
- delayUs = diffUs;
- }
- }
-
- mDrainAudioQueuePending = true;
- sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
- msg->setInt32("drainGeneration", mAudioDrainGeneration);
- msg->post(delayUs);
-}
-
-void NuPlayer2::Renderer::prepareForMediaRenderingStart_l() {
- mAudioRenderingStartGeneration = mAudioDrainGeneration;
- mVideoRenderingStartGeneration = mVideoDrainGeneration;
- mRenderingDataDelivered = false;
-}
-
-void NuPlayer2::Renderer::notifyIfMediaRenderingStarted_l() {
- if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
- mAudioRenderingStartGeneration == mAudioDrainGeneration) {
- mRenderingDataDelivered = true;
- if (mPaused) {
- return;
- }
- mVideoRenderingStartGeneration = -1;
- mAudioRenderingStartGeneration = -1;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatMediaRenderingStart);
- notify->post();
- }
-}
-
-// static
-size_t NuPlayer2::Renderer::AudioSinkCallback(
- MediaPlayer2Interface::AudioSink * /* audioSink */,
- void *buffer,
- size_t size,
- void *cookie,
- MediaPlayer2Interface::AudioSink::cb_event_t event) {
- NuPlayer2::Renderer *me = (NuPlayer2::Renderer *)cookie;
-
- switch (event) {
- case MediaPlayer2Interface::AudioSink::CB_EVENT_FILL_BUFFER:
- {
- return me->fillAudioBuffer(buffer, size);
- break;
- }
-
- case MediaPlayer2Interface::AudioSink::CB_EVENT_STREAM_END:
- {
- ALOGV("AudioSink::CB_EVENT_STREAM_END");
- me->notifyEOSCallback();
- break;
- }
-
- case MediaPlayer2Interface::AudioSink::CB_EVENT_TEAR_DOWN:
- {
- ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
- me->notifyAudioTearDown(kDueToError);
- break;
- }
- }
-
- return 0;
-}
-
-void NuPlayer2::Renderer::notifyEOSCallback() {
- Mutex::Autolock autoLock(mLock);
-
- if (!mUseAudioCallback) {
- return;
- }
-
- notifyEOS_l(true /* audio */, ERROR_END_OF_STREAM);
-}
-
-size_t NuPlayer2::Renderer::fillAudioBuffer(void *buffer, size_t size) {
- Mutex::Autolock autoLock(mLock);
-
- if (!mUseAudioCallback) {
- return 0;
- }
-
- bool hasEOS = false;
-
- size_t sizeCopied = 0;
- bool firstEntry = true;
- QueueEntry *entry; // will be valid after while loop if hasEOS is set.
- while (sizeCopied < size && !mAudioQueue.empty()) {
- entry = &*mAudioQueue.begin();
-
- if (entry->mBuffer == NULL) { // EOS
- hasEOS = true;
- mAudioQueue.erase(mAudioQueue.begin());
- break;
- }
-
- if (firstEntry && entry->mOffset == 0) {
- firstEntry = false;
- int64_t mediaTimeUs;
- CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
- ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
- setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
- }
-
- size_t copy = entry->mBuffer->size() - entry->mOffset;
- size_t sizeRemaining = size - sizeCopied;
- if (copy > sizeRemaining) {
- copy = sizeRemaining;
- }
-
- memcpy((char *)buffer + sizeCopied,
- entry->mBuffer->data() + entry->mOffset,
- copy);
-
- entry->mOffset += copy;
- if (entry->mOffset == entry->mBuffer->size()) {
- entry->mNotifyConsumed->post();
- mAudioQueue.erase(mAudioQueue.begin());
- entry = NULL;
- }
- sizeCopied += copy;
-
- notifyIfMediaRenderingStarted_l();
- }
-
- if (mAudioFirstAnchorTimeMediaUs >= 0) {
- int64_t nowUs = ALooper::GetNowUs();
- int64_t nowMediaUs =
- mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
- // we don't know how much data we are queueing for offloaded tracks.
- mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
- }
-
-    // for non-offloaded audio, we need to compute the frames written because
-    // there is no EVENT_STREAM_END notification. The frames-written count gives
-    // an estimate of the pending played-out duration.
- if (!offloadingAudio()) {
- mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
- }
-
- if (hasEOS) {
- (new AMessage(kWhatStopAudioSink, this))->post();
- // As there is currently no EVENT_STREAM_END callback notification for
- // non-offloaded audio tracks, we need to post the EOS ourselves.
- if (!offloadingAudio()) {
- int64_t postEOSDelayUs = 0;
- if (mAudioSink->needsTrailingPadding()) {
- postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
- }
- ALOGV("fillAudioBuffer: notifyEOS_l "
- "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld",
- mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
- notifyEOS_l(true /* audio */, entry->mFinalResult, postEOSDelayUs);
- }
- }
- return sizeCopied;
-}
-
-void NuPlayer2::Renderer::drainAudioQueueUntilLastEOS() {
- List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
- bool foundEOS = false;
- while (it != mAudioQueue.end()) {
- int32_t eos;
- QueueEntry *entry = &*it++;
- if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
- || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
- itEOS = it;
- foundEOS = true;
- }
- }
-
- if (foundEOS) {
- // post all replies before EOS and drop the samples
- for (it = mAudioQueue.begin(); it != itEOS; it++) {
- if (it->mBuffer == nullptr) {
- if (it->mNotifyConsumed == nullptr) {
- // delay doesn't matter as we don't even have an AudioTrack
- notifyEOS(true /* audio */, it->mFinalResult);
- } else {
- // TAG for re-opening audio sink.
- onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
- }
- } else {
- it->mNotifyConsumed->post();
- }
- }
- mAudioQueue.erase(mAudioQueue.begin(), itEOS);
- }
-}
-
-bool NuPlayer2::Renderer::onDrainAudioQueue() {
- // do not drain audio during teardown as queued buffers may be invalid.
- if (mAudioTornDown) {
- return false;
- }
- // TODO: This call to getPosition checks if AudioTrack has been created
- // in AudioSink before draining audio. If AudioTrack doesn't exist, then
- // CHECKs on getPosition will fail.
- // We still need to figure out why AudioTrack is not created when
- // this function is called. One possible reason could be leftover
-    // audio. Another place to check is whether the decoder has received
-    // INFO_FORMAT_CHANGED as its first buffer, since the AudioSink is opened
-    // there, as well as possible interactions with flush immediately after
-    // start. Investigate error message
- // "vorbis_dsp_synthesis returned -135", along with RTSP.
- uint32_t numFramesPlayed;
- if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
- // When getPosition fails, renderer will not reschedule the draining
- // unless new samples are queued.
-        // If we have pending EOS (or "eos" markers for discontinuities), we need
-        // to post them now, as NuPlayer2Decoder might be waiting for them.
- drainAudioQueueUntilLastEOS();
-
- ALOGW("onDrainAudioQueue(): audio sink is not ready");
- return false;
- }
-
-#if 0
- ssize_t numFramesAvailableToWrite =
- mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
-
- if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
- ALOGI("audio sink underrun");
- } else {
- ALOGV("audio queue has %d frames left to play",
- mAudioSink->frameCount() - numFramesAvailableToWrite);
- }
-#endif
-
- uint32_t prevFramesWritten = mNumFramesWritten;
- while (!mAudioQueue.empty()) {
- QueueEntry *entry = &*mAudioQueue.begin();
-
- if (entry->mBuffer == NULL) {
- if (entry->mNotifyConsumed != nullptr) {
- // TAG for re-open audio sink.
- onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
- mAudioQueue.erase(mAudioQueue.begin());
- continue;
- }
-
- // EOS
- if (mPaused) {
- // Do not notify EOS when paused.
-                // This is needed to avoid switching to the next clip while paused.
- ALOGV("onDrainAudioQueue(): Do not notify EOS when paused");
- return false;
- }
-
- int64_t postEOSDelayUs = 0;
- if (mAudioSink->needsTrailingPadding()) {
- postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
- }
- notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
- mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
-
- mAudioQueue.erase(mAudioQueue.begin());
- entry = NULL;
- if (mAudioSink->needsTrailingPadding()) {
- // If we're not in gapless playback (i.e. through setNextPlayer), we
- // need to stop the track here, because that will play out the last
- // little bit at the end of the file. Otherwise short files won't play.
- mAudioSink->stop();
- mNumFramesWritten = 0;
- }
- return false;
- }
-
- mLastAudioBufferDrained = entry->mBufferOrdinal;
-
- // ignore 0-sized buffer which could be EOS marker with no data
- if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
- int64_t mediaTimeUs;
- CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
- ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
- mediaTimeUs / 1E6);
- onNewAudioMediaTime(mediaTimeUs);
- }
-
- size_t copy = entry->mBuffer->size() - entry->mOffset;
-
- ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
- copy, false /* blocking */);
- if (written < 0) {
- // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
- if (written == WOULD_BLOCK) {
- ALOGV("AudioSink write would block when writing %zu bytes", copy);
- } else {
- ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
- // This can only happen when AudioSink was opened with doNotReconnect flag set to
- // true, in which case the NuPlayer2 will handle the reconnect.
- notifyAudioTearDown(kDueToError);
- }
- break;
- }
-
- entry->mOffset += written;
- size_t remainder = entry->mBuffer->size() - entry->mOffset;
- if ((ssize_t)remainder < mAudioSink->frameSize()) {
- if (remainder > 0) {
- ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
- remainder);
- entry->mOffset += remainder;
- copy -= remainder;
- }
-
- entry->mNotifyConsumed->post();
- mAudioQueue.erase(mAudioQueue.begin());
-
- entry = NULL;
- }
-
- size_t copiedFrames = written / mAudioSink->frameSize();
- mNumFramesWritten += copiedFrames;
-
- {
- Mutex::Autolock autoLock(mLock);
- int64_t maxTimeMedia;
- maxTimeMedia =
- mAnchorTimeMediaUs +
- (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
- * 1000LL * mAudioSink->msecsPerFrame());
- mMediaClock->updateMaxTimeMedia(maxTimeMedia);
-
- notifyIfMediaRenderingStarted_l();
- }
-
- if (written != (ssize_t)copy) {
- // A short count was received from AudioSink::write()
- //
- // AudioSink write is called in non-blocking mode.
- // It may return with a short count when:
- //
- // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
- // discarded.
- // 2) The data to be copied exceeds the available buffer in AudioSink.
- // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
- // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
-
- // (Case 1)
- // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
- // needs to fail, as we should not carry over fractional frames between calls.
- CHECK_EQ(copy % mAudioSink->frameSize(), 0u);
-
- // (Case 2, 3, 4)
- // Return early to the caller.
- // Beware of calling immediately again as this may busy-loop if you are not careful.
- ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
- break;
- }
- }
-
- // calculate whether we need to reschedule another write.
- bool reschedule = !mAudioQueue.empty()
- && (!mPaused
- || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
- //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u",
- // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
- return reschedule;
-}
-
-int64_t NuPlayer2::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
- int32_t sampleRate = offloadingAudio() ?
- mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
- if (sampleRate == 0) {
- ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
- return 0;
- }
- return (int64_t)(numFrames * 1000000LL / sampleRate);
-}
-
-// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
-int64_t NuPlayer2::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
- int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
- if (mUseVirtualAudioSink) {
- int64_t nowUs = ALooper::GetNowUs();
- int64_t mediaUs;
- if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
- return 0LL;
- } else {
- return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
- }
- }
-
- const int64_t audioSinkPlayedUs = mAudioSink->getPlayedOutDurationUs(nowUs);
- int64_t pendingUs = writtenAudioDurationUs - audioSinkPlayedUs;
- if (pendingUs < 0) {
- // This shouldn't happen unless the timestamp is stale.
- ALOGW("%s: pendingUs %lld < 0, clamping to zero, potential resume after pause "
- "writtenAudioDurationUs: %lld, audioSinkPlayedUs: %lld",
- __func__, (long long)pendingUs,
- (long long)writtenAudioDurationUs, (long long)audioSinkPlayedUs);
- pendingUs = 0;
- }
- return pendingUs;
-}
-
-int64_t NuPlayer2::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
- int64_t realUs;
- if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
- // If failed to get current position, e.g. due to audio clock is
- // not ready, then just play out video immediately without delay.
- return nowUs;
- }
- return realUs;
-}
-
-void NuPlayer2::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
- Mutex::Autolock autoLock(mLock);
- // TRICKY: vorbis decoder generates multiple frames with the same
- // timestamp, so only update on the first frame with a given timestamp
- if (mediaTimeUs == mAnchorTimeMediaUs) {
- return;
- }
- setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
-
- // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
- if (mNextAudioClockUpdateTimeUs == -1) {
- AudioTimestamp ts;
- if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
- mNextAudioClockUpdateTimeUs = 0; // start our clock updates
- }
- }
- int64_t nowUs = ALooper::GetNowUs();
- if (mNextAudioClockUpdateTimeUs >= 0) {
- if (nowUs >= mNextAudioClockUpdateTimeUs) {
- int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
- mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
- mUseVirtualAudioSink = false;
- mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
- }
- } else {
- int64_t unused;
- if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
- && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
- > kMaxAllowedAudioSinkDelayUs)) {
- // Enough data has been sent to AudioSink, but AudioSink has not rendered
- // any data yet. Something is wrong with AudioSink, e.g., the device is not
- // connected to audio out.
- // Switch to system clock. This essentially creates a virtual AudioSink with
-            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
- // This virtual AudioSink renders audio data starting from the very first sample
- // and it's paced by system clock.
- ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
- mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
- mUseVirtualAudioSink = true;
- }
- }
- mAnchorNumFramesWritten = mNumFramesWritten;
- mAnchorTimeMediaUs = mediaTimeUs;
-}
-
-// Called without mLock acquired.
-void NuPlayer2::Renderer::postDrainVideoQueue() {
- if (mDrainVideoQueuePending
- || getSyncQueues()
- || (mPaused && mVideoSampleReceived)) {
- return;
- }
-
- if (mVideoQueue.empty()) {
- return;
- }
-
- QueueEntry &entry = *mVideoQueue.begin();
-
- sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
- msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
-
- if (entry.mBuffer == NULL) {
- // EOS doesn't carry a timestamp.
- msg->post();
- mDrainVideoQueuePending = true;
- return;
- }
-
- int64_t nowUs = ALooper::GetNowUs();
- if (mFlags & FLAG_REAL_TIME) {
- int64_t realTimeUs;
- CHECK(entry.mBuffer->meta()->findInt64("timeUs", &realTimeUs));
-
- realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
-
- int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
-
- int64_t delayUs = realTimeUs - nowUs;
-
- ALOGW_IF(delayUs > 500000, "unusually high delayUs: %lld", (long long)delayUs);
- // post 2 display refreshes before rendering is due
- msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
-
- mDrainVideoQueuePending = true;
- return;
- }
-
- int64_t mediaTimeUs;
- CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
-
- {
- Mutex::Autolock autoLock(mLock);
- if (mAnchorTimeMediaUs < 0) {
- mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
- mAnchorTimeMediaUs = mediaTimeUs;
- }
- }
- mNextVideoTimeMediaUs = mediaTimeUs;
- if (!mHasAudio) {
- // smooth out videos >= 10fps
- mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
- }
-
- if (!mVideoSampleReceived || mediaTimeUs < mAudioFirstAnchorTimeMediaUs) {
- msg->post();
- } else {
- int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
-
- // post 2 display refreshes before rendering is due
- mMediaClock->addTimer(msg, mediaTimeUs, -twoVsyncsUs);
- }
-
- mDrainVideoQueuePending = true;
-}
-
-void NuPlayer2::Renderer::onDrainVideoQueue() {
- if (mVideoQueue.empty()) {
- return;
- }
-
- QueueEntry *entry = &*mVideoQueue.begin();
-
- if (entry->mBuffer == NULL) {
- // EOS
-
- notifyEOS(false /* audio */, entry->mFinalResult);
-
- mVideoQueue.erase(mVideoQueue.begin());
- entry = NULL;
-
- setVideoLateByUs(0);
- return;
- }
-
- int64_t nowUs = ALooper::GetNowUs();
- int64_t realTimeUs;
- int64_t mediaTimeUs = -1;
- if (mFlags & FLAG_REAL_TIME) {
- CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
- } else {
- CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
-
- realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
- }
- realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
-
- bool tooLate = false;
-
- if (!mPaused) {
- setVideoLateByUs(nowUs - realTimeUs);
- tooLate = (mVideoLateByUs > 40000);
-
- if (tooLate) {
- ALOGV("video late by %lld us (%.2f secs)",
- (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
- } else {
- int64_t mediaUs = 0;
- mMediaClock->getMediaTime(realTimeUs, &mediaUs);
- ALOGV("rendering video at media time %.2f secs",
- (mFlags & FLAG_REAL_TIME ? realTimeUs :
- mediaUs) / 1E6);
-
- if (!(mFlags & FLAG_REAL_TIME)
- && mLastAudioMediaTimeUs != -1
- && mediaTimeUs > mLastAudioMediaTimeUs) {
- // If audio ends before video, video continues to drive media clock.
- // Also smooth out videos >= 10fps.
- mMediaClock->updateMaxTimeMedia(mediaTimeUs + kDefaultVideoFrameIntervalUs);
- }
- }
- } else {
- setVideoLateByUs(0);
- if (!mVideoSampleReceived && !mHasAudio) {
- // This will ensure that the first frame after a flush won't be used as anchor
- // when renderer is in paused state, because resume can happen any time after seek.
- clearAnchorTime();
- }
- }
-
- // Always render the first video frame while keeping stats on A/V sync.
- if (!mVideoSampleReceived) {
- realTimeUs = nowUs;
- tooLate = false;
- }
-
- entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000LL);
- entry->mNotifyConsumed->setInt32("render", !tooLate);
- entry->mNotifyConsumed->post();
- mVideoQueue.erase(mVideoQueue.begin());
- entry = NULL;
-
- mVideoSampleReceived = true;
-
- if (!mPaused) {
- if (!mVideoRenderingStarted) {
- mVideoRenderingStarted = true;
- notifyVideoRenderingStart();
- }
- Mutex::Autolock autoLock(mLock);
- notifyIfMediaRenderingStarted_l();
- }
-}
-
-void NuPlayer2::Renderer::notifyVideoRenderingStart() {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatVideoRenderingStart);
- notify->post();
-}
-
-void NuPlayer2::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
- Mutex::Autolock autoLock(mLock);
- notifyEOS_l(audio, finalResult, delayUs);
-}
-
-void NuPlayer2::Renderer::notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs) {
- if (audio && delayUs > 0) {
- sp<AMessage> msg = new AMessage(kWhatEOS, this);
- msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
- msg->setInt32("finalResult", finalResult);
- msg->post(delayUs);
- return;
- }
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatEOS);
- notify->setInt32("audio", static_cast<int32_t>(audio));
- notify->setInt32("finalResult", finalResult);
- notify->post(delayUs);
-
- if (audio) {
- // Video might outlive audio. Clear anchor to enable video only case.
- mAnchorTimeMediaUs = -1;
- mHasAudio = false;
- if (mNextVideoTimeMediaUs >= 0) {
- int64_t mediaUs = 0;
- int64_t nowUs = ALooper::GetNowUs();
- status_t result = mMediaClock->getMediaTime(nowUs, &mediaUs);
- if (result == OK) {
- if (mNextVideoTimeMediaUs > mediaUs) {
- mMediaClock->updateMaxTimeMedia(mNextVideoTimeMediaUs);
- }
- } else {
- mMediaClock->updateAnchor(
- mNextVideoTimeMediaUs, nowUs,
- mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
- }
- }
- }
-}
-
-void NuPlayer2::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
- sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
- msg->setInt32("reason", reason);
- msg->post();
-}
-
-void NuPlayer2::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
- int32_t audio;
- CHECK(msg->findInt32("audio", &audio));
-
- if (dropBufferIfStale(audio, msg)) {
- return;
- }
-
- if (audio) {
- mHasAudio = true;
- } else {
- mHasVideo = true;
- }
-
- if (mHasVideo) {
- if (mVideoScheduler == NULL) {
- mVideoScheduler = new VideoFrameScheduler2();
- mVideoScheduler->init();
- }
- }
-
- sp<RefBase> obj;
- CHECK(msg->findObject("buffer", &obj));
- sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
-
- sp<AMessage> notifyConsumed;
-    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
-
- QueueEntry entry;
- entry.mBuffer = buffer;
- entry.mNotifyConsumed = notifyConsumed;
- entry.mOffset = 0;
- entry.mFinalResult = OK;
- entry.mBufferOrdinal = ++mTotalBuffersQueued;
-
- if (audio) {
- Mutex::Autolock autoLock(mLock);
- mAudioQueue.push_back(entry);
- postDrainAudioQueue_l();
- } else {
- mVideoQueue.push_back(entry);
- postDrainVideoQueue();
- }
-
- Mutex::Autolock autoLock(mLock);
- if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
- return;
- }
-
- sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
- sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
-
- if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
- // EOS signalled on either queue.
- syncQueuesDone_l();
- return;
- }
-
- int64_t firstAudioTimeUs;
- int64_t firstVideoTimeUs;
- CHECK(firstAudioBuffer->meta()
- ->findInt64("timeUs", &firstAudioTimeUs));
- CHECK(firstVideoBuffer->meta()
- ->findInt64("timeUs", &firstVideoTimeUs));
-
- int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
-
- ALOGV("queueDiff = %.2f secs", diff / 1E6);
-
- if (diff > 100000LL) {
-        // Audio data starts more than 0.1 secs before video.
- // Drop some audio.
-
- (*mAudioQueue.begin()).mNotifyConsumed->post();
- mAudioQueue.erase(mAudioQueue.begin());
- return;
- }
-
- syncQueuesDone_l();
-}
-
-void NuPlayer2::Renderer::syncQueuesDone_l() {
- if (!mSyncQueues) {
- return;
- }
-
- mSyncQueues = false;
-
- if (!mAudioQueue.empty()) {
- postDrainAudioQueue_l();
- }
-
- if (!mVideoQueue.empty()) {
- mLock.unlock();
- postDrainVideoQueue();
- mLock.lock();
- }
-}
-
-void NuPlayer2::Renderer::onQueueEOS(const sp<AMessage> &msg) {
- int32_t audio;
- CHECK(msg->findInt32("audio", &audio));
-
- if (dropBufferIfStale(audio, msg)) {
- return;
- }
-
- int32_t finalResult;
- CHECK(msg->findInt32("finalResult", &finalResult));
-
- QueueEntry entry;
- entry.mOffset = 0;
- entry.mFinalResult = finalResult;
-
- if (audio) {
- Mutex::Autolock autoLock(mLock);
- if (mAudioQueue.empty() && mSyncQueues) {
- syncQueuesDone_l();
- }
- mAudioQueue.push_back(entry);
- postDrainAudioQueue_l();
- } else {
- if (mVideoQueue.empty() && getSyncQueues()) {
- Mutex::Autolock autoLock(mLock);
- syncQueuesDone_l();
- }
- mVideoQueue.push_back(entry);
- postDrainVideoQueue();
- }
-}
-
-void NuPlayer2::Renderer::onFlush(const sp<AMessage> &msg) {
- int32_t audio, notifyComplete;
- CHECK(msg->findInt32("audio", &audio));
-
- {
- Mutex::Autolock autoLock(mLock);
- if (audio) {
- notifyComplete = mNotifyCompleteAudio;
- mNotifyCompleteAudio = false;
- mLastAudioMediaTimeUs = -1;
-
- mHasAudio = false;
- if (mNextVideoTimeMediaUs >= 0) {
- int64_t nowUs = ALooper::GetNowUs();
- mMediaClock->updateAnchor(
- mNextVideoTimeMediaUs, nowUs,
- mNextVideoTimeMediaUs + kDefaultVideoFrameIntervalUs);
- }
- } else {
- notifyComplete = mNotifyCompleteVideo;
- mNotifyCompleteVideo = false;
- mVideoRenderingStarted = false;
- }
-
- // If we're currently syncing the queues, i.e. dropping audio while
- // aligning the first audio/video buffer times and only one of the
- // two queues has data, we may starve that queue by not requesting
- // more buffers from the decoder. If the other source then encounters
- // a discontinuity that leads to flushing, we'll never find the
- // corresponding discontinuity on the other queue.
- // Therefore we'll stop syncing the queues if at least one of them
- // is flushed.
- syncQueuesDone_l();
- }
- clearAnchorTime();
-
- ALOGV("flushing %s", audio ? "audio" : "video");
- if (audio) {
- {
- Mutex::Autolock autoLock(mLock);
- flushQueue(&mAudioQueue);
-
- ++mAudioDrainGeneration;
- ++mAudioEOSGeneration;
- prepareForMediaRenderingStart_l();
-
- // the frame count will be reset after flush.
- clearAudioFirstAnchorTime_l();
- }
-
- mDrainAudioQueuePending = false;
-
- if (offloadingAudio()) {
- mAudioSink->pause();
- mAudioSink->flush();
- if (!mPaused) {
- mAudioSink->start();
- }
- } else {
- mAudioSink->pause();
- mAudioSink->flush();
- // Call stop() to signal to the AudioSink to completely fill the
- // internal buffer before resuming playback.
- // FIXME: this is ignored after flush().
- mAudioSink->stop();
- if (mPaused) {
- // Race condition: if renderer is paused and audio sink is stopped,
- // we need to make sure that the audio track buffer fully drains
- // before delivering data.
- // FIXME: remove this if we can detect if stop() is complete.
- const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
- mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
- } else {
- mAudioSink->start();
- }
- mNumFramesWritten = 0;
- }
- mNextAudioClockUpdateTimeUs = -1;
- } else {
- flushQueue(&mVideoQueue);
-
- mDrainVideoQueuePending = false;
-
- if (mVideoScheduler != NULL) {
- mVideoScheduler->restart();
- }
-
- Mutex::Autolock autoLock(mLock);
- ++mVideoDrainGeneration;
- prepareForMediaRenderingStart_l();
- }
-
- mVideoSampleReceived = false;
-
- if (notifyComplete) {
- notifyFlushComplete(audio);
- }
-}
-
-void NuPlayer2::Renderer::flushQueue(List<QueueEntry> *queue) {
- while (!queue->empty()) {
- QueueEntry *entry = &*queue->begin();
-
- if (entry->mBuffer != NULL) {
- entry->mNotifyConsumed->post();
- } else if (entry->mNotifyConsumed != nullptr) {
- // Is it needed to open audio sink now?
- onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
- }
-
- queue->erase(queue->begin());
- entry = NULL;
- }
-}
-
-void NuPlayer2::Renderer::notifyFlushComplete(bool audio) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatFlushComplete);
- notify->setInt32("audio", static_cast<int32_t>(audio));
- notify->post();
-}
-
-bool NuPlayer2::Renderer::dropBufferIfStale(
- bool audio, const sp<AMessage> &msg) {
- int32_t queueGeneration;
- CHECK(msg->findInt32("queueGeneration", &queueGeneration));
-
- if (queueGeneration == getQueueGeneration(audio)) {
- return false;
- }
-
- sp<AMessage> notifyConsumed;
-    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
- notifyConsumed->post();
- }
-
- return true;
-}
-
-void NuPlayer2::Renderer::onAudioSinkChanged() {
- if (offloadingAudio()) {
- return;
- }
- CHECK(!mDrainAudioQueuePending);
- mNumFramesWritten = 0;
- mAnchorNumFramesWritten = -1;
- uint32_t written;
- if (mAudioSink->getFramesWritten(&written) == OK) {
- mNumFramesWritten = written;
- }
-}
-
-void NuPlayer2::Renderer::onDisableOffloadAudio() {
- Mutex::Autolock autoLock(mLock);
- mFlags &= ~FLAG_OFFLOAD_AUDIO;
- ++mAudioDrainGeneration;
- if (mAudioRenderingStartGeneration != -1) {
- prepareForMediaRenderingStart_l();
- }
-}
-
-void NuPlayer2::Renderer::onEnableOffloadAudio() {
- Mutex::Autolock autoLock(mLock);
- mFlags |= FLAG_OFFLOAD_AUDIO;
- ++mAudioDrainGeneration;
- if (mAudioRenderingStartGeneration != -1) {
- prepareForMediaRenderingStart_l();
- }
-}
-
-void NuPlayer2::Renderer::onPause() {
- if (mPaused) {
- return;
- }
-
- {
- Mutex::Autolock autoLock(mLock);
- // we do not increment audio drain generation so that we fill audio buffer during pause.
- ++mVideoDrainGeneration;
- prepareForMediaRenderingStart_l();
- mPaused = true;
- mMediaClock->setPlaybackRate(0.0);
- }
-
- mDrainAudioQueuePending = false;
- mDrainVideoQueuePending = false;
-
- // Note: audio data may not have been decoded, and the AudioSink may not be opened.
- mAudioSink->pause();
- startAudioOffloadPauseTimeout();
-
- ALOGV("now paused audio queue has %zu entries, video has %zu entries",
- mAudioQueue.size(), mVideoQueue.size());
-}
-
-void NuPlayer2::Renderer::onResume() {
- if (!mPaused) {
- return;
- }
-
- // Note: audio data may not have been decoded, and the AudioSink may not be opened.
- cancelAudioOffloadPauseTimeout();
- if (mAudioSink->ready()) {
- status_t err = mAudioSink->start();
- if (err != OK) {
- ALOGE("cannot start AudioSink err %d", err);
- notifyAudioTearDown(kDueToError);
- }
- }
-
- {
- Mutex::Autolock autoLock(mLock);
- mPaused = false;
- // rendering started message may have been delayed if we were paused.
- if (mRenderingDataDelivered) {
- notifyIfMediaRenderingStarted_l();
- }
- // configure audiosink as we did not do it when pausing
- if (mAudioSink != NULL && mAudioSink->ready()) {
- mAudioSink->setPlaybackRate(mPlaybackSettings);
- }
-
- mMediaClock->setPlaybackRate(mPlaybackSettings.mSpeed);
-
- if (!mAudioQueue.empty()) {
- postDrainAudioQueue_l();
- }
- }
-
- if (!mVideoQueue.empty()) {
- postDrainVideoQueue();
- }
-}
-
-void NuPlayer2::Renderer::onSetVideoFrameRate(float fps) {
- if (mVideoScheduler == NULL) {
- mVideoScheduler = new VideoFrameScheduler2();
- }
- mVideoScheduler->init(fps);
-}
-
-int32_t NuPlayer2::Renderer::getQueueGeneration(bool audio) {
- Mutex::Autolock autoLock(mLock);
- return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
-}
-
-int32_t NuPlayer2::Renderer::getDrainGeneration(bool audio) {
- Mutex::Autolock autoLock(mLock);
- return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
-}
-
-bool NuPlayer2::Renderer::getSyncQueues() {
- Mutex::Autolock autoLock(mLock);
- return mSyncQueues;
-}
-
-void NuPlayer2::Renderer::onAudioTearDown(AudioTearDownReason reason) {
- if (mAudioTornDown) {
- return;
- }
- mAudioTornDown = true;
-
- int64_t currentPositionUs;
- sp<AMessage> notify = mNotify->dup();
-    if (getCurrentPosition(&currentPositionUs) == OK) {
- notify->setInt64("positionUs", currentPositionUs);
- }
-
- mAudioSink->stop();
- mAudioSink->flush();
-
- notify->setInt32("what", kWhatAudioTearDown);
- notify->setInt32("reason", reason);
- notify->post();
-}
-
-void NuPlayer2::Renderer::startAudioOffloadPauseTimeout() {
- if (offloadingAudio()) {
- mWakeLock->acquire();
- sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
- msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
- msg->post(kOffloadPauseMaxUs);
- }
-}
-
-void NuPlayer2::Renderer::cancelAudioOffloadPauseTimeout() {
- // We may have called startAudioOffloadPauseTimeout() without
- // the AudioSink open and with offloadingAudio enabled.
- //
- // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
- // we always release the wakelock and increment the pause timeout generation.
- //
- // Note: The acquired wakelock prevents the device from suspending
- // immediately after offload pause (in case a resume happens shortly thereafter).
- mWakeLock->release(true);
- ++mAudioOffloadPauseTimeoutGeneration;
-}
-
-status_t NuPlayer2::Renderer::onOpenAudioSink(
- const sp<AMessage> &format,
- bool offloadOnly,
- bool hasVideo,
- uint32_t flags,
- bool isStreaming) {
- ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
- offloadOnly, offloadingAudio());
-
- bool audioSinkChanged = false;
-
- int32_t numChannels;
- CHECK(format->findInt32("channel-count", &numChannels));
-
- int32_t channelMask;
- if (!format->findInt32("channel-mask", &channelMask)) {
- // signal to the AudioSink to derive the mask from count.
- channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
- }
-
- int32_t sampleRate;
- CHECK(format->findInt32("sample-rate", &sampleRate));
-
- // read pcm encoding from MediaCodec output format, if available
- int32_t pcmEncoding;
- audio_format_t audioFormat =
- format->findInt32(KEY_PCM_ENCODING, &pcmEncoding) ?
- audioFormatFromEncoding(pcmEncoding) : AUDIO_FORMAT_PCM_16_BIT;
-
- if (offloadingAudio()) {
- AString mime;
- CHECK(format->findString("mime", &mime));
- status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
-
- if (err != OK) {
- ALOGE("Couldn't map mime \"%s\" to a valid "
- "audio_format", mime.c_str());
- onDisableOffloadAudio();
- } else {
- ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
- mime.c_str(), audioFormat);
-
- int avgBitRate = -1;
- format->findInt32("bitrate", &avgBitRate);
-
- int32_t aacProfile = -1;
- if (audioFormat == AUDIO_FORMAT_AAC
- && format->findInt32("aac-profile", &aacProfile)) {
- // Redefine AAC format as per aac profile
- mapAACProfileToAudioFormat(
- audioFormat,
- aacProfile);
- }
-
- audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
- offloadInfo.duration_us = -1;
- format->findInt64(
- "durationUs", &offloadInfo.duration_us);
- offloadInfo.sample_rate = sampleRate;
- offloadInfo.channel_mask = channelMask;
- offloadInfo.format = audioFormat;
- offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
- offloadInfo.bit_rate = avgBitRate;
- offloadInfo.has_video = hasVideo;
- offloadInfo.is_streaming = isStreaming;
-
- if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
- ALOGV("openAudioSink: no change in offload mode");
- // no change from previous configuration, everything ok.
- return OK;
- }
- mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
-
- ALOGV("openAudioSink: try to open AudioSink in offload mode");
- uint32_t offloadFlags = flags;
- offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
- offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
- audioSinkChanged = true;
- mAudioSink->close();
-
- err = mAudioSink->open(
- sampleRate,
- numChannels,
- (audio_channel_mask_t)channelMask,
- audioFormat,
- &NuPlayer2::Renderer::AudioSinkCallback,
- this,
- (audio_output_flags_t)offloadFlags,
- &offloadInfo);
-
- if (err == OK) {
- err = mAudioSink->setPlaybackRate(mPlaybackSettings);
- }
-
- if (err == OK) {
- // If the playback is offloaded to h/w, we pass
- // the HAL some metadata information.
- // We don't want to do this for PCM because it
- // will be going through the AudioFlinger mixer
- // before reaching the hardware.
- // TODO
- mCurrentOffloadInfo = offloadInfo;
- if (!mPaused) { // for preview mode, don't start if paused
- err = mAudioSink->start();
- }
- ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
- }
- if (err != OK) {
- // Clean up, fall back to non offload mode.
- mAudioSink->close();
- onDisableOffloadAudio();
- mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
- ALOGV("openAudioSink: offload failed");
- if (offloadOnly) {
- notifyAudioTearDown(kForceNonOffload);
- }
- } else {
- mUseAudioCallback = true; // offload mode transfers data through callback
- ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
- }
- }
- }
- if (!offloadOnly && !offloadingAudio()) {
- ALOGV("openAudioSink: open AudioSink in NON-offload mode");
- uint32_t pcmFlags = flags;
- pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
-
- const PcmInfo info = {
- (audio_channel_mask_t)channelMask,
- (audio_output_flags_t)pcmFlags,
- audioFormat,
- numChannels,
- sampleRate
- };
- if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
- ALOGV("openAudioSink: no change in pcm mode");
- // no change from previous configuration, everything ok.
- return OK;
- }
-
- audioSinkChanged = true;
- mAudioSink->close();
- mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
- // Note: It is possible to set up the callback, but not use it to send audio data.
- // This requires a fix in AudioSink to explicitly specify the transfer mode.
- mUseAudioCallback = getUseAudioCallbackSetting();
- if (mUseAudioCallback) {
- ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
- }
-
- // Compute the desired buffer size.
- // For callback mode, the amount of time before wakeup is about half the buffer size.
- const uint32_t frameCount =
- (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
-
- // We should always be able to set our playback settings if the sink is closed.
- LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
- "onOpenAudioSink: can't set playback rate on closed sink");
- status_t err = mAudioSink->open(
- sampleRate,
- numChannels,
- (audio_channel_mask_t)channelMask,
- audioFormat,
- mUseAudioCallback ? &NuPlayer2::Renderer::AudioSinkCallback : NULL,
- mUseAudioCallback ? this : NULL,
- (audio_output_flags_t)pcmFlags,
- NULL,
- frameCount);
- if (err != OK) {
- ALOGW("openAudioSink: non offloaded open failed status: %d", err);
- mAudioSink->close();
- mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
- return err;
- }
- mCurrentPcmInfo = info;
- if (!mPaused) { // for preview mode, don't start if paused
- mAudioSink->start();
- }
- }
- if (audioSinkChanged) {
- onAudioSinkChanged();
- }
- mAudioTornDown = false;
- return OK;
-}
-
-void NuPlayer2::Renderer::onCloseAudioSink() {
- mAudioSink->close();
- mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
- mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
-}
-
-void NuPlayer2::Renderer::onChangeAudioFormat(
-        const sp<AMessage> &meta, const sp<AMessage> &notify) {
- sp<AMessage> format;
- CHECK(meta->findMessage("format", &format));
-
- int32_t offloadOnly;
- CHECK(meta->findInt32("offload-only", &offloadOnly));
-
- int32_t hasVideo;
- CHECK(meta->findInt32("has-video", &hasVideo));
-
- uint32_t flags;
- CHECK(meta->findInt32("flags", (int32_t *)&flags));
-
- uint32_t isStreaming;
- CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
-
- status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
-
- if (err != OK) {
- notify->setInt32("err", err);
- }
- notify->post();
-}
-
-} // namespace android
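For reference, the media-clock anchoring removed above reduces to two quantities: the duration represented by the frames written to the sink so far, and the portion the sink has already played out. A minimal standalone sketch of that arithmetic follows; all names are illustrative and are not part of the removed NuPlayer2 API.

    // Hedged sketch of the deleted renderer's audio timing math; the struct
    // and member names here are invented for illustration only.
    #include <algorithm>
    #include <cstdint>

    struct AudioClockSketch {
        uint32_t numFramesWritten = 0;  // frames handed to the audio sink so far
        int32_t  sampleRate = 48000;    // from the current PCM/offload configuration

        // Duration of numFrames if played back at 1.0x speed.
        int64_t durationUsForFrames(uint32_t numFrames) const {
            return sampleRate == 0 ? 0 : (int64_t)numFrames * 1000000LL / sampleRate;
        }

        // Pending (queued but not yet audible) duration, clamped at zero because
        // a stale sink timestamp can make the difference go negative.
        int64_t pendingPlayoutUs(int64_t playedOutUs) const {
            return std::max<int64_t>(0, durationUsForFrames(numFramesWritten) - playedOutUs);
        }

        // Media-clock anchor: "now" in media time is the last queued media
        // timestamp minus whatever is still buffered in the sink.
        int64_t nowMediaUs(int64_t lastQueuedMediaTimeUs, int64_t playedOutUs) const {
            return lastQueuedMediaTimeUs - pendingPlayoutUs(playedOutUs);
        }
    };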
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
deleted file mode 100644
index d065dee..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.h
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NUPLAYER2_RENDERER_H_
-
-#define NUPLAYER2_RENDERER_H_
-
-#include <media/AudioResamplerPublic.h>
-#include <media/AVSyncSettings.h>
-#include <mediaplayer2/JObjectHolder.h>
-
-#include "NuPlayer2.h"
-
-namespace android {
-
-class JWakeLock;
-struct MediaClock;
-class MediaCodecBuffer;
-struct VideoFrameSchedulerBase;
-
-struct NuPlayer2::Renderer : public AHandler {
- enum Flags {
- FLAG_REAL_TIME = 1,
- FLAG_OFFLOAD_AUDIO = 2,
- };
- Renderer(const sp<MediaPlayer2Interface::AudioSink> &sink,
- const sp<MediaClock> &mediaClock,
-             const sp<AMessage> &notify,
- const sp<JObjectHolder> &context,
- uint32_t flags = 0);
-
- static size_t AudioSinkCallback(
- MediaPlayer2Interface::AudioSink *audioSink,
- void *data, size_t size, void *me,
- MediaPlayer2Interface::AudioSink::cb_event_t event);
-
- void queueBuffer(
- bool audio,
- const sp<MediaCodecBuffer> &buffer,
-            const sp<AMessage> &notifyConsumed);
-
- void queueEOS(bool audio, status_t finalResult);
-
- status_t setPlaybackSettings(const AudioPlaybackRate &rate /* sanitized */);
- status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
- status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
- status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
-
- void flush(bool audio, bool notifyComplete);
-
- void signalTimeDiscontinuity();
-
- void signalDisableOffloadAudio();
- void signalEnableOffloadAudio();
-
- void pause();
- void resume();
-
- void setVideoFrameRate(float fps);
-
- status_t getCurrentPosition(int64_t *mediaUs);
- int64_t getVideoLateByUs();
-
- status_t openAudioSink(
- const sp<AMessage> &format,
- bool offloadOnly,
- bool hasVideo,
- uint32_t flags,
- bool *isOffloaded,
- bool isStreaming);
- void closeAudioSink();
-
- // re-open audio sink after all pending audio buffers played.
- void changeAudioFormat(
- const sp<AMessage> &format,
- bool offloadOnly,
- bool hasVideo,
- uint32_t flags,
- bool isStreaming,
-            const sp<AMessage> &notify);
-
- enum {
- kWhatEOS = 'eos ',
- kWhatFlushComplete = 'fluC',
- kWhatPosition = 'posi',
- kWhatVideoRenderingStart = 'vdrd',
- kWhatMediaRenderingStart = 'mdrd',
- kWhatAudioTearDown = 'adTD',
- kWhatAudioOffloadPauseTimeout = 'aOPT',
- };
-
- enum AudioTearDownReason {
- kDueToError = 0, // Could restart with either offload or non-offload.
- kDueToTimeout,
- kForceNonOffload, // Restart only with non-offload.
- };
-
-protected:
- virtual ~Renderer();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
- enum {
- kWhatDrainAudioQueue = 'draA',
- kWhatDrainVideoQueue = 'draV',
- kWhatPostDrainVideoQueue = 'pDVQ',
- kWhatQueueBuffer = 'queB',
- kWhatQueueEOS = 'qEOS',
- kWhatConfigPlayback = 'cfPB',
- kWhatConfigSync = 'cfSy',
- kWhatGetPlaybackSettings = 'gPbS',
- kWhatGetSyncSettings = 'gSyS',
- kWhatFlush = 'flus',
- kWhatPause = 'paus',
- kWhatResume = 'resm',
- kWhatOpenAudioSink = 'opnA',
- kWhatCloseAudioSink = 'clsA',
- kWhatChangeAudioFormat = 'chgA',
- kWhatStopAudioSink = 'stpA',
- kWhatDisableOffloadAudio = 'noOA',
- kWhatEnableOffloadAudio = 'enOA',
- kWhatSetVideoFrameRate = 'sVFR',
- };
-
- // if mBuffer != nullptr, it's a buffer containing real data.
- // else if mNotifyConsumed == nullptr, it's EOS.
- // else it's a tag for re-opening audio sink in different format.
- struct QueueEntry {
- sp<MediaCodecBuffer> mBuffer;
- sp<AMessage> mMeta;
- sp<AMessage> mNotifyConsumed;
- size_t mOffset;
- status_t mFinalResult;
- int32_t mBufferOrdinal;
- };
-
- static const int64_t kMinPositionUpdateDelayUs;
-
- sp<MediaPlayer2Interface::AudioSink> mAudioSink;
- bool mUseVirtualAudioSink;
- sp<AMessage> mNotify;
- Mutex mLock;
- uint32_t mFlags;
- List<QueueEntry> mAudioQueue;
- List<QueueEntry> mVideoQueue;
- uint32_t mNumFramesWritten;
- sp<VideoFrameSchedulerBase> mVideoScheduler;
-
- bool mDrainAudioQueuePending;
- bool mDrainVideoQueuePending;
- int32_t mAudioQueueGeneration;
- int32_t mVideoQueueGeneration;
- int32_t mAudioDrainGeneration;
- int32_t mVideoDrainGeneration;
- int32_t mAudioEOSGeneration;
-
- const sp<MediaClock> mMediaClock;
-
- AudioPlaybackRate mPlaybackSettings;
- AVSyncSettings mSyncSettings;
- float mVideoFpsHint;
-
- int64_t mAudioFirstAnchorTimeMediaUs;
- int64_t mAnchorTimeMediaUs;
- int64_t mAnchorNumFramesWritten;
- int64_t mVideoLateByUs;
- int64_t mNextVideoTimeMediaUs;
- bool mHasAudio;
- bool mHasVideo;
-
- bool mNotifyCompleteAudio;
- bool mNotifyCompleteVideo;
-
- bool mSyncQueues;
-
-    // modified only on the renderer's thread.
- bool mPaused;
- int64_t mPauseDrainAudioAllowedUs; // time when we can drain/deliver audio in pause mode.
-
- bool mVideoSampleReceived;
- bool mVideoRenderingStarted;
- int32_t mVideoRenderingStartGeneration;
- int32_t mAudioRenderingStartGeneration;
- bool mRenderingDataDelivered;
-
- int64_t mNextAudioClockUpdateTimeUs;
- // the media timestamp of last audio sample right before EOS.
- int64_t mLastAudioMediaTimeUs;
-
- int32_t mAudioOffloadPauseTimeoutGeneration;
- bool mAudioTornDown;
- audio_offload_info_t mCurrentOffloadInfo;
-
- struct PcmInfo {
- audio_channel_mask_t mChannelMask;
- audio_output_flags_t mFlags;
- audio_format_t mFormat;
- int32_t mNumChannels;
- int32_t mSampleRate;
- };
- PcmInfo mCurrentPcmInfo;
- static const PcmInfo AUDIO_PCMINFO_INITIALIZER;
-
- int32_t mTotalBuffersQueued;
- int32_t mLastAudioBufferDrained;
- bool mUseAudioCallback;
-
- sp<JWakeLock> mWakeLock;
-
- status_t getCurrentPositionOnLooper(int64_t *mediaUs);
- status_t getCurrentPositionOnLooper(
- int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
- bool getCurrentPositionIfPaused_l(int64_t *mediaUs);
- status_t getCurrentPositionFromAnchor(
- int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
-
- void notifyEOSCallback();
- size_t fillAudioBuffer(void *buffer, size_t size);
-
- bool onDrainAudioQueue();
- void drainAudioQueueUntilLastEOS();
- int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs);
- void postDrainAudioQueue_l(int64_t delayUs = 0);
-
- void clearAnchorTime();
- void clearAudioFirstAnchorTime_l();
- void setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs);
- void setVideoLateByUs(int64_t lateUs);
-
- void onNewAudioMediaTime(int64_t mediaTimeUs);
- int64_t getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs);
-
- void onDrainVideoQueue();
- void postDrainVideoQueue();
-
- void prepareForMediaRenderingStart_l();
- void notifyIfMediaRenderingStarted_l();
-
- void onQueueBuffer(const sp<AMessage> &msg);
- void onQueueEOS(const sp<AMessage> &msg);
- void onFlush(const sp<AMessage> &msg);
- void onAudioSinkChanged();
- void onDisableOffloadAudio();
- void onEnableOffloadAudio();
- status_t onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */);
- status_t onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
- status_t onConfigSync(const AVSyncSettings &sync, float videoFpsHint);
- status_t onGetSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
-
- void onPause();
- void onResume();
- void onSetVideoFrameRate(float fps);
- int32_t getQueueGeneration(bool audio);
- int32_t getDrainGeneration(bool audio);
- bool getSyncQueues();
- void onAudioTearDown(AudioTearDownReason reason);
- status_t onOpenAudioSink(
- const sp<AMessage> &format,
- bool offloadOnly,
- bool hasVideo,
- uint32_t flags,
- bool isStreaming);
- void onCloseAudioSink();
-    void onChangeAudioFormat(const sp<AMessage> &meta, const sp<AMessage> &notify);
-
- void notifyEOS(bool audio, status_t finalResult, int64_t delayUs = 0);
- void notifyEOS_l(bool audio, status_t finalResult, int64_t delayUs = 0);
- void notifyFlushComplete(bool audio);
- void notifyPosition();
- void notifyVideoLateBy(int64_t lateByUs);
- void notifyVideoRenderingStart();
- void notifyAudioTearDown(AudioTearDownReason reason);
-
- void flushQueue(List<QueueEntry> *queue);
- bool dropBufferIfStale(bool audio, const sp<AMessage> &msg);
- void syncQueuesDone_l();
-
- bool offloadingAudio() const { return (mFlags & FLAG_OFFLOAD_AUDIO) != 0; }
-
- void startAudioOffloadPauseTimeout();
- void cancelAudioOffloadPauseTimeout();
-
- int64_t getDurationUsIfPlayedAtSampleRate(uint32_t numFrames);
-
- DISALLOW_EVIL_CONSTRUCTORS(Renderer);
-};
-
-} // namespace android
-
-#endif // NUPLAYER2_RENDERER_H_
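The QueueEntry comment above packs three cases into two pointers (data buffer, EOS marker, audio-format-change tag). As a quick reference, here is a hedged sketch of how a drain loop distinguishes them; the entry type is a hypothetical stand-in, not the removed struct.

    // Illustrative only: mirrors the removed QueueEntry convention with a
    // hypothetical entry type rather than the actual NuPlayer2 classes.
    enum class EntryKind { Data, EndOfStream, FormatChange };

    template <typename Entry>
    EntryKind classifyEntry(const Entry &entry) {
        if (entry.mBuffer != nullptr) {
            return EntryKind::Data;          // real decoded data to render
        }
        if (entry.mNotifyConsumed == nullptr) {
            return EntryKind::EndOfStream;   // EOS marker; mFinalResult carries the status
        }
        return EntryKind::FormatChange;      // tag requesting the audio sink be re-opened
    }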
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Source.h b/media/libmediaplayer2/nuplayer2/NuPlayer2Source.h
deleted file mode 100644
index 9298a99..0000000
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Source.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NUPLAYER2_SOURCE_H_
-
-#define NUPLAYER2_SOURCE_H_
-
-#include "NuPlayer2.h"
-
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MetaData.h>
-#include <mediaplayer2/mediaplayer2.h>
-#include <utils/Vector.h>
-
-namespace android {
-
-struct ABuffer;
-struct AMediaCryptoWrapper;
-class MediaBuffer;
-
-struct NuPlayer2::Source : public AHandler {
- enum Flags {
- FLAG_CAN_PAUSE = 1,
- FLAG_CAN_SEEK_BACKWARD = 2, // the "10 sec back button"
- FLAG_CAN_SEEK_FORWARD = 4, // the "10 sec forward button"
- FLAG_CAN_SEEK = 8, // the "seek bar"
- FLAG_DYNAMIC_DURATION = 16,
- FLAG_SECURE = 32, // Secure codec is required.
- FLAG_PROTECTED = 64, // The screen needs to be protected (screenshot is disabled).
- };
-
- enum {
- kWhatPrepared,
- kWhatFlagsChanged,
- kWhatVideoSizeChanged,
- kWhatBufferingUpdate,
- kWhatPauseOnBufferingStart,
- kWhatResumeOnBufferingEnd,
- kWhatCacheStats,
- kWhatSubtitleData,
- kWhatTimedTextData,
- kWhatTimedMetaData,
- kWhatQueueDecoderShutdown,
- kWhatDrmNoLicense,
- // Modular DRM
- kWhatDrmInfo,
- };
-
-    // The provided message is used to notify the player about various
- // events.
-    explicit Source(const sp<AMessage> &notify)
- : mNotify(notify) {
- }
-
- virtual status_t getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) = 0;
- virtual status_t setBufferingSettings(const BufferingSettings& buffering) = 0;
-
- virtual void prepareAsync(int64_t startTimeUs) = 0;
-
- virtual void start() = 0;
- virtual void stop() {}
- virtual void pause() {}
- virtual void resume() {}
-
-    // Explicitly disconnect the underlying data source
- virtual void disconnect() {}
-
- // Returns OK iff more data was available,
- // an error or ERROR_END_OF_STREAM if not.
- virtual status_t feedMoreTSData() = 0;
-
- // Returns non-NULL format when the specified track exists.
-    // When the format has "err" set to -EWOULDBLOCK, source needs more time to get valid metadata.
-    // Returns NULL if the specified track doesn't exist or is invalid.
- virtual sp<AMessage> getFormat(bool audio);
-
- virtual sp<MetaData> getFormatMeta(bool /* audio */) { return NULL; }
- virtual sp<MetaData> getFileFormatMeta() const { return NULL; }
-
- virtual status_t dequeueAccessUnit(
- bool audio, sp<ABuffer> *accessUnit) = 0;
-
- virtual status_t getDuration(int64_t * /* durationUs */) {
- return INVALID_OPERATION;
- }
-
- virtual size_t getTrackCount() const {
- return 0;
- }
-
- virtual sp<AMessage> getTrackInfo(size_t /* trackIndex */) const {
- return NULL;
- }
-
- virtual ssize_t getSelectedTrack(media_track_type /* type */) const {
- return INVALID_OPERATION;
- }
-
- virtual status_t selectTrack(size_t /* trackIndex */, bool /* select */, int64_t /* timeUs*/) {
- return INVALID_OPERATION;
- }
-
- virtual status_t seekTo(
- int64_t /* seekTimeUs */,
- MediaPlayer2SeekMode /* mode */ = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) {
- return INVALID_OPERATION;
- }
-
- virtual bool isRealTime() const {
- return false;
- }
-
- virtual bool isStreaming() const {
- return true;
- }
-
- virtual void setOffloadAudio(bool /* offload */) {}
-
- // Modular DRM
- virtual status_t prepareDrm(
- const uint8_t /* uuid */[16], const Vector<uint8_t> & /* drmSessionId */,
- sp<AMediaCryptoWrapper> * /* crypto */) {
- return INVALID_OPERATION;
- }
-
- virtual status_t releaseDrm() {
- return INVALID_OPERATION;
- }
-
-protected:
- virtual ~Source() {}
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
- sp<AMessage> dupNotify() const { return mNotify->dup(); }
-
- void notifyFlagsChanged(uint32_t flags);
- void notifyVideoSizeChanged(const sp<AMessage> &format = NULL);
- void notifyPrepared(status_t err = OK);
- // Modular DRM
- void notifyDrmInfo(const sp<ABuffer> &buffer);
-
-private:
- sp<AMessage> mNotify;
-
- DISALLOW_EVIL_CONSTRUCTORS(Source);
-};
-
-} // namespace android
-
-#endif // NUPLAYER2_SOURCE_H_
-
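The Source capability flags declared above form a plain bitmask that the player receives via kWhatFlagsChanged. A small self-contained sketch of testing them follows; the enum values are copied from the removed header, while main() and the variable names are invented for illustration.

    // Hedged sketch: checking Source capability flags. The values match the
    // removed header; everything else in this snippet is illustrative.
    #include <cstdint>
    #include <cstdio>

    enum SourceFlags : uint32_t {
        FLAG_CAN_PAUSE         = 1,
        FLAG_CAN_SEEK_BACKWARD = 2,
        FLAG_CAN_SEEK_FORWARD  = 4,
        FLAG_CAN_SEEK          = 8,
        FLAG_DYNAMIC_DURATION  = 16,
        FLAG_SECURE            = 32,
        FLAG_PROTECTED         = 64,
    };

    int main() {
        // What a seekable, pausable source (e.g. an RTSP handler) would report.
        uint32_t flags = FLAG_CAN_PAUSE | FLAG_CAN_SEEK
                | FLAG_CAN_SEEK_BACKWARD | FLAG_CAN_SEEK_FORWARD;

        std::printf("seek bar: %d, pause: %d\n",
                (flags & FLAG_CAN_SEEK) != 0, (flags & FLAG_CAN_PAUSE) != 0);
        return 0;
    }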
diff --git a/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp b/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
deleted file mode 100644
index a70269e..0000000
--- a/media/libmediaplayer2/nuplayer2/RTSPSource2.cpp
+++ /dev/null
@@ -1,903 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "RTSPSource2"
-#include <utils/Log.h>
-
-#include "RTSPSource2.h"
-
-#include "AnotherPacketSource.h"
-#include "MyHandler.h"
-#include "SDPLoader.h"
-
-#include <media/MediaHTTPService.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MetaData.h>
-
-namespace android {
-
-const int64_t kNearEOSTimeoutUs = 2000000LL; // 2 secs
-
-// Default Buffer Underflow/Prepare/StartServer/Overflow Marks
-static const int kUnderflowMarkMs = 1000; // 1 second
-static const int kPrepareMarkMs = 3000; // 3 seconds
-//static const int kStartServerMarkMs = 5000;
-static const int kOverflowMarkMs = 10000; // 10 seconds
-
-NuPlayer2::RTSPSource2::RTSPSource2(
-        const sp<AMessage> &notify,
- const sp<MediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers,
- uid_t uid,
- bool isSDP)
- : Source(notify),
- mHTTPService(httpService),
- mURL(url),
- mUID(uid),
- mFlags(0),
- mIsSDP(isSDP),
- mState(DISCONNECTED),
- mFinalResult(OK),
- mDisconnectReplyID(0),
- mBuffering(false),
- mInPreparationPhase(true),
- mEOSPending(false),
- mSeekGeneration(0),
- mEOSTimeoutAudio(0),
- mEOSTimeoutVideo(0) {
- mBufferingSettings.mInitialMarkMs = kPrepareMarkMs;
- mBufferingSettings.mResumePlaybackMarkMs = kOverflowMarkMs;
- if (headers) {
- mExtraHeaders = *headers;
-
- ssize_t index =
- mExtraHeaders.indexOfKey(String8("x-hide-urls-from-log"));
-
- if (index >= 0) {
- mFlags |= kFlagIncognito;
-
- mExtraHeaders.removeItemsAt(index);
- }
- }
-}
-
-NuPlayer2::RTSPSource2::~RTSPSource2() {
- if (mLooper != NULL) {
- mLooper->unregisterHandler(id());
- mLooper->stop();
- }
-}
-
-status_t NuPlayer2::RTSPSource2::getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) {
- Mutex::Autolock _l(mBufferingSettingsLock);
- *buffering = mBufferingSettings;
- return OK;
-}
-
-status_t NuPlayer2::RTSPSource2::setBufferingSettings(const BufferingSettings& buffering) {
- Mutex::Autolock _l(mBufferingSettingsLock);
- mBufferingSettings = buffering;
- return OK;
-}
-
-// TODO: fetch data starting from |startTimeUs|
-void NuPlayer2::RTSPSource2::prepareAsync(int64_t /* startTimeUs */) {
- if (mIsSDP && mHTTPService == NULL) {
- notifyPrepared(BAD_VALUE);
- return;
- }
-
- if (mLooper == NULL) {
- mLooper = new ALooper;
- mLooper->setName("rtsp2");
- mLooper->start();
-
- mLooper->registerHandler(this);
- }
-
- CHECK(mHandler == NULL);
- CHECK(mSDPLoader == NULL);
-
- sp<AMessage> notify = new AMessage(kWhatNotify, this);
-
- CHECK_EQ(mState, (int)DISCONNECTED);
- mState = CONNECTING;
-
- if (mIsSDP) {
- mSDPLoader = new SDPLoader(notify,
- (mFlags & kFlagIncognito) ? SDPLoader::kFlagIncognito : 0,
- mHTTPService);
-
- mSDPLoader->load(
- mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
- } else {
- mHandler = new MyHandler(mURL.c_str(), notify, true /* uidValid */, mUID);
- mLooper->registerHandler(mHandler);
-
- mHandler->connect();
- }
-
- startBufferingIfNecessary();
-}
-
-void NuPlayer2::RTSPSource2::start() {
-}
-
-void NuPlayer2::RTSPSource2::stop() {
- if (mLooper == NULL) {
- return;
- }
- sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
-
- sp<AMessage> dummy;
- msg->postAndAwaitResponse(&dummy);
-}
-
-status_t NuPlayer2::RTSPSource2::feedMoreTSData() {
- Mutex::Autolock _l(mBufferingLock);
- return mFinalResult;
-}
-
-sp<MetaData> NuPlayer2::RTSPSource2::getFormatMeta(bool audio) {
- sp<AnotherPacketSource> source = getSource(audio);
-
- if (source == NULL) {
- return NULL;
- }
-
- return source->getFormat();
-}
-
-bool NuPlayer2::RTSPSource2::haveSufficientDataOnAllTracks() {
-    // We're going to buffer at least 2 secs worth of data on all tracks before
- // starting playback (both at startup and after a seek).
-
- static const int64_t kMinDurationUs = 2000000LL;
-
- int64_t mediaDurationUs = 0;
- getDuration(&mediaDurationUs);
- if ((mAudioTrack != NULL && mAudioTrack->isFinished(mediaDurationUs))
- || (mVideoTrack != NULL && mVideoTrack->isFinished(mediaDurationUs))) {
- return true;
- }
-
- status_t err;
- int64_t durationUs;
- if (mAudioTrack != NULL
- && (durationUs = mAudioTrack->getBufferedDurationUs(&err))
- < kMinDurationUs
- && err == OK) {
- ALOGV("audio track doesn't have enough data yet. (%.2f secs buffered)",
- durationUs / 1E6);
- return false;
- }
-
- if (mVideoTrack != NULL
- && (durationUs = mVideoTrack->getBufferedDurationUs(&err))
- < kMinDurationUs
- && err == OK) {
- ALOGV("video track doesn't have enough data yet. (%.2f secs buffered)",
- durationUs / 1E6);
- return false;
- }
-
- return true;
-}
-
-status_t NuPlayer2::RTSPSource2::dequeueAccessUnit(
- bool audio, sp<ABuffer> *accessUnit) {
- if (!stopBufferingIfNecessary()) {
- return -EWOULDBLOCK;
- }
-
- sp<AnotherPacketSource> source = getSource(audio);
-
- if (source == NULL) {
- return -EWOULDBLOCK;
- }
-
- status_t finalResult;
- if (!source->hasBufferAvailable(&finalResult)) {
- if (finalResult == OK) {
-
-            // If the other source has already signaled EOS, this source should also return EOS
- if (sourceReachedEOS(!audio)) {
- return ERROR_END_OF_STREAM;
- }
-
- // If this source has detected near end, give it some time to retrieve more
- // data before returning EOS
- int64_t mediaDurationUs = 0;
- getDuration(&mediaDurationUs);
- if (source->isFinished(mediaDurationUs)) {
- int64_t eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo;
- if (eosTimeout == 0) {
- setEOSTimeout(audio, ALooper::GetNowUs());
- } else if ((ALooper::GetNowUs() - eosTimeout) > kNearEOSTimeoutUs) {
- setEOSTimeout(audio, 0);
- return ERROR_END_OF_STREAM;
- }
- return -EWOULDBLOCK;
- }
-
- if (!sourceNearEOS(!audio)) {
- // We should not enter buffering mode
- // if any of the sources already have detected EOS.
- startBufferingIfNecessary();
- }
-
- return -EWOULDBLOCK;
- }
- return finalResult;
- }
-
- setEOSTimeout(audio, 0);
-
- return source->dequeueAccessUnit(accessUnit);
-}
-
-sp<AnotherPacketSource> NuPlayer2::RTSPSource2::getSource(bool audio) {
- if (mTSParser != NULL) {
- sp<MediaSource> source = mTSParser->getSource(
- audio ? ATSParser::AUDIO : ATSParser::VIDEO);
-
- return static_cast<AnotherPacketSource *>(source.get());
- }
-
- return audio ? mAudioTrack : mVideoTrack;
-}
-
-void NuPlayer2::RTSPSource2::setEOSTimeout(bool audio, int64_t timeout) {
- if (audio) {
- mEOSTimeoutAudio = timeout;
- } else {
- mEOSTimeoutVideo = timeout;
- }
-}
-
-status_t NuPlayer2::RTSPSource2::getDuration(int64_t *durationUs) {
- *durationUs = -1LL;
-
- int64_t audioDurationUs;
- if (mAudioTrack != NULL
- && mAudioTrack->getFormat()->findInt64(
- kKeyDuration, &audioDurationUs)
- && audioDurationUs > *durationUs) {
- *durationUs = audioDurationUs;
- }
-
- int64_t videoDurationUs;
- if (mVideoTrack != NULL
- && mVideoTrack->getFormat()->findInt64(
- kKeyDuration, &videoDurationUs)
- && videoDurationUs > *durationUs) {
- *durationUs = videoDurationUs;
- }
-
- return OK;
-}
-
-status_t NuPlayer2::RTSPSource2::seekTo(int64_t seekTimeUs, MediaPlayer2SeekMode mode) {
- sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
- msg->setInt32("generation", ++mSeekGeneration);
- msg->setInt64("timeUs", seekTimeUs);
- msg->setInt32("mode", mode);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
-
- return err;
-}
-
-void NuPlayer2::RTSPSource2::performSeek(int64_t seekTimeUs) {
- if (mState != CONNECTED) {
- finishSeek(INVALID_OPERATION);
- return;
- }
-
- mState = SEEKING;
- mHandler->seek(seekTimeUs);
- mEOSPending = false;
-}
-
-void NuPlayer2::RTSPSource2::schedulePollBuffering() {
- sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
- msg->post(1000000LL); // 1 second intervals
-}
-
-void NuPlayer2::RTSPSource2::checkBuffering(
- bool *prepared, bool *underflow, bool *overflow, bool *startServer, bool *finished) {
- size_t numTracks = mTracks.size();
- size_t preparedCount, underflowCount, overflowCount, startCount, finishedCount;
- preparedCount = underflowCount = overflowCount = startCount = finishedCount = 0;
-
- size_t count = numTracks;
- for (size_t i = 0; i < count; ++i) {
- status_t finalResult;
- TrackInfo *info = &mTracks.editItemAt(i);
- sp<AnotherPacketSource> src = info->mSource;
- if (src == NULL) {
- --numTracks;
- continue;
- }
- int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
-
- int64_t initialMarkUs;
- int64_t maxRebufferingMarkUs;
- {
- Mutex::Autolock _l(mBufferingSettingsLock);
- initialMarkUs = mBufferingSettings.mInitialMarkMs * 1000LL;
- // TODO: maxRebufferingMarkUs could be larger than
- // mBufferingSettings.mResumePlaybackMarkMs * 1000ll.
- maxRebufferingMarkUs = mBufferingSettings.mResumePlaybackMarkMs * 1000LL;
- }
- // isFinished when duration is 0 checks for EOS result only
- if (bufferedDurationUs > initialMarkUs
- || src->isFinished(/* duration */ 0)) {
- ++preparedCount;
- }
-
- if (src->isFinished(/* duration */ 0)) {
- ++overflowCount;
- ++finishedCount;
- } else {
- // TODO: redefine kUnderflowMarkMs to a fair value,
- if (bufferedDurationUs < kUnderflowMarkMs * 1000) {
- ++underflowCount;
- }
- if (bufferedDurationUs > maxRebufferingMarkUs) {
- ++overflowCount;
- }
- int64_t startServerMarkUs =
- (kUnderflowMarkMs * 1000LL + maxRebufferingMarkUs) / 2;
- if (bufferedDurationUs < startServerMarkUs) {
- ++startCount;
- }
- }
- }
-
- *prepared = (preparedCount == numTracks);
- *underflow = (underflowCount > 0);
- *overflow = (overflowCount == numTracks);
- *startServer = (startCount > 0);
- *finished = (finishedCount > 0);
-}
-
-void NuPlayer2::RTSPSource2::onPollBuffering() {
- bool prepared, underflow, overflow, startServer, finished;
- checkBuffering(&prepared, &underflow, &overflow, &startServer, &finished);
-
- if (prepared && mInPreparationPhase) {
- mInPreparationPhase = false;
- notifyPrepared();
- }
-
- if (!mInPreparationPhase && underflow) {
- startBufferingIfNecessary();
- }
-
- if (haveSufficientDataOnAllTracks()) {
- stopBufferingIfNecessary();
- }
-
- if (overflow && mHandler != NULL) {
- mHandler->pause();
- }
-
- if (startServer && mHandler != NULL) {
- mHandler->resume();
- }
-
- if (finished && mHandler != NULL) {
- mHandler->cancelAccessUnitTimeoutCheck();
- }
-
- schedulePollBuffering();
-}
-
-void NuPlayer2::RTSPSource2::signalSourceEOS(status_t result) {
- const bool audio = true;
- const bool video = false;
-
- sp<AnotherPacketSource> source = getSource(audio);
- if (source != NULL) {
- source->signalEOS(result);
- }
-
- source = getSource(video);
- if (source != NULL) {
- source->signalEOS(result);
- }
-}
-
-bool NuPlayer2::RTSPSource2::sourceReachedEOS(bool audio) {
- sp<AnotherPacketSource> source = getSource(audio);
- status_t finalResult;
- return (source != NULL &&
- !source->hasBufferAvailable(&finalResult) &&
- finalResult == ERROR_END_OF_STREAM);
-}
-
-bool NuPlayer2::RTSPSource2::sourceNearEOS(bool audio) {
- sp<AnotherPacketSource> source = getSource(audio);
- int64_t mediaDurationUs = 0;
- getDuration(&mediaDurationUs);
- return (source != NULL && source->isFinished(mediaDurationUs));
-}
-
-void NuPlayer2::RTSPSource2::onSignalEOS(const sp<AMessage> &msg) {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
-
- if (generation != mSeekGeneration) {
- return;
- }
-
- if (mEOSPending) {
- signalSourceEOS(ERROR_END_OF_STREAM);
- mEOSPending = false;
- }
-}
-
-void NuPlayer2::RTSPSource2::postSourceEOSIfNecessary() {
- const bool audio = true;
- const bool video = false;
- // If a source has detected near end, give it some time to retrieve more
- // data before signaling EOS
- if (sourceNearEOS(audio) || sourceNearEOS(video)) {
- if (!mEOSPending) {
- sp<AMessage> msg = new AMessage(kWhatSignalEOS, this);
- msg->setInt32("generation", mSeekGeneration);
- msg->post(kNearEOSTimeoutUs);
- mEOSPending = true;
- }
- }
-}
-
-void NuPlayer2::RTSPSource2::onMessageReceived(const sp<AMessage> &msg) {
- if (msg->what() == kWhatDisconnect) {
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
-
- mDisconnectReplyID = replyID;
- finishDisconnectIfPossible();
- return;
- } else if (msg->what() == kWhatPerformSeek) {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
-
- if (generation != mSeekGeneration) {
- // obsolete.
- finishSeek(OK);
- return;
- }
-
- int64_t seekTimeUs;
- int32_t mode;
- CHECK(msg->findInt64("timeUs", &seekTimeUs));
- CHECK(msg->findInt32("mode", &mode));
-
- // TODO: add "mode" to performSeek.
- performSeek(seekTimeUs/*, (MediaPlayer2SeekMode)mode */);
- return;
- } else if (msg->what() == kWhatPollBuffering) {
- onPollBuffering();
- return;
- } else if (msg->what() == kWhatSignalEOS) {
- onSignalEOS(msg);
- return;
- }
-
- CHECK_EQ(msg->what(), kWhatNotify);
-
- int32_t what;
- CHECK(msg->findInt32("what", &what));
-
- switch (what) {
- case MyHandler::kWhatConnected:
- {
- onConnected();
-
- notifyVideoSizeChanged();
-
- uint32_t flags = 0;
-
- if (mHandler->isSeekable()) {
- flags = FLAG_CAN_PAUSE
- | FLAG_CAN_SEEK
- | FLAG_CAN_SEEK_BACKWARD
- | FLAG_CAN_SEEK_FORWARD;
- }
-
- notifyFlagsChanged(flags);
- schedulePollBuffering();
- break;
- }
-
- case MyHandler::kWhatDisconnected:
- {
- onDisconnected(msg);
- break;
- }
-
- case MyHandler::kWhatSeekDone:
- {
- mState = CONNECTED;
- // Unblock seekTo here in case we attempted to seek in a live stream
- finishSeek(OK);
- break;
- }
-
- case MyHandler::kWhatSeekPaused:
- {
- sp<AnotherPacketSource> source = getSource(true /* audio */);
- if (source != NULL) {
- source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
- /* extra */ NULL,
- /* discard */ true);
- }
- source = getSource(false /* video */);
- if (source != NULL) {
- source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
- /* extra */ NULL,
- /* discard */ true);
- };
-
- status_t err = OK;
- msg->findInt32("err", &err);
-
- if (err == OK) {
- int64_t timeUs;
- CHECK(msg->findInt64("time", &timeUs));
- mHandler->continueSeekAfterPause(timeUs);
- } else {
- finishSeek(err);
- }
- break;
- }
-
- case MyHandler::kWhatAccessUnit:
- {
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
-
- if (mTSParser == NULL) {
- CHECK_LT(trackIndex, mTracks.size());
- } else {
- CHECK_EQ(trackIndex, 0u);
- }
-
- sp<ABuffer> accessUnit;
- CHECK(msg->findBuffer("accessUnit", &accessUnit));
-
- int32_t damaged;
- if (accessUnit->meta()->findInt32("damaged", &damaged)
- && damaged) {
- ALOGI("dropping damaged access unit.");
- break;
- }
-
- if (mTSParser != NULL) {
- size_t offset = 0;
- status_t err = OK;
- while (offset + 188 <= accessUnit->size()) {
- err = mTSParser->feedTSPacket(
- accessUnit->data() + offset, 188);
- if (err != OK) {
- break;
- }
-
- offset += 188;
- }
-
- if (offset < accessUnit->size()) {
- err = ERROR_MALFORMED;
- }
-
- if (err != OK) {
- signalSourceEOS(err);
- }
-
- postSourceEOSIfNecessary();
- break;
- }
-
- TrackInfo *info = &mTracks.editItemAt(trackIndex);
-
- sp<AnotherPacketSource> source = info->mSource;
- if (source != NULL) {
- uint32_t rtpTime;
- CHECK(accessUnit->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
-
- if (!info->mNPTMappingValid) {
- // This is a live stream, we didn't receive any normal
- // playtime mapping. We won't map to npt time.
- source->queueAccessUnit(accessUnit);
- break;
- }
-
- int64_t nptUs =
- ((double)rtpTime - (double)info->mRTPTime)
- / info->mTimeScale
- * 1000000LL
- + info->mNormalPlaytimeUs;
-
- accessUnit->meta()->setInt64("timeUs", nptUs);
-
- source->queueAccessUnit(accessUnit);
- }
- postSourceEOSIfNecessary();
- break;
- }
-
- case MyHandler::kWhatEOS:
- {
- int32_t finalResult;
- CHECK(msg->findInt32("finalResult", &finalResult));
- CHECK_NE(finalResult, (status_t)OK);
-
- if (mTSParser != NULL) {
- signalSourceEOS(finalResult);
- }
-
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
- CHECK_LT(trackIndex, mTracks.size());
-
- TrackInfo *info = &mTracks.editItemAt(trackIndex);
- sp<AnotherPacketSource> source = info->mSource;
- if (source != NULL) {
- source->signalEOS(finalResult);
- }
-
- break;
- }
-
- case MyHandler::kWhatSeekDiscontinuity:
- {
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
- CHECK_LT(trackIndex, mTracks.size());
-
- TrackInfo *info = &mTracks.editItemAt(trackIndex);
- sp<AnotherPacketSource> source = info->mSource;
- if (source != NULL) {
- source->queueDiscontinuity(
- ATSParser::DISCONTINUITY_TIME,
- NULL,
- true /* discard */);
- }
-
- break;
- }
-
- case MyHandler::kWhatNormalPlayTimeMapping:
- {
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
- CHECK_LT(trackIndex, mTracks.size());
-
- uint32_t rtpTime;
- CHECK(msg->findInt32("rtpTime", (int32_t *)&rtpTime));
-
- int64_t nptUs;
- CHECK(msg->findInt64("nptUs", &nptUs));
-
- TrackInfo *info = &mTracks.editItemAt(trackIndex);
- info->mRTPTime = rtpTime;
- info->mNormalPlaytimeUs = nptUs;
- info->mNPTMappingValid = true;
- break;
- }
-
- case SDPLoader::kWhatSDPLoaded:
- {
- onSDPLoaded(msg);
- break;
- }
-
- default:
- TRESPASS();
- }
-}
-
-void NuPlayer2::RTSPSource2::onConnected() {
- CHECK(mAudioTrack == NULL);
- CHECK(mVideoTrack == NULL);
-
- size_t numTracks = mHandler->countTracks();
- for (size_t i = 0; i < numTracks; ++i) {
- int32_t timeScale;
- sp<MetaData> format = mHandler->getTrackFormat(i, &timeScale);
-
- const char *mime;
- CHECK(format->findCString(kKeyMIMEType, &mime));
-
- if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
- // Very special case for MPEG2 Transport Streams.
- CHECK_EQ(numTracks, 1u);
-
- mTSParser = new ATSParser;
- return;
- }
-
- bool isAudio = !strncasecmp(mime, "audio/", 6);
- bool isVideo = !strncasecmp(mime, "video/", 6);
-
- TrackInfo info;
- info.mTimeScale = timeScale;
- info.mRTPTime = 0;
- info.mNormalPlaytimeUs = 0LL;
- info.mNPTMappingValid = false;
-
- if ((isAudio && mAudioTrack == NULL)
- || (isVideo && mVideoTrack == NULL)) {
- sp<AnotherPacketSource> source = new AnotherPacketSource(format);
-
- if (isAudio) {
- mAudioTrack = source;
- } else {
- mVideoTrack = source;
- }
-
- info.mSource = source;
- }
-
- mTracks.push(info);
- }
-
- mState = CONNECTED;
-}
-
-void NuPlayer2::RTSPSource2::onSDPLoaded(const sp<AMessage> &msg) {
- status_t err;
- CHECK(msg->findInt32("result", &err));
-
- mSDPLoader.clear();
-
- if (mDisconnectReplyID != 0) {
- err = UNKNOWN_ERROR;
- }
-
- if (err == OK) {
- sp<ASessionDescription> desc;
- sp<RefBase> obj;
- CHECK(msg->findObject("description", &obj));
- desc = static_cast<ASessionDescription *>(obj.get());
-
- AString rtspUri;
- if (!desc->findAttribute(0, "a=control", &rtspUri)) {
- ALOGE("Unable to find url in SDP");
- err = UNKNOWN_ERROR;
- } else {
- sp<AMessage> notify = new AMessage(kWhatNotify, this);
-
- mHandler = new MyHandler(rtspUri.c_str(), notify, true /* uidValid */, mUID);
- mLooper->registerHandler(mHandler);
-
- mHandler->loadSDP(desc);
- }
- }
-
- if (err != OK) {
- if (mState == CONNECTING) {
- // We're still in the preparation phase, signal that it
- // failed.
- notifyPrepared(err);
- }
-
- mState = DISCONNECTED;
- setError(err);
-
- if (mDisconnectReplyID != 0) {
- finishDisconnectIfPossible();
- }
- }
-}
-
-void NuPlayer2::RTSPSource2::onDisconnected(const sp<AMessage> &msg) {
- if (mState == DISCONNECTED) {
- return;
- }
-
- status_t err;
- CHECK(msg->findInt32("result", &err));
- CHECK_NE(err, (status_t)OK);
-
- mLooper->unregisterHandler(mHandler->id());
- mHandler.clear();
-
- if (mState == CONNECTING) {
- // We're still in the preparation phase, signal that it
- // failed.
- notifyPrepared(err);
- }
-
- mState = DISCONNECTED;
- setError(err);
-
- if (mDisconnectReplyID != 0) {
- finishDisconnectIfPossible();
- }
-}
-
-void NuPlayer2::RTSPSource2::finishDisconnectIfPossible() {
- if (mState != DISCONNECTED) {
- if (mHandler != NULL) {
- mHandler->disconnect();
- } else if (mSDPLoader != NULL) {
- mSDPLoader->cancel();
- }
- return;
- }
-
- (new AMessage)->postReply(mDisconnectReplyID);
- mDisconnectReplyID = 0;
-}
-
-void NuPlayer2::RTSPSource2::setError(status_t err) {
- Mutex::Autolock _l(mBufferingLock);
- mFinalResult = err;
-}
-
-void NuPlayer2::RTSPSource2::startBufferingIfNecessary() {
- Mutex::Autolock _l(mBufferingLock);
-
- if (!mBuffering) {
- mBuffering = true;
-
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatPauseOnBufferingStart);
- notify->post();
- }
-}
-
-bool NuPlayer2::RTSPSource2::stopBufferingIfNecessary() {
- Mutex::Autolock _l(mBufferingLock);
-
- if (mBuffering) {
- if (!haveSufficientDataOnAllTracks()) {
- return false;
- }
-
- mBuffering = false;
-
- sp<AMessage> notify = dupNotify();
- notify->setInt32("what", kWhatResumeOnBufferingEnd);
- notify->post();
- }
-
- return true;
-}
-
-void NuPlayer2::RTSPSource2::finishSeek(status_t err) {
- if (mSeekReplyID == NULL) {
- return;
- }
- sp<AMessage> seekReply = new AMessage;
- seekReply->setInt32("err", err);
- seekReply->postReply(mSeekReplyID);
- mSeekReplyID = NULL;
-}
-
-} // namespace android
diff --git a/media/libmediaplayer2/nuplayer2/RTSPSource2.h b/media/libmediaplayer2/nuplayer2/RTSPSource2.h
deleted file mode 100644
index e5f1716..0000000
--- a/media/libmediaplayer2/nuplayer2/RTSPSource2.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef RTSP_SOURCE2_H_
-
-#define RTSP_SOURCE2_H_
-
-#include "NuPlayer2Source.h"
-
-#include "ATSParser.h"
-
-namespace android {
-
-struct ALooper;
-struct AReplyToken;
-struct AnotherPacketSource;
-struct MyHandler;
-struct SDPLoader;
-
-struct NuPlayer2::RTSPSource2 : public NuPlayer2::Source {
- RTSPSource2(
- const sp<AMessage> ¬ify,
- const sp<MediaHTTPService> &httpService,
- const char *url,
- const KeyedVector<String8, String8> *headers,
- uid_t uid = 0,
- bool isSDP = false);
-
- virtual status_t getBufferingSettings(
- BufferingSettings* buffering /* nonnull */) override;
- virtual status_t setBufferingSettings(const BufferingSettings& buffering) override;
-
- virtual void prepareAsync(int64_t startTimeUs);
- virtual void start();
- virtual void stop();
-
- virtual status_t feedMoreTSData();
-
- virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
-
- virtual status_t getDuration(int64_t *durationUs);
- virtual status_t seekTo(
- int64_t seekTimeUs,
- MediaPlayer2SeekMode mode = MediaPlayer2SeekMode::SEEK_PREVIOUS_SYNC) override;
-
- void onMessageReceived(const sp<AMessage> &msg);
-
-protected:
- virtual ~RTSPSource2();
-
- virtual sp<MetaData> getFormatMeta(bool audio);
-
-private:
- enum {
- kWhatNotify = 'noti',
- kWhatDisconnect = 'disc',
- kWhatPerformSeek = 'seek',
- kWhatPollBuffering = 'poll',
- kWhatSignalEOS = 'eos ',
- };
-
- enum State {
- DISCONNECTED,
- CONNECTING,
- CONNECTED,
- SEEKING,
- };
-
- enum Flags {
- // Don't log any URLs.
- kFlagIncognito = 1,
- };
-
- struct TrackInfo {
- sp<AnotherPacketSource> mSource;
-
- int32_t mTimeScale;
- uint32_t mRTPTime;
- int64_t mNormalPlaytimeUs;
- bool mNPTMappingValid;
- };
-
- sp<MediaHTTPService> mHTTPService;
- AString mURL;
- KeyedVector<String8, String8> mExtraHeaders;
- uid_t mUID;
- uint32_t mFlags;
- bool mIsSDP;
- State mState;
- status_t mFinalResult;
- sp<AReplyToken> mDisconnectReplyID;
- Mutex mBufferingLock;
- bool mBuffering;
- bool mInPreparationPhase;
- bool mEOSPending;
-
- Mutex mBufferingSettingsLock;
- BufferingSettings mBufferingSettings;
-
- sp<ALooper> mLooper;
- sp<MyHandler> mHandler;
- sp<SDPLoader> mSDPLoader;
-
- Vector<TrackInfo> mTracks;
- sp<AnotherPacketSource> mAudioTrack;
- sp<AnotherPacketSource> mVideoTrack;
-
- sp<ATSParser> mTSParser;
-
- int32_t mSeekGeneration;
-
- int64_t mEOSTimeoutAudio;
- int64_t mEOSTimeoutVideo;
-
- sp<AReplyToken> mSeekReplyID;
-
- sp<AnotherPacketSource> getSource(bool audio);
-
- void onConnected();
- void onSDPLoaded(const sp<AMessage> &msg);
- void onDisconnected(const sp<AMessage> &msg);
- void finishDisconnectIfPossible();
-
- void performSeek(int64_t seekTimeUs);
- void schedulePollBuffering();
- void checkBuffering(
- bool *prepared,
- bool *underflow,
- bool *overflow,
- bool *startServer,
- bool *finished);
- void onPollBuffering();
-
- bool haveSufficientDataOnAllTracks();
-
- void setEOSTimeout(bool audio, int64_t timeout);
- void setError(status_t err);
- void startBufferingIfNecessary();
- bool stopBufferingIfNecessary();
- void finishSeek(status_t err);
-
- void postSourceEOSIfNecessary();
- void signalSourceEOS(status_t result);
- void onSignalEOS(const sp<AMessage> &msg);
-
- bool sourceNearEOS(bool audio);
- bool sourceReachedEOS(bool audio);
-
- DISALLOW_EVIL_CONSTRUCTORS(RTSPSource2);
-};
-
-} // namespace android
-
-#endif // RTSP_SOURCE2_H_
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 6709585..5301f5c 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -7,6 +7,7 @@
"MediaPlayerService.cpp",
"MediaRecorderClient.cpp",
"MetadataRetrieverClient.cpp",
+ "StagefrightMetadataRetriever.cpp",
"StagefrightRecorder.cpp",
"TestPlayerStub.cpp",
],
@@ -21,11 +22,14 @@
"libcodec2_client",
"libcrypto",
"libcutils",
+ "libdatasource",
"libdl",
+ "libdrmframework",
"libgui",
"libhidlbase",
"liblog",
"libmedia",
+ "libmedia_codeclist",
"libmedia_omx",
"libmediadrm",
"libmediametrics",
@@ -44,6 +48,7 @@
],
static_libs: [
+ "libplayerservice_datasource",
"libstagefright_nuplayer",
"libstagefright_rtsp",
"libstagefright_timedtext",
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 1376ccc..05f7365 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -20,9 +20,9 @@
#include <utils/Log.h>
#include <cutils/properties.h>
+#include <datasource/FileSource.h>
#include <media/DataSource.h>
#include <media/IMediaPlayer.h>
-#include <media/stagefright/FileSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <utils/Errors.h>
#include <utils/misc.h>
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index dfd3933..81ffcbc 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -48,6 +48,7 @@
#include <utils/Vector.h>
#include <codec2/hidl/client.h>
+#include <datasource/HTTPBase.h>
#include <media/IMediaHTTPService.h>
#include <media/IRemoteDisplay.h>
#include <media/IRemoteDisplayClient.h>
@@ -58,9 +59,11 @@
#include <media/AudioTrack.h>
#include <media/MemoryLeakTrackUtil.h>
#include <media/stagefright/InterfaceUtils.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooperRoster.h>
#include <media/stagefright/SurfaceUtils.h>
@@ -80,7 +83,6 @@
#include "TestPlayerStub.h"
#include "nuplayer/NuPlayerDriver.h"
-#include "HTTPBase.h"
static const int kDumpLockRetries = 50;
static const int kDumpLockSleepUs = 20000;
@@ -264,6 +266,172 @@
return ok;
}
+static void dumpCodecDetails(int fd, const sp<IMediaCodecList> &codecList, bool queryDecoders) {
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ const char *codecType = queryDecoders ? "Decoder" : "Encoder";
+ snprintf(buffer, SIZE - 1, "\n%s infos by media types:\n"
+ "=============================\n", codecType);
+ result.append(buffer);
+
+ size_t numCodecs = codecList->countCodecs();
+
+ // gather all media types supported by codec class, and link to codecs that support them
+ KeyedVector<AString, Vector<sp<MediaCodecInfo>>> allMediaTypes;
+ for (size_t codec_ix = 0; codec_ix < numCodecs; ++codec_ix) {
+ sp<MediaCodecInfo> info = codecList->getCodecInfo(codec_ix);
+ if (info->isEncoder() == !queryDecoders) {
+ Vector<AString> supportedMediaTypes;
+ info->getSupportedMediaTypes(&supportedMediaTypes);
+ if (!supportedMediaTypes.size()) {
+ snprintf(buffer, SIZE - 1, "warning: %s does not support any media types\n",
+ info->getCodecName());
+ result.append(buffer);
+ } else {
+ for (const AString &mediaType : supportedMediaTypes) {
+ if (allMediaTypes.indexOfKey(mediaType) < 0) {
+ allMediaTypes.add(mediaType, Vector<sp<MediaCodecInfo>>());
+ }
+ allMediaTypes.editValueFor(mediaType).add(info);
+ }
+ }
+ }
+ }
+
+ KeyedVector<AString, bool> visitedCodecs;
+ for (size_t type_ix = 0; type_ix < allMediaTypes.size(); ++type_ix) {
+ const AString &mediaType = allMediaTypes.keyAt(type_ix);
+ snprintf(buffer, SIZE - 1, "\nMedia type '%s':\n", mediaType.c_str());
+ result.append(buffer);
+
+ for (const sp<MediaCodecInfo> &info : allMediaTypes.valueAt(type_ix)) {
+ sp<MediaCodecInfo::Capabilities> caps = info->getCapabilitiesFor(mediaType.c_str());
+ if (caps == NULL) {
+ snprintf(buffer, SIZE - 1, "warning: %s does not have capabilities for type %s\n",
+ info->getCodecName(), mediaType.c_str());
+ result.append(buffer);
+ continue;
+ }
+ snprintf(buffer, SIZE - 1, " %s \"%s\" supports\n",
+ codecType, info->getCodecName());
+ result.append(buffer);
+
+ auto printList = [&](const char *type, const Vector<AString> &values){
+ snprintf(buffer, SIZE - 1, " %s: [", type);
+ result.append(buffer);
+ for (size_t j = 0; j < values.size(); ++j) {
+ snprintf(buffer, SIZE - 1, "\n %s%s", values[j].c_str(),
+ j == values.size() - 1 ? " " : ",");
+ result.append(buffer);
+ }
+ result.append("]\n");
+ };
+
+ if (visitedCodecs.indexOfKey(info->getCodecName()) < 0) {
+ visitedCodecs.add(info->getCodecName(), true);
+ {
+ Vector<AString> aliases;
+ info->getAliases(&aliases);
+ // quote alias
+ for (AString &alias : aliases) {
+ alias.insert("\"", 1, 0);
+ alias.append('"');
+ }
+ printList("aliases", aliases);
+ }
+ {
+ uint32_t attrs = info->getAttributes();
+ Vector<AString> list;
+ list.add(AStringPrintf("encoder: %d",
+ !!(attrs & MediaCodecInfo::kFlagIsEncoder)));
+ list.add(AStringPrintf("vendor: %d",
+ !!(attrs & MediaCodecInfo::kFlagIsVendor)));
+ list.add(AStringPrintf("software-only: %d",
+ !!(attrs & MediaCodecInfo::kFlagIsSoftwareOnly)));
+ list.add(AStringPrintf("hw-accelerated: %d",
+ !!(attrs & MediaCodecInfo::kFlagIsHardwareAccelerated)));
+ printList(AStringPrintf("attributes: %#x", attrs).c_str(), list);
+ }
+
+ snprintf(buffer, SIZE - 1, " owner: \"%s\"\n", info->getOwnerName());
+ result.append(buffer);
+ snprintf(buffer, SIZE - 1, " rank: %u\n", info->getRank());
+ result.append(buffer);
+ } else {
+ result.append(" aliases, attributes, owner, rank: see above\n");
+ }
+
+ {
+ Vector<AString> list;
+ Vector<MediaCodecInfo::ProfileLevel> profileLevels;
+ caps->getSupportedProfileLevels(&profileLevels);
+ for (const MediaCodecInfo::ProfileLevel &pl : profileLevels) {
+ const char *niceProfile =
+ mediaType.equalsIgnoreCase(MIMETYPE_AUDIO_AAC)
+ ? asString_AACObject(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG2)
+ ? asString_MPEG2Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_H263)
+ ? asString_H263Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG4)
+ ? asString_MPEG4Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AVC)
+ ? asString_AVCProfile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP8)
+ ? asString_VP8Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_HEVC)
+ ? asString_HEVCProfile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP9)
+ ? asString_VP9Profile(pl.mProfile) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AV1)
+ ? asString_AV1Profile(pl.mProfile) : "??";
+ const char *niceLevel =
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG2)
+ ? asString_MPEG2Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_H263)
+ ? asString_H263Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_MPEG4)
+ ? asString_MPEG4Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AVC)
+ ? asString_AVCLevel(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP8)
+ ? asString_VP8Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_HEVC)
+ ? asString_HEVCTierLevel(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_VP9)
+ ? asString_VP9Level(pl.mLevel) :
+ mediaType.equalsIgnoreCase(MIMETYPE_VIDEO_AV1)
+ ? asString_AV1Level(pl.mLevel) : "??";
+
+ list.add(AStringPrintf("% 5u/% 5u (%s/%s)",
+ pl.mProfile, pl.mLevel, niceProfile, niceLevel));
+ }
+ printList("profile/levels", list);
+ }
+
+ {
+ Vector<AString> list;
+ Vector<uint32_t> colors;
+ caps->getSupportedColorFormats(&colors);
+ for (uint32_t color : colors) {
+ list.add(AStringPrintf("%#x (%s)", color,
+ asString_ColorFormat((int32_t)color)));
+ }
+ printList("colors", list);
+ }
+
+ result.append(" details: ");
+ result.append(caps->getDetails()->debugString(6).c_str());
+ result.append("\n");
+ }
+ }
+ result.append("\n");
+ ::write(fd, result.string(), result.size());
+}
+
+
// TODO: Find real cause of Audio/Video delay in PV framework and remove this workaround
/* static */ int MediaPlayerService::AudioOutput::mMinBufferCount = 4;
/* static */ bool MediaPlayerService::AudioOutput::mIsOnEmulator = false;
@@ -423,7 +591,7 @@
SortedVector< sp<MediaRecorderClient> > mediaRecorderClients;
if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
- snprintf(buffer, SIZE, "Permission Denial: "
+ snprintf(buffer, SIZE - 1, "Permission Denial: "
"can't dump MediaPlayerService from pid=%d, uid=%d\n",
IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
@@ -452,11 +620,11 @@
}
result.append(" Files opened and/or mapped:\n");
- snprintf(buffer, SIZE, "/proc/%d/maps", getpid());
+ snprintf(buffer, SIZE - 1, "/proc/%d/maps", getpid());
FILE *f = fopen(buffer, "r");
if (f) {
while (!feof(f)) {
- fgets(buffer, SIZE, f);
+ fgets(buffer, SIZE - 1, f);
if (strstr(buffer, " /storage/") ||
strstr(buffer, " /system/sounds/") ||
strstr(buffer, " /data/") ||
@@ -472,13 +640,13 @@
result.append("\n");
}
- snprintf(buffer, SIZE, "/proc/%d/fd", getpid());
+ snprintf(buffer, SIZE - 1, "/proc/%d/fd", getpid());
DIR *d = opendir(buffer);
if (d) {
struct dirent *ent;
while((ent = readdir(d)) != NULL) {
if (strcmp(ent->d_name,".") && strcmp(ent->d_name,"..")) {
- snprintf(buffer, SIZE, "/proc/%d/fd/%s", getpid(), ent->d_name);
+ snprintf(buffer, SIZE - 1, "/proc/%d/fd/%s", getpid(), ent->d_name);
struct stat s;
if (lstat(buffer, &s) == 0) {
if ((s.st_mode & S_IFMT) == S_IFLNK) {
@@ -521,6 +689,10 @@
gLooperRoster.dump(fd, args);
+ sp<IMediaCodecList> codecList = getCodecList();
+ dumpCodecDetails(fd, codecList, true /* decoders */);
+ dumpCodecDetails(fd, codecList, false /* !decoders */);
+
bool dumpMem = false;
bool unreachableMemory = false;
for (size_t i = 0; i < args.size(); i++) {
@@ -543,6 +715,7 @@
}
}
write(fd, result.string(), result.size());
+
return NO_ERROR;
}
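
The new dumpCodecDetails() helper above first inverts the codec list into a per-media-type table and then prints each codec's capabilities grouped by type. Below is a minimal standalone sketch of that grouping step, using std::map and std::vector in place of the framework's KeyedVector and Vector; the codec names and media types are made up for illustration.

// Sketch only: groups codec names by the media types they support,
// mirroring the first loop of dumpCodecDetails() with standard containers.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct CodecInfo {
    std::string name;
    bool isEncoder;
    std::vector<std::string> mediaTypes;
};

int main() {
    std::vector<CodecInfo> codecs = {
        {"c2.android.avc.decoder", false, {"video/avc"}},
        {"c2.android.aac.decoder", false, {"audio/mp4a-latm"}},
        {"c2.android.avc.encoder", true,  {"video/avc"}},
    };

    const bool queryDecoders = true;
    // media type -> names of codecs of the requested class that support it
    std::map<std::string, std::vector<std::string>> byType;
    for (const CodecInfo& info : codecs) {
        if (info.isEncoder == !queryDecoders) {
            for (const std::string& type : info.mediaTypes) {
                byType[type].push_back(info.name);
            }
        }
    }

    for (const auto& [type, names] : byType) {
        std::cout << "Media type '" << type << "':\n";
        for (const std::string& n : names) {
            std::cout << "  " << (queryDecoders ? "Decoder" : "Encoder")
                      << " \"" << n << "\"\n";
        }
    }
    return 0;
}
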
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index 49688ce..2562b8f 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -26,10 +26,12 @@
#include <utils/String8.h>
#include <utils/Vector.h>
+#include <media/AudioSystem.h>
#include <media/MediaPlayerInterface.h>
#include <media/Metadata.h>
#include <media/stagefright/foundation/ABase.h>
+
#include <system/audio.h>
namespace android {
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 703da4b..c61ed1b 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -32,6 +32,7 @@
#include <cutils/atomic.h>
#include <cutils/properties.h> // for property_get
#include <gui/IGraphicBufferProducer.h>
+#include <mediautils/ServiceUtilities.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <system/audio.h>
@@ -44,7 +45,6 @@
namespace android {
const char* cameraPermission = "android.permission.CAMERA";
-const char* recordAudioPermission = "android.permission.RECORD_AUDIO";
static bool checkPermission(const char* permissionString) {
if (getpid() == IPCThreadState::self()->getCallingPid()) return true;
@@ -118,7 +118,16 @@
status_t MediaRecorderClient::setAudioSource(int as)
{
ALOGV("setAudioSource(%d)", as);
- if (!checkPermission(recordAudioPermission)) {
+ if (as < AUDIO_SOURCE_DEFAULT
+ || (as >= AUDIO_SOURCE_CNT && as != AUDIO_SOURCE_FM_TUNER)) {
+ ALOGE("Invalid audio source: %d", as);
+ return BAD_VALUE;
+ }
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ uid_t uid = IPCThreadState::self()->getCallingUid();
+
+ if ((as == AUDIO_SOURCE_FM_TUNER && !captureAudioOutputAllowed(pid, uid))
+ || !recordingAllowed(String16(""), pid, uid)) {
return PERMISSION_DENIED;
}
Mutex::Autolock lock(mLock);
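
setAudioSource() now performs the range check that used to live in StagefrightRecorder (see the StagefrightRecorder.cpp hunk further down) and gates AUDIO_SOURCE_FM_TUNER behind the capture-output permission in addition to the usual record permission. The following is a compilable sketch of the same decision logic, with stub permission helpers and illustrative constants standing in for system/audio.h and the mediautils helpers.

// Sketch only: mirrors the validation added to MediaRecorderClient::setAudioSource().
#include <cstdint>
#include <iostream>

enum Status : int32_t { OK = 0, BAD_VALUE = -22, PERMISSION_DENIED = -1 };

// Illustrative values; the real ones come from system/audio.h.
constexpr int32_t AUDIO_SOURCE_DEFAULT  = 0;
constexpr int32_t AUDIO_SOURCE_CNT      = 14;
constexpr int32_t AUDIO_SOURCE_FM_TUNER = 1998;

// Stubs standing in for the mediautils permission checks.
static bool captureAudioOutputAllowed() { return false; }
static bool recordingAllowed()          { return true; }

static Status checkAudioSource(int32_t as) {
    // Reject values outside the known range, except the FM tuner source.
    if (as < AUDIO_SOURCE_DEFAULT
            || (as >= AUDIO_SOURCE_CNT && as != AUDIO_SOURCE_FM_TUNER)) {
        return BAD_VALUE;
    }
    // FM tuner capture needs the stronger capture-output permission;
    // everything else only needs the plain record permission.
    if ((as == AUDIO_SOURCE_FM_TUNER && !captureAudioOutputAllowed())
            || !recordingAllowed()) {
        return PERMISSION_DENIED;
    }
    return OK;
}

int main() {
    std::cout << checkAudioSource(1) << " "                            // OK
              << checkAudioSource(100) << " "                          // BAD_VALUE
              << checkAudioSource(AUDIO_SOURCE_FM_TUNER) << "\n";      // PERMISSION_DENIED here
    return 0;
}
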
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 4a3c65e..fb228ca 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -37,6 +37,7 @@
#include <media/MediaPlayerInterface.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <private/media/VideoFrame.h>
#include "MetadataRetrieverClient.h"
#include "StagefrightMetadataRetriever.h"
@@ -292,7 +293,7 @@
delete albumArt;
return NULL;
}
- MediaAlbumArt::init((MediaAlbumArt *) mAlbumArt->pointer(),
+ MediaAlbumArt::init((MediaAlbumArt *) mAlbumArt->unsecurePointer(),
albumArt->size(), albumArt->data());
delete albumArt; // We've taken our copy.
return mAlbumArt;
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
similarity index 98%
rename from media/libstagefright/StagefrightMetadataRetriever.cpp
rename to media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
index 6f536a9..41b6f72 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libmediaplayerservice/StagefrightMetadataRetriever.cpp
@@ -22,14 +22,14 @@
#include <utils/Log.h>
#include <cutils/properties.h>
-#include "include/FrameDecoder.h"
-#include "include/StagefrightMetadataRetriever.h"
+#include "StagefrightMetadataRetriever.h"
+#include "FrameDecoder.h"
+#include <datasource/PlayerServiceDataSourceFactory.h>
+#include <datasource/PlayerServiceFileSource.h>
#include <media/IMediaHTTPService.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSourceFactory.h>
-#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
@@ -63,7 +63,8 @@
ALOGV("setDataSource(%s)", uri);
clearMetadata();
- mSource = DataSourceFactory::CreateFromURI(httpService, uri, headers);
+ mSource = PlayerServiceDataSourceFactory::getInstance()->CreateFromURI(
+ httpService, uri, headers);
if (mSource == NULL) {
ALOGE("Unable to create data source for '%s'.", uri);
@@ -91,7 +92,7 @@
ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
clearMetadata();
- mSource = new FileSource(fd, offset, length);
+ mSource = new PlayerServiceFileSource(fd, offset, length);
status_t err;
if ((err = mSource->initCheck()) != OK) {
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libmediaplayerservice/StagefrightMetadataRetriever.h
similarity index 100%
rename from media/libstagefright/include/StagefrightMetadataRetriever.h
rename to media/libmediaplayerservice/StagefrightMetadataRetriever.h
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 63681fa..954ccc9 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -232,11 +232,6 @@
status_t StagefrightRecorder::setAudioSource(audio_source_t as) {
ALOGV("setAudioSource: %d", as);
- if (as < AUDIO_SOURCE_DEFAULT ||
- (as >= AUDIO_SOURCE_CNT && as != AUDIO_SOURCE_FM_TUNER)) {
- ALOGE("Invalid audio source: %d", as);
- return BAD_VALUE;
- }
if (as == AUDIO_SOURCE_DEFAULT) {
mAudioSource = AUDIO_SOURCE_MIC;
diff --git a/media/libmediaplayerservice/datasource/Android.bp b/media/libmediaplayerservice/datasource/Android.bp
new file mode 100644
index 0000000..71fa50b
--- /dev/null
+++ b/media/libmediaplayerservice/datasource/Android.bp
@@ -0,0 +1,43 @@
+cc_library_static {
+ name: "libplayerservice_datasource",
+
+ srcs: [
+ "PlayerServiceDataSourceFactory.cpp",
+ "PlayerServiceFileSource.cpp",
+ "PlayerServiceMediaHTTP.cpp",
+ ],
+
+ header_libs: [
+ "media_ndk_headers",
+ "libmedia_headers",
+ ],
+
+ shared_libs: [
+ "libdatasource",
+ "libdrmframework",
+ "liblog",
+ "libutils",
+ ],
+
+ local_include_dirs: [
+ "include",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ },
+}
diff --git a/media/libmediaplayerservice/datasource/PlayerServiceDataSourceFactory.cpp b/media/libmediaplayerservice/datasource/PlayerServiceDataSourceFactory.cpp
new file mode 100644
index 0000000..ef946e9
--- /dev/null
+++ b/media/libmediaplayerservice/datasource/PlayerServiceDataSourceFactory.cpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PlayerServiceDataSourceFactory"
+
+
+#include <datasource/PlayerServiceDataSourceFactory.h>
+#include <datasource/PlayerServiceFileSource.h>
+#include <datasource/PlayerServiceMediaHTTP.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
+
+namespace android {
+
+// static
+sp<PlayerServiceDataSourceFactory> PlayerServiceDataSourceFactory::sInstance;
+// static
+Mutex PlayerServiceDataSourceFactory::sInstanceLock;
+
+// static
+sp<PlayerServiceDataSourceFactory> PlayerServiceDataSourceFactory::getInstance() {
+ Mutex::Autolock l(sInstanceLock);
+ if (!sInstance) {
+ sInstance = new PlayerServiceDataSourceFactory();
+ }
+ return sInstance;
+}
+
+sp<DataSource> PlayerServiceDataSourceFactory::CreateMediaHTTP(
+ const sp<MediaHTTPService> &httpService) {
+ if (httpService == NULL) {
+ return NULL;
+ }
+
+ sp<MediaHTTPConnection> conn = httpService->makeHTTPConnection();
+ if (conn == NULL) {
+ ALOGE("Failed to make http connection from http service!");
+ return NULL;
+ } else {
+ return new PlayerServiceMediaHTTP(conn);
+ }
+}
+
+sp<DataSource> PlayerServiceDataSourceFactory::CreateFileSource(const char *uri) {
+ return new PlayerServiceFileSource(uri);
+}
+
+} // namespace android
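
getInstance() above is a mutex-guarded, lazily created singleton. A standalone sketch of the same pattern follows, with std::mutex and std::shared_ptr in place of android::Mutex and sp<>; it is illustrative only, not the framework code.

// Sketch only: thread-safe lazy singleton in the style of
// PlayerServiceDataSourceFactory::getInstance().
#include <memory>
#include <mutex>

class Factory {
public:
    static std::shared_ptr<Factory> getInstance() {
        std::lock_guard<std::mutex> lock(sInstanceLock);
        if (!sInstance) {
            sInstance = std::shared_ptr<Factory>(new Factory());
        }
        return sInstance;
    }

private:
    Factory() = default;
    static std::shared_ptr<Factory> sInstance;
    static std::mutex sInstanceLock;
};

std::shared_ptr<Factory> Factory::sInstance;
std::mutex Factory::sInstanceLock;

int main() {
    auto a = Factory::getInstance();
    auto b = Factory::getInstance();
    return a == b ? 0 : 1;  // same instance both times
}

A function-local static (Meyers singleton) would give the same thread-safety guarantee without the explicit lock, but the static member plus mutex form matches the surrounding framework style.
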
diff --git a/media/libstagefright/FileSource.cpp b/media/libmediaplayerservice/datasource/PlayerServiceFileSource.cpp
similarity index 84%
rename from media/libstagefright/FileSource.cpp
rename to media/libmediaplayerservice/datasource/PlayerServiceFileSource.cpp
index aee7fd8..1580891 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libmediaplayerservice/datasource/PlayerServiceFileSource.cpp
@@ -15,18 +15,17 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "FileSource"
+#define LOG_TAG "PlayerServiceFileSource"
#include <utils/Log.h>
+#include <datasource/PlayerServiceFileSource.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/Utils.h>
#include <private/android_filesystem_config.h>
namespace android {
-FileSource::FileSource(const char *filename)
- : ClearFileSource(filename),
+PlayerServiceFileSource::PlayerServiceFileSource(const char *filename)
+ : FileSource(filename),
mDecryptHandle(NULL),
mDrmManagerClient(NULL),
mDrmBufOffset(0),
@@ -34,8 +33,8 @@
mDrmBuf(NULL){
}
-FileSource::FileSource(int fd, int64_t offset, int64_t length)
- : ClearFileSource(fd, offset, length),
+PlayerServiceFileSource::PlayerServiceFileSource(int fd, int64_t offset, int64_t length)
+ : FileSource(fd, offset, length),
mDecryptHandle(NULL),
mDrmManagerClient(NULL),
mDrmBufOffset(0),
@@ -43,7 +42,7 @@
mDrmBuf(NULL) {
}
-FileSource::~FileSource() {
+PlayerServiceFileSource::~PlayerServiceFileSource() {
if (mDrmBuf != NULL) {
delete[] mDrmBuf;
mDrmBuf = NULL;
@@ -62,7 +61,7 @@
}
}
-ssize_t FileSource::readAt(off64_t offset, void *data, size_t size) {
+ssize_t PlayerServiceFileSource::readAt(off64_t offset, void *data, size_t size) {
if (mFd < 0) {
return NO_INIT;
}
@@ -87,7 +86,7 @@
}
}
-sp<DecryptHandle> FileSource::DrmInitialization(const char *mime) {
+sp<DecryptHandle> PlayerServiceFileSource::DrmInitialization(const char *mime) {
if (getuid() == AID_MEDIA_EX) return nullptr; // no DRM in media extractor
if (mDrmManagerClient == NULL) {
mDrmManagerClient = new DrmManagerClient();
@@ -110,7 +109,7 @@
return mDecryptHandle;
}
-ssize_t FileSource::readAtDRM_l(off64_t offset, void *data, size_t size) {
+ssize_t PlayerServiceFileSource::readAtDRM_l(off64_t offset, void *data, size_t size) {
size_t DRM_CACHE_SIZE = 1024;
if (mDrmBuf == NULL) {
mDrmBuf = new unsigned char[DRM_CACHE_SIZE];
@@ -141,7 +140,7 @@
}
/* static */
-bool FileSource::requiresDrm(int fd, int64_t offset, int64_t length, const char *mime) {
+bool PlayerServiceFileSource::requiresDrm(int fd, int64_t offset, int64_t length, const char *mime) {
std::unique_ptr<DrmManagerClient> drmClient(new DrmManagerClient());
sp<DecryptHandle> decryptHandle =
drmClient->openDecryptSession(fd, offset, length, mime);
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libmediaplayerservice/datasource/PlayerServiceMediaHTTP.cpp
similarity index 78%
rename from media/libstagefright/http/MediaHTTP.cpp
rename to media/libmediaplayerservice/datasource/PlayerServiceMediaHTTP.cpp
index 0fba3dc..0124720 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libmediaplayerservice/datasource/PlayerServiceMediaHTTP.cpp
@@ -15,32 +15,32 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaHTTP"
+#define LOG_TAG "PlayerServiceMediaHTTP"
#include <utils/Log.h>
-#include <media/stagefright/MediaHTTP.h>
+#include <datasource/PlayerServiceMediaHTTP.h>
#include <binder/IServiceManager.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <media/MediaHTTPConnection.h>
namespace android {
-MediaHTTP::MediaHTTP(const sp<MediaHTTPConnection> &conn)
- : ClearMediaHTTP(conn),
+PlayerServiceMediaHTTP::PlayerServiceMediaHTTP(const sp<MediaHTTPConnection> &conn)
+ : MediaHTTP(conn),
mDrmManagerClient(NULL) {
}
-MediaHTTP::~MediaHTTP() {
+PlayerServiceMediaHTTP::~PlayerServiceMediaHTTP() {
clearDRMState_l();
}
// DRM...
-sp<DecryptHandle> MediaHTTP::DrmInitialization(const char* mime) {
+sp<DecryptHandle> PlayerServiceMediaHTTP::DrmInitialization(const char* mime) {
if (mDrmManagerClient == NULL) {
mDrmManagerClient = new DrmManagerClient();
}
@@ -62,7 +62,7 @@
return mDecryptHandle;
}
-void MediaHTTP::clearDRMState_l() {
+void PlayerServiceMediaHTTP::clearDRMState_l() {
if (mDecryptHandle != NULL) {
// To release mDecryptHandle
CHECK(mDrmManagerClient);
diff --git a/media/libmediaplayerservice/datasource/include/datasource/PlayerServiceDataSourceFactory.h b/media/libmediaplayerservice/datasource/include/datasource/PlayerServiceDataSourceFactory.h
new file mode 100644
index 0000000..7d58c5c
--- /dev/null
+++ b/media/libmediaplayerservice/datasource/include/datasource/PlayerServiceDataSourceFactory.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PLAYER_SERVICE_DATA_SOURCE_FACTORY_H_
+
+#define PLAYER_SERVICE_DATA_SOURCE_FACTORY_H_
+
+#include <datasource/DataSourceFactory.h>
+#include <media/DataSource.h>
+#include <sys/types.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct MediaHTTPService;
+class String8;
+struct HTTPBase;
+
+class PlayerServiceDataSourceFactory : public DataSourceFactory {
+public:
+ static sp<PlayerServiceDataSourceFactory> getInstance();
+ virtual sp<DataSource> CreateMediaHTTP(const sp<MediaHTTPService> &httpService);
+
+protected:
+ virtual sp<DataSource> CreateFileSource(const char *uri);
+
+private:
+ static sp<PlayerServiceDataSourceFactory> sInstance;
+ static Mutex sInstanceLock;
+ PlayerServiceDataSourceFactory() {};
+};
+
+} // namespace android
+
+#endif // PLAYER_SERVICE_DATA_SOURCE_FACTORY_H_
diff --git a/media/libstagefright/include/media/stagefright/FileSource.h b/media/libmediaplayerservice/datasource/include/datasource/PlayerServiceFileSource.h
similarity index 65%
rename from media/libstagefright/include/media/stagefright/FileSource.h
rename to media/libmediaplayerservice/datasource/include/datasource/PlayerServiceFileSource.h
index b610eef..08a013e 100644
--- a/media/libstagefright/include/media/stagefright/FileSource.h
+++ b/media/libmediaplayerservice/datasource/include/datasource/PlayerServiceFileSource.h
@@ -14,24 +14,26 @@
* limitations under the License.
*/
-#ifndef FILE_SOURCE_H_
+#ifndef PLAYER_SERVICE_FILE_SOURCE_H_
-#define FILE_SOURCE_H_
+#define PLAYER_SERVICE_FILE_SOURCE_H_
#include <stdio.h>
-#include <media/stagefright/ClearFileSource.h>
+#include <datasource/FileSource.h>
#include <media/stagefright/MediaErrors.h>
#include <utils/threads.h>
#include <drm/DrmManagerClient.h>
namespace android {
-class FileSource : public ClearFileSource {
+// FileSource implementation used by MediaPlayerService.
+// Supports OMA (forward-lock) files.
+class PlayerServiceFileSource : public FileSource {
public:
- FileSource(const char *filename);
- // FileSource takes ownership and will close the fd
- FileSource(int fd, int64_t offset, int64_t length);
+ PlayerServiceFileSource(const char *filename);
+ // PlayerServiceFileSource takes ownership and will close the fd
+ PlayerServiceFileSource(int fd, int64_t offset, int64_t length);
virtual ssize_t readAt(off64_t offset, void *data, size_t size);
@@ -40,7 +42,7 @@
static bool requiresDrm(int fd, int64_t offset, int64_t length, const char *mime);
protected:
- virtual ~FileSource();
+ virtual ~PlayerServiceFileSource();
private:
/*for DRM*/
@@ -52,11 +54,11 @@
ssize_t readAtDRM_l(off64_t offset, void *data, size_t size);
- FileSource(const FileSource &);
- FileSource &operator=(const FileSource &);
+ PlayerServiceFileSource(const PlayerServiceFileSource &);
+ PlayerServiceFileSource &operator=(const PlayerServiceFileSource &);
};
} // namespace android
-#endif // FILE_SOURCE_H_
+#endif // PLAYER_SERVICE_FILE_SOURCE_H_
diff --git a/media/libstagefright/include/media/stagefright/MediaHTTP.h b/media/libmediaplayerservice/datasource/include/datasource/PlayerServiceMediaHTTP.h
similarity index 67%
rename from media/libstagefright/include/media/stagefright/MediaHTTP.h
rename to media/libmediaplayerservice/datasource/include/datasource/PlayerServiceMediaHTTP.h
index acaa6c4..0032cd7 100644
--- a/media/libstagefright/include/media/stagefright/MediaHTTP.h
+++ b/media/libmediaplayerservice/datasource/include/datasource/PlayerServiceMediaHTTP.h
@@ -14,22 +14,24 @@
* limitations under the License.
*/
-#ifndef MEDIA_HTTP_H_
+#ifndef PLAYER_SERVICE_MEDIA_HTTP_H_
-#define MEDIA_HTTP_H_
+#define PLAYER_SERVICE_MEDIA_HTTP_H_
+#include <datasource/MediaHTTP.h>
#include <media/stagefright/foundation/AString.h>
-#include <media/stagefright/ClearMediaHTTP.h>
namespace android {
struct MediaHTTPConnection;
-struct MediaHTTP : public ClearMediaHTTP {
- MediaHTTP(const sp<MediaHTTPConnection> &conn);
+// MediaHTTP implementation used by MediaPlayerService.
+// Supports OMA (forward-lock) streams.
+struct PlayerServiceMediaHTTP : public MediaHTTP {
+ PlayerServiceMediaHTTP(const sp<MediaHTTPConnection> &conn);
protected:
- virtual ~MediaHTTP();
+ virtual ~PlayerServiceMediaHTTP();
virtual sp<DecryptHandle> DrmInitialization(const char* mime);
@@ -39,9 +41,9 @@
void clearDRMState_l();
- DISALLOW_EVIL_CONSTRUCTORS(MediaHTTP);
+ DISALLOW_EVIL_CONSTRUCTORS(PlayerServiceMediaHTTP);
};
} // namespace android
-#endif // MEDIA_HTTP_H_
+#endif // PLAYER_SERVICE_MEDIA_HTTP_H_
diff --git a/media/libmediaplayerservice/include/MediaPlayerInterface.h b/media/libmediaplayerservice/include/MediaPlayerInterface.h
index 0ad4d04..436cb31 100644
--- a/media/libmediaplayerservice/include/MediaPlayerInterface.h
+++ b/media/libmediaplayerservice/include/MediaPlayerInterface.h
@@ -27,7 +27,6 @@
#include <media/mediaplayer.h>
#include <media/AudioResamplerPublic.h>
-#include <media/AudioSystem.h>
#include <media/AudioTimestamp.h>
#include <media/AVSyncSettings.h>
#include <media/BufferingSettings.h>
diff --git a/media/libmediaplayerservice/nuplayer/Android.bp b/media/libmediaplayerservice/nuplayer/Android.bp
index 23a19e7..c8f48a2 100644
--- a/media/libmediaplayerservice/nuplayer/Android.bp
+++ b/media/libmediaplayerservice/nuplayer/Android.bp
@@ -18,6 +18,7 @@
],
header_libs: [
+ "libmediadrm_headers",
"media_plugin_headers",
],
@@ -45,6 +46,7 @@
shared_libs: [
"libbinder",
+ "libdatasource",
"libui",
"libgui",
"libmedia",
@@ -52,6 +54,10 @@
"libpowermanager",
],
+ static_libs: [
+ "libplayerservice_datasource",
+ ],
+
name: "libstagefright_nuplayer",
sanitize: {
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 4653711..00e3443 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -23,6 +23,10 @@
#include "AnotherPacketSource.h"
#include <binder/IServiceManager.h>
#include <cutils/properties.h>
+#include <datasource/PlayerServiceDataSourceFactory.h>
+#include <datasource/PlayerServiceFileSource.h>
+#include <datasource/HTTPBase.h>
+#include <datasource/NuCachedSource2.h>
#include <media/DataSource.h>
#include <media/MediaBufferHolder.h>
#include <media/MediaSource.h>
@@ -31,8 +35,6 @@
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSourceFactory.h>
-#include <media/stagefright/FileSource.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaClock.h>
@@ -41,8 +43,6 @@
#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
-#include "../../libstagefright/include/NuCachedSource2.h"
-#include "../../libstagefright/include/HTTPBase.h"
namespace android {
@@ -385,7 +385,8 @@
if (!strncasecmp("http://", uri, 7) || !strncasecmp("https://", uri, 8)) {
sp<DataSource> httpSource;
mDisconnectLock.unlock();
- httpSource = DataSourceFactory::CreateMediaHTTP(mHTTPService);
+ httpSource = PlayerServiceDataSourceFactory::getInstance()
+ ->CreateMediaHTTP(mHTTPService);
if (httpSource == NULL) {
ALOGE("Failed to create http source!");
notifyPreparedAndCleanup(UNKNOWN_ERROR);
@@ -401,9 +402,9 @@
mLock.unlock();
mDisconnectLock.unlock();
// This might take long time if connection has some issue.
- sp<DataSource> dataSource = DataSourceFactory::CreateFromURI(
- mHTTPService, uri, &mUriHeaders, &contentType,
- static_cast<HTTPBase *>(mHttpSource.get()));
+ sp<DataSource> dataSource = PlayerServiceDataSourceFactory::getInstance()
+ ->CreateFromURI(mHTTPService, uri, &mUriHeaders, &contentType,
+ static_cast<HTTPBase *>(mHttpSource.get()));
mDisconnectLock.lock();
mLock.lock();
if (!mDisconnected) {
@@ -411,7 +412,8 @@
}
} else {
if (property_get_bool("media.stagefright.extractremote", true) &&
- !FileSource::requiresDrm(mFd, mOffset, mLength, nullptr /* mime */)) {
+ !PlayerServiceFileSource::requiresDrm(
+ mFd, mOffset, mLength, nullptr /* mime */)) {
sp<IBinder> binder =
defaultServiceManager()->getService(String16("media.extractor"));
if (binder != nullptr) {
@@ -438,7 +440,7 @@
}
if (mDataSource == nullptr) {
ALOGD("FileSource local");
- mDataSource = new FileSource(mFd, mOffset, mLength);
+ mDataSource = new PlayerServiceFileSource(mFd, mOffset, mLength);
}
// TODO: close should always be done on mFd, see the lines following
// CreateDataSourceFromIDataSource above,
@@ -782,7 +784,7 @@
return;
}
- int64_t nextSubTimeUs;
+ int64_t nextSubTimeUs = 0;
readBuffer(type, -1, MediaPlayerSeekMode::SEEK_PREVIOUS_SYNC /* mode */, &nextSubTimeUs);
sp<ABuffer> buffer;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 9f5be06..0e58ec2 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -19,7 +19,7 @@
#define NU_PLAYER_H_
#include <media/AudioResamplerPublic.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaPlayerInterface.h>
#include <media/stagefright/foundation/AHandler.h>
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index ee463ce..f734439 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -28,7 +28,7 @@
#include "NuPlayerSource.h"
#include <cutils/properties.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaBufferHolder.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 0997e7d..793014e 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -24,7 +24,7 @@
#include "NuPlayerRenderer.h"
#include "NuPlayerSource.h"
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 865cb2a..95c973a 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -33,6 +33,7 @@
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <media/IMediaAnalyticsService.h>
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
index 50f69ff..4360656 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.h
@@ -18,8 +18,8 @@
#define NUPLAYER_DRM_H_
#include <binder/Parcel.h>
-#include <media/ICrypto.h>
-#include <media/IDrm.h>
+#include <mediadrm/ICrypto.h>
+#include <mediadrm/IDrm.h>
#include <media/stagefright/MetaData.h> // for CryptInfo
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 39be40d..c30f048 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -33,6 +33,7 @@
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler.h>
#include <media/MediaCodecBuffer.h>
+#include <utils/SystemClock.h>
#include <inttypes.h>
@@ -156,6 +157,7 @@
CHECK(mediaClock != NULL);
mPlaybackRate = mPlaybackSettings.mSpeed;
mMediaClock->setPlaybackRate(mPlaybackRate);
+ (void)mSyncFlag.test_and_set();
}
NuPlayer::Renderer::~Renderer() {
@@ -326,9 +328,27 @@
mSyncQueues = false;
}
+ // Wait until the current job in the message queue is done, to make sure
+ // buffer processing from the old generation is finished. After the current
+ // job is finished, access to buffers is protected by generation.
+ Mutex::Autolock syncLock(mSyncLock);
+ int64_t syncCount = mSyncCount;
+ mSyncFlag.clear();
+
+ // Make sure message queue is not empty after mSyncFlag is cleared.
sp<AMessage> msg = new AMessage(kWhatFlush, this);
msg->setInt32("audio", static_cast<int32_t>(audio));
msg->post();
+
+ int64_t uptimeMs = uptimeMillis();
+ while (mSyncCount == syncCount) {
+ (void)mSyncCondition.waitRelative(mSyncLock, ms2ns(1000));
+ if (uptimeMillis() - uptimeMs > 1000) {
+ ALOGW("flush(): no wake-up from sync point for 1s; stop waiting to "
+ "prevent being stuck indefinitely.");
+ break;
+ }
+ }
}
void NuPlayer::Renderer::signalTimeDiscontinuity() {
@@ -781,6 +801,11 @@
TRESPASS();
break;
}
+ if (!mSyncFlag.test_and_set()) {
+ Mutex::Autolock syncLock(mSyncLock);
+ ++mSyncCount;
+ mSyncCondition.broadcast();
+ }
}
void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index a521f62..3d2b033 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -18,6 +18,8 @@
#define NUPLAYER_RENDERER_H_
+#include <atomic>
+
#include <media/AudioResamplerPublic.h>
#include <media/AVSyncSettings.h>
@@ -220,6 +222,11 @@
sp<AWakeLock> mWakeLock;
+ std::atomic_flag mSyncFlag = ATOMIC_FLAG_INIT;
+ Mutex mSyncLock;
+ Condition mSyncCondition;
+ int64_t mSyncCount{0};
+
status_t getCurrentPositionOnLooper(int64_t *mediaUs);
status_t getCurrentPositionOnLooper(
int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo = false);
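
The mSyncFlag/mSyncLock/mSyncCondition/mSyncCount members added above implement the flush handshake introduced in NuPlayerRenderer.cpp: flush() clears the flag and waits (capped at one second) for mSyncCount to advance, while the message handler bumps the counter and broadcasts the first time it finds the flag cleared, guaranteeing that the message in flight when flush() was called has finished. Here is a self-contained sketch of that handshake using std::atomic_flag and std::condition_variable; it is a simplification, not a drop-in for the Renderer code.

// Sketch only: flush/handler synchronization in the style of NuPlayer::Renderer.
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::atomic_flag gSyncFlag = ATOMIC_FLAG_INIT;
std::mutex gSyncLock;
std::condition_variable gSyncCondition;
int64_t gSyncCount = 0;

// Runs on the "handler" thread after each processed message.
void onMessageProcessed() {
    if (!gSyncFlag.test_and_set()) {       // flag was cleared by a flusher
        std::lock_guard<std::mutex> lock(gSyncLock);
        ++gSyncCount;
        gSyncCondition.notify_all();
    }
}

// Runs on the caller's thread; returns once the handler has completed
// a message after the flush request, or after a one-second cap.
void flush() {
    std::unique_lock<std::mutex> lock(gSyncLock);
    const int64_t syncCount = gSyncCount;
    gSyncFlag.clear();
    // The real code posts a kWhatFlush message here so the queue is
    // guaranteed not to be empty once the flag is cleared.
    gSyncCondition.wait_for(lock, std::chrono::seconds(1),
                            [&] { return gSyncCount != syncCount; });
}

int main() {
    (void)gSyncFlag.test_and_set();        // start "set", as Renderer's constructor does
    std::thread handler([] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        onMessageProcessed();              // simulates the handler finishing a message
    });
    flush();                               // blocks until the handler signals (or 1 s)
    handler.join();
    std::cout << "flush completed, syncCount=" << gSyncCount << "\n";
    return 0;
}
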
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 9f5ef78..f137c52 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -20,7 +20,7 @@
#include "NuPlayer.h"
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/mediaplayer.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MetaData.h>
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
index ee70306..b5142ed 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
@@ -154,7 +154,7 @@
}
memcpy(data,
- (const uint8_t *)mem->pointer()
+ (const uint8_t *)mem->unsecurePointer()
+ entry->mOffset,
copy);
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index afdcd37..f21d2b3 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -130,29 +130,32 @@
} else if (n < 0) {
break;
} else {
- if (buffer[0] == 0x00) {
+ if (buffer[0] == 0x00) { // OK to access buffer[0] since n must be > 0 here
// XXX legacy
if (extra == NULL) {
extra = new AMessage;
}
- uint8_t type = buffer[1];
+ uint8_t type = 0;
+ if (n > 1) {
+ type = buffer[1];
- if (type & 2) {
- int64_t mediaTimeUs;
- memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
+ if ((type & 2) && (n >= 2 + sizeof(int64_t))) {
+ int64_t mediaTimeUs;
+ memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
- extra->setInt64(kATSParserKeyMediaTimeUs, mediaTimeUs);
+ extra->setInt64(kATSParserKeyMediaTimeUs, mediaTimeUs);
+ }
}
mTSParser->signalDiscontinuity(
((type & 1) == 0)
- ? ATSParser::DISCONTINUITY_TIME
- : ATSParser::DISCONTINUITY_FORMATCHANGE,
+ ? ATSParser::DISCONTINUITY_TIME
+ : ATSParser::DISCONTINUITY_FORMATCHANGE,
extra);
} else {
- status_t err = mTSParser->feedTSPacket(buffer, sizeof(buffer));
+ status_t err = mTSParser->feedTSPacket(buffer, n);
if (err != OK) {
ALOGE("TS Parser returned error %d", err);
diff --git a/media/libmediaplayerservice/tests/Android.bp b/media/libmediaplayerservice/tests/Android.bp
index f8c89e5..8357925 100644
--- a/media/libmediaplayerservice/tests/Android.bp
+++ b/media/libmediaplayerservice/tests/Android.bp
@@ -6,14 +6,22 @@
shared_libs: [
"liblog",
+ "libbinder",
+ "libmedia",
"libmediaplayerservice",
"libmediadrm",
+ "libresourcemanagerservice",
"libutils",
"android.hardware.drm@1.0",
"android.hardware.drm@1.1",
"android.hardware.drm@1.2",
],
+ include_dirs: [
+ "frameworks/av/include",
+ "frameworks/av/services/mediaresourcemanager",
+ ],
+
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
index d81ee05..262fe32 100644
--- a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
+++ b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
@@ -20,14 +20,33 @@
#include <gtest/gtest.h>
+#include <android/media/BnResourceManagerClient.h>
+#include <android/media/IResourceManagerService.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/ProcessInfoInterface.h>
#include <mediadrm/DrmHal.h>
#include <mediadrm/DrmSessionClientInterface.h>
#include <mediadrm/DrmSessionManager.h>
+#include <algorithm>
+#include <vector>
+
+#include "ResourceManagerService.h"
+
namespace android {
+using ::android::binder::Status;
+using ::android::media::BnResourceManagerClient;
+using ::android::media::ResourceManagerService;
+
+static Vector<uint8_t> toAndroidVector(const std::vector<uint8_t> &vec) {
+ Vector<uint8_t> aVec;
+ for (auto b : vec) {
+ aVec.push_back(b);
+ }
+ return aVec;
+}
+
struct FakeProcessInfo : public ProcessInfoInterface {
FakeProcessInfo() {}
virtual ~FakeProcessInfo() {}
@@ -47,173 +66,130 @@
DISALLOW_EVIL_CONSTRUCTORS(FakeProcessInfo);
};
-struct FakeDrm : public DrmSessionClientInterface {
- FakeDrm() {}
+struct FakeDrm : public BnResourceManagerClient {
+ FakeDrm(const std::vector<uint8_t>& sessionId, const sp<DrmSessionManager>& manager)
+ : mSessionId(toAndroidVector(sessionId)),
+ mReclaimed(false),
+ mDrmSessionManager(manager) {}
+
virtual ~FakeDrm() {}
- virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
- mReclaimedSessions.push_back(sessionId);
- return true;
+ Status reclaimResource(bool* _aidl_return) {
+ mReclaimed = true;
+ mDrmSessionManager->removeSession(mSessionId);
+ *_aidl_return = true;
+ return Status::ok();
}
- const Vector<Vector<uint8_t> >& reclaimedSessions() const {
- return mReclaimedSessions;
+ Status getName(::std::string* _aidl_return) {
+ String8 name("FakeDrm[");
+ for (size_t i = 0; i < mSessionId.size(); ++i) {
+ name.appendFormat("%02x", mSessionId[i]);
+ }
+ name.append("]");
+ *_aidl_return = name;
+ return Status::ok();
}
+ bool isReclaimed() const {
+ return mReclaimed;
+ }
+
+ const Vector<uint8_t> mSessionId;
+
private:
- Vector<Vector<uint8_t> > mReclaimedSessions;
+ bool mReclaimed;
+ const sp<DrmSessionManager> mDrmSessionManager;
DISALLOW_EVIL_CONSTRUCTORS(FakeDrm);
};
+struct FakeSystemCallback :
+ public ResourceManagerService::SystemCallbackInterface {
+ FakeSystemCallback() {}
+
+ virtual void noteStartVideo(int /*uid*/) override {}
+
+ virtual void noteStopVideo(int /*uid*/) override {}
+
+ virtual void noteResetVideo() override {}
+
+ virtual bool requestCpusetBoost(
+ bool /*enable*/, const sp<IInterface> &/*client*/) override {
+ return true;
+ }
+
+protected:
+ virtual ~FakeSystemCallback() {}
+
+private:
+
+ DISALLOW_EVIL_CONSTRUCTORS(FakeSystemCallback);
+};
+
static const int kTestPid1 = 30;
static const int kTestPid2 = 20;
-static const uint8_t kTestSessionId1[] = {1, 2, 3};
-static const uint8_t kTestSessionId2[] = {4, 5, 6, 7, 8};
-static const uint8_t kTestSessionId3[] = {9, 0};
+static const std::vector<uint8_t> kTestSessionId1{1, 2, 3};
+static const std::vector<uint8_t> kTestSessionId2{4, 5, 6, 7, 8};
+static const std::vector<uint8_t> kTestSessionId3{9, 0};
class DrmSessionManagerTest : public ::testing::Test {
public:
DrmSessionManagerTest()
- : mDrmSessionManager(new DrmSessionManager(new FakeProcessInfo())),
- mTestDrm1(new FakeDrm()),
- mTestDrm2(new FakeDrm()) {
- GetSessionId(kTestSessionId1, ARRAY_SIZE(kTestSessionId1), &mSessionId1);
- GetSessionId(kTestSessionId2, ARRAY_SIZE(kTestSessionId2), &mSessionId2);
- GetSessionId(kTestSessionId3, ARRAY_SIZE(kTestSessionId3), &mSessionId3);
+ : mService(new ResourceManagerService(new FakeProcessInfo(), new FakeSystemCallback())),
+ mDrmSessionManager(new DrmSessionManager(mService)),
+ mTestDrm1(new FakeDrm(kTestSessionId1, mDrmSessionManager)),
+ mTestDrm2(new FakeDrm(kTestSessionId2, mDrmSessionManager)),
+ mTestDrm3(new FakeDrm(kTestSessionId3, mDrmSessionManager)) {
+ DrmSessionManager *ptr = new DrmSessionManager(mService);
+ EXPECT_NE(ptr, nullptr);
+ /* mDrmSessionManager = ptr; */
}
protected:
- static void GetSessionId(const uint8_t* ids, size_t num, Vector<uint8_t>* sessionId) {
- for (size_t i = 0; i < num; ++i) {
- sessionId->push_back(ids[i]);
- }
- }
-
- static void ExpectEqSessionInfo(const SessionInfo& info, sp<DrmSessionClientInterface> drm,
- const Vector<uint8_t>& sessionId, int64_t timeStamp) {
- EXPECT_EQ(drm, info.drm);
- EXPECT_TRUE(isEqualSessionId(sessionId, info.sessionId));
- EXPECT_EQ(timeStamp, info.timeStamp);
- }
-
void addSession() {
- mDrmSessionManager->addSession(kTestPid1, mTestDrm1, mSessionId1);
- mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mSessionId2);
- mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mSessionId3);
- const PidSessionInfosMap& map = sessionMap();
- EXPECT_EQ(2u, map.size());
- ssize_t index1 = map.indexOfKey(kTestPid1);
- ASSERT_GE(index1, 0);
- const SessionInfos& infos1 = map[index1];
- EXPECT_EQ(1u, infos1.size());
- ExpectEqSessionInfo(infos1[0], mTestDrm1, mSessionId1, 0);
-
- ssize_t index2 = map.indexOfKey(kTestPid2);
- ASSERT_GE(index2, 0);
- const SessionInfos& infos2 = map[index2];
- EXPECT_EQ(2u, infos2.size());
- ExpectEqSessionInfo(infos2[0], mTestDrm2, mSessionId2, 1);
- ExpectEqSessionInfo(infos2[1], mTestDrm2, mSessionId3, 2);
+ mDrmSessionManager->addSession(kTestPid1, mTestDrm1, mTestDrm1->mSessionId);
+ mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mTestDrm2->mSessionId);
+ mDrmSessionManager->addSession(kTestPid2, mTestDrm3, mTestDrm3->mSessionId);
}
- const PidSessionInfosMap& sessionMap() {
- return mDrmSessionManager->mSessionMap;
- }
-
- void testGetLowestPriority() {
- int pid;
- int priority;
- EXPECT_FALSE(mDrmSessionManager->getLowestPriority_l(&pid, &priority));
-
- addSession();
- EXPECT_TRUE(mDrmSessionManager->getLowestPriority_l(&pid, &priority));
-
- EXPECT_EQ(kTestPid1, pid);
- FakeProcessInfo processInfo;
- int priority1;
- processInfo.getPriority(kTestPid1, &priority1);
- EXPECT_EQ(priority1, priority);
- }
-
- void testGetLeastUsedSession() {
- sp<DrmSessionClientInterface> drm;
- Vector<uint8_t> sessionId;
- EXPECT_FALSE(mDrmSessionManager->getLeastUsedSession_l(kTestPid1, &drm, &sessionId));
-
- addSession();
-
- EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid1, &drm, &sessionId));
- EXPECT_EQ(mTestDrm1, drm);
- EXPECT_TRUE(isEqualSessionId(mSessionId1, sessionId));
-
- EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid2, &drm, &sessionId));
- EXPECT_EQ(mTestDrm2, drm);
- EXPECT_TRUE(isEqualSessionId(mSessionId2, sessionId));
-
- // mSessionId2 is no longer the least used session.
- mDrmSessionManager->useSession(mSessionId2);
- EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid2, &drm, &sessionId));
- EXPECT_EQ(mTestDrm2, drm);
- EXPECT_TRUE(isEqualSessionId(mSessionId3, sessionId));
- }
-
+ sp<IResourceManagerService> mService;
sp<DrmSessionManager> mDrmSessionManager;
sp<FakeDrm> mTestDrm1;
sp<FakeDrm> mTestDrm2;
- Vector<uint8_t> mSessionId1;
- Vector<uint8_t> mSessionId2;
- Vector<uint8_t> mSessionId3;
+ sp<FakeDrm> mTestDrm3;
};
TEST_F(DrmSessionManagerTest, addSession) {
addSession();
+
+ EXPECT_EQ(3u, mDrmSessionManager->getSessionCount());
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm1->mSessionId));
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm2->mSessionId));
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm3->mSessionId));
}
TEST_F(DrmSessionManagerTest, useSession) {
addSession();
- mDrmSessionManager->useSession(mSessionId1);
- mDrmSessionManager->useSession(mSessionId3);
+ mDrmSessionManager->useSession(mTestDrm1->mSessionId);
+ mDrmSessionManager->useSession(mTestDrm3->mSessionId);
- const PidSessionInfosMap& map = sessionMap();
- const SessionInfos& infos1 = map.valueFor(kTestPid1);
- const SessionInfos& infos2 = map.valueFor(kTestPid2);
- ExpectEqSessionInfo(infos1[0], mTestDrm1, mSessionId1, 3);
- ExpectEqSessionInfo(infos2[1], mTestDrm2, mSessionId3, 4);
+ EXPECT_EQ(3u, mDrmSessionManager->getSessionCount());
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm1->mSessionId));
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm2->mSessionId));
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm3->mSessionId));
}
TEST_F(DrmSessionManagerTest, removeSession) {
addSession();
- mDrmSessionManager->removeSession(mSessionId2);
+ mDrmSessionManager->removeSession(mTestDrm2->mSessionId);
- const PidSessionInfosMap& map = sessionMap();
- EXPECT_EQ(2u, map.size());
- const SessionInfos& infos1 = map.valueFor(kTestPid1);
- const SessionInfos& infos2 = map.valueFor(kTestPid2);
- EXPECT_EQ(1u, infos1.size());
- EXPECT_EQ(1u, infos2.size());
- // mSessionId2 has been removed.
- ExpectEqSessionInfo(infos2[0], mTestDrm2, mSessionId3, 2);
-}
-
-TEST_F(DrmSessionManagerTest, removeDrm) {
- addSession();
-
- sp<FakeDrm> drm = new FakeDrm;
- const uint8_t ids[] = {123};
- Vector<uint8_t> sessionId;
- GetSessionId(ids, ARRAY_SIZE(ids), &sessionId);
- mDrmSessionManager->addSession(kTestPid2, drm, sessionId);
-
- mDrmSessionManager->removeDrm(mTestDrm2);
-
- const PidSessionInfosMap& map = sessionMap();
- const SessionInfos& infos2 = map.valueFor(kTestPid2);
- EXPECT_EQ(1u, infos2.size());
- // mTestDrm2 has been removed.
- ExpectEqSessionInfo(infos2[0], drm, sessionId, 3);
+ EXPECT_EQ(2u, mDrmSessionManager->getSessionCount());
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm1->mSessionId));
+ EXPECT_FALSE(mDrmSessionManager->containsSession(mTestDrm2->mSessionId));
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm3->mSessionId));
}
TEST_F(DrmSessionManagerTest, reclaimSession) {
@@ -224,30 +200,63 @@
EXPECT_FALSE(mDrmSessionManager->reclaimSession(50));
EXPECT_TRUE(mDrmSessionManager->reclaimSession(10));
- EXPECT_EQ(1u, mTestDrm1->reclaimedSessions().size());
- EXPECT_TRUE(isEqualSessionId(mSessionId1, mTestDrm1->reclaimedSessions()[0]));
-
- mDrmSessionManager->removeSession(mSessionId1);
+ EXPECT_TRUE(mTestDrm1->isReclaimed());
// add a session from a higher priority process.
- sp<FakeDrm> drm = new FakeDrm;
- const uint8_t ids[] = {1, 3, 5};
- Vector<uint8_t> sessionId;
- GetSessionId(ids, ARRAY_SIZE(ids), &sessionId);
- mDrmSessionManager->addSession(15, drm, sessionId);
+ const std::vector<uint8_t> sid{1, 3, 5};
+ sp<FakeDrm> drm = new FakeDrm(sid, mDrmSessionManager);
+ mDrmSessionManager->addSession(15, drm, drm->mSessionId);
+ // make sure mTestDrm2 is reclaimed next instead of mTestDrm3
+ mDrmSessionManager->useSession(mTestDrm3->mSessionId);
EXPECT_TRUE(mDrmSessionManager->reclaimSession(18));
- EXPECT_EQ(1u, mTestDrm2->reclaimedSessions().size());
- // mSessionId2 is reclaimed.
- EXPECT_TRUE(isEqualSessionId(mSessionId2, mTestDrm2->reclaimedSessions()[0]));
+ EXPECT_TRUE(mTestDrm2->isReclaimed());
+
+ EXPECT_EQ(2u, mDrmSessionManager->getSessionCount());
+ EXPECT_FALSE(mDrmSessionManager->containsSession(mTestDrm1->mSessionId));
+ EXPECT_FALSE(mDrmSessionManager->containsSession(mTestDrm2->mSessionId));
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm3->mSessionId));
+ EXPECT_TRUE(mDrmSessionManager->containsSession(drm->mSessionId));
}
-TEST_F(DrmSessionManagerTest, getLowestPriority) {
- testGetLowestPriority();
-}
+TEST_F(DrmSessionManagerTest, reclaimAfterUse) {
+ // nothing to reclaim yet
+ EXPECT_FALSE(mDrmSessionManager->reclaimSession(kTestPid1));
+ EXPECT_FALSE(mDrmSessionManager->reclaimSession(kTestPid2));
-TEST_F(DrmSessionManagerTest, getLeastUsedSession_l) {
- testGetLeastUsedSession();
+ // add sessions from same pid
+ mDrmSessionManager->addSession(kTestPid2, mTestDrm1, mTestDrm1->mSessionId);
+ mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mTestDrm2->mSessionId);
+ mDrmSessionManager->addSession(kTestPid2, mTestDrm3, mTestDrm3->mSessionId);
+
+ // use some but not all sessions
+ mDrmSessionManager->useSession(mTestDrm1->mSessionId);
+ mDrmSessionManager->useSession(mTestDrm1->mSessionId);
+ mDrmSessionManager->useSession(mTestDrm2->mSessionId);
+
+ // calling pid priority is too low
+ int lowPriorityPid = kTestPid2 + 1;
+ EXPECT_FALSE(mDrmSessionManager->reclaimSession(lowPriorityPid));
+
+ // unused session is reclaimed first
+ int highPriorityPid = kTestPid2 - 1;
+ EXPECT_TRUE(mDrmSessionManager->reclaimSession(highPriorityPid));
+ EXPECT_FALSE(mTestDrm1->isReclaimed());
+ EXPECT_FALSE(mTestDrm2->isReclaimed());
+ EXPECT_TRUE(mTestDrm3->isReclaimed());
+ mDrmSessionManager->removeSession(mTestDrm3->mSessionId);
+
+ // less-used session is reclaimed next
+ EXPECT_TRUE(mDrmSessionManager->reclaimSession(highPriorityPid));
+ EXPECT_FALSE(mTestDrm1->isReclaimed());
+ EXPECT_TRUE(mTestDrm2->isReclaimed());
+ EXPECT_TRUE(mTestDrm3->isReclaimed());
+
+ // most-used session still open
+ EXPECT_EQ(1u, mDrmSessionManager->getSessionCount());
+ EXPECT_TRUE(mDrmSessionManager->containsSession(mTestDrm1->mSessionId));
+ EXPECT_FALSE(mDrmSessionManager->containsSession(mTestDrm2->mSessionId));
+ EXPECT_FALSE(mDrmSessionManager->containsSession(mTestDrm3->mSessionId));
}
} // namespace android
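The rewritten tests exercise the new public surface of DrmSessionManager (addSession/useSession/removeSession/reclaimSession plus the getSessionCount/containsSession introspection) and encode the expected reclaim order: never-used sessions go first, then the least-used one, while the most-used session survives. A short sketch of a caller driving that API, mirroring what the tests do (the pids, session id, and service handle are hypothetical):

    // Sketch only: registering a client with the manager and triggering reclaim.
    sp<DrmSessionManager> mgr = new DrmSessionManager(resourceManagerService);
    sp<FakeDrm> client = new FakeDrm({0x01, 0x02}, mgr);

    mgr->addSession(callingPid, client, client->mSessionId);  // register the session
    mgr->useSession(client->mSessionId);                      // bump its usage count
    if (mgr->reclaimSession(higherPriorityPid)) {
        // The manager asked the least useful client to reclaimResource(); in
        // FakeDrm that callback also removes the session from the manager.
    }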
diff --git a/media/libnblog/Reader.cpp b/media/libnblog/Reader.cpp
index f556e37..67d028d 100644
--- a/media/libnblog/Reader.cpp
+++ b/media/libnblog/Reader.cpp
@@ -45,7 +45,12 @@
}
Reader::Reader(const sp<IMemory>& iMemory, size_t size, const std::string &name)
- : Reader(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size, name)
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ : Reader(iMemory != 0 ? (Shared *) iMemory->unsecurePointer() : NULL, size,
+ name)
{
mIMemory = iMemory;
}
@@ -156,7 +161,8 @@
bool Reader::isIMemory(const sp<IMemory>& iMemory) const
{
- return iMemory != 0 && mIMemory != 0 && iMemory->pointer() == mIMemory->pointer();
+ return iMemory != 0 && mIMemory != 0 &&
+ iMemory->unsecurePointer() == mIMemory->unsecurePointer();
}
// We make a set of the invalid types rather than the valid types when aligning
diff --git a/media/libnblog/Writer.cpp b/media/libnblog/Writer.cpp
index da3bd52..86d3b98 100644
--- a/media/libnblog/Writer.cpp
+++ b/media/libnblog/Writer.cpp
@@ -56,7 +56,11 @@
}
Writer::Writer(const sp<IMemory>& iMemory, size_t size)
- : Writer(iMemory != 0 ? (Shared *) iMemory->pointer() : NULL, size)
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ : Writer(iMemory != 0 ? (Shared *) iMemory->unsecurePointer() : NULL, size)
{
mIMemory = iMemory;
}
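Both Reader and Writer keep aliasing the shared region through unsecurePointer(); the alternative mentioned in the TODOs is to copy the remote contents into a private allocation before use. A sketch of that alternative, following the same pattern as CameraSourceTimeLapse::createIMemoryCopy() further below (illustrative helper, not part of this change):

    #include <cstring>
    #include <binder/IMemory.h>
    #include <binder/MemoryBase.h>
    #include <binder/MemoryHeapBase.h>

    using namespace android;

    // Copy the remote IMemory contents into a freshly allocated local heap so the
    // reader never dereferences memory the remote process can still rewrite.
    static sp<IMemory> copyIMemory(const sp<IMemory> &source) {
        const size_t size = source->size();
        sp<MemoryHeapBase> heap = new MemoryHeapBase(size);
        sp<MemoryBase> copy = new MemoryBase(heap, 0, size);
        memcpy(copy->unsecurePointer(), source->unsecurePointer(), size);
        return copy;
    }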
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d198d39..965e5a6 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -24,6 +24,8 @@
#include <inttypes.h>
#include <utils/Trace.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+
#include <gui/Surface.h>
#include <media/stagefright/ACodec.h>
@@ -45,6 +47,7 @@
#include <media/hardware/HardwareAPI.h>
#include <media/MediaBufferHolder.h>
#include <media/OMXBuffer.h>
+#include <media/omx/1.0/Conversion.h>
#include <media/omx/1.0/WOmxNode.h>
#include <hidlmemory/mapping.h>
@@ -63,7 +66,9 @@
namespace android {
-using binder::Status;
+typedef hardware::media::omx::V1_0::IGraphicBufferSource HGraphicBufferSource;
+
+using hardware::media::omx::V1_0::Status;
enum {
kMaxIndicesToCheck = 32, // used when enumerating supported formats and profiles
@@ -98,16 +103,11 @@
}
}
-static inline status_t statusFromBinderStatus(const Status &status) {
+static inline status_t statusFromBinderStatus(hardware::Return<Status> &&status) {
if (status.isOk()) {
- return OK;
- }
- status_t err;
- if ((err = status.serviceSpecificErrorCode()) != OK) {
- return err;
- }
- if ((err = status.transactionError()) != OK) {
- return err;
+ return static_cast<status_t>(status.withDefault(Status::UNKNOWN_ERROR));
+ } else if (status.isDeadObject()) {
+ return DEAD_OBJECT;
}
// Other exception
return UNKNOWN_ERROR;
@@ -1780,6 +1780,14 @@
}
}
+ int32_t lowLatency = 0;
+ if (msg->findInt32("low-latency", &lowLatency)) {
+ err = setLowLatency(lowLatency);
+ if (err != OK) {
+ return err;
+ }
+ }
+
int32_t prependSPSPPS = 0;
if (encoder && mIsVideo
&& msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
@@ -1826,6 +1834,23 @@
mRepeatFrameDelayUs = -1LL;
}
+ if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
+ float captureRate;
+ if (msg->findAsFloat(KEY_CAPTURE_RATE, &captureRate)) {
+ mCaptureFps = captureRate;
+ } else {
+ mCaptureFps = -1.0;
+ }
+ }
+
+ if (!msg->findInt32(
+ KEY_CREATE_INPUT_SURFACE_SUSPENDED,
+ (int32_t*)&mCreateInputBuffersSuspended)) {
+ mCreateInputBuffersSuspended = false;
+ }
+ }
+
+ if (encoder && (mIsVideo || mIsImage)) {
// only allow 32-bit value, since we pass it as U32 to OMX.
if (!msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &mMaxPtsGapUs)) {
mMaxPtsGapUs = 0LL;
@@ -1842,16 +1867,6 @@
if (mMaxPtsGapUs < 0LL) {
mMaxFps = -1;
}
-
- if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
- mCaptureFps = -1.0;
- }
-
- if (!msg->findInt32(
- KEY_CREATE_INPUT_SURFACE_SUSPENDED,
- (int32_t*)&mCreateInputBuffersSuspended)) {
- mCreateInputBuffersSuspended = false;
- }
}
// NOTE: we only use native window for video decoders
@@ -2341,6 +2356,24 @@
return err;
}
+status_t ACodec::setLowLatency(int32_t lowLatency) {
+ if (mIsEncoder) {
+ ALOGE("encoder does not support low-latency");
+ return BAD_VALUE;
+ }
+
+ OMX_CONFIG_BOOLEANTYPE config;
+ InitOMXParams(&config);
+ config.bEnabled = (OMX_BOOL)(lowLatency != 0);
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigLowLatency,
+ &config, sizeof(config));
+ if (err != OK) {
+        ALOGE("decoder cannot set low-latency to %d (err %d)", lowLatency, err);
+ }
+ return err;
+}
+
status_t ACodec::setLatency(uint32_t latency) {
OMX_PARAM_U32TYPE config;
InitOMXParams(&config);
@@ -6873,8 +6906,11 @@
return err;
}
+ using hardware::media::omx::V1_0::utils::TWOmxNode;
err = statusFromBinderStatus(
- mCodec->mGraphicBufferSource->configure(mCodec->mOMXNode, dataSpace));
+ mCodec->mGraphicBufferSource->configure(
+ new TWOmxNode(mCodec->mOMXNode),
+ static_cast<hardware::graphics::common::V1_0::Dataspace>(dataSpace)));
if (err != OK) {
ALOGE("[%s] Unable to configure for node (err %d)",
mCodec->mComponentName.c_str(), err);
@@ -6894,7 +6930,7 @@
}
}
- if (mCodec->mMaxPtsGapUs != 0LL) {
+ if (mCodec->mIsVideo && mCodec->mMaxPtsGapUs != 0LL) {
OMX_PARAM_U32TYPE maxPtsGapParams;
InitOMXParams(&maxPtsGapParams);
maxPtsGapParams.nPortIndex = kPortIndexInput;
@@ -6960,8 +6996,9 @@
}
err = statusFromBinderStatus(
- mCodec->mGraphicBufferSource->setColorAspects(ColorUtils::packToU32(
- *(ColorAspects *)colorAspectsBuffer->base())));
+ mCodec->mGraphicBufferSource->setColorAspects(
+ hardware::media::omx::V1_0::utils::toHardwareColorAspects(
+ *(ColorAspects *)colorAspectsBuffer->base())));
if (err != OK) {
ALOGE("[%s] Unable to configure color aspects (err %d)",
@@ -6977,8 +7014,10 @@
ALOGV("onCreateInputSurface");
sp<IGraphicBufferProducer> bufferProducer;
+ sp<HGraphicBufferSource> bufferSource;
status_t err = mCodec->mOMX->createInputSurface(
- &bufferProducer, &mCodec->mGraphicBufferSource);
+ &bufferProducer, &bufferSource);
+ mCodec->mGraphicBufferSource = bufferSource;
if (err == OK) {
err = setupInputSurface();
@@ -7011,8 +7050,12 @@
}
sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
- mCodec->mGraphicBufferSource = surface->getBufferSource();
- status_t err = setupInputSurface();
+ sp<HGraphicBufferSource> hgbs = HGraphicBufferSource::castFrom(surface->getHidlTarget());
+ status_t err = BAD_VALUE;
+ if (hgbs) {
+ mCodec->mGraphicBufferSource = hgbs;
+ err = setupInputSurface();
+ }
if (err == OK) {
mCodec->mCallback->onInputSurfaceAccepted(
@@ -7531,8 +7574,14 @@
}
int64_t stopTimeOffsetUs;
- err = statusFromBinderStatus(
- mGraphicBufferSource->getStopTimeOffsetUs(&stopTimeOffsetUs));
+ hardware::Return<void> trans = mGraphicBufferSource->getStopTimeOffsetUs(
+ [&err, &stopTimeOffsetUs](auto status, auto result) {
+ err = static_cast<status_t>(status);
+ stopTimeOffsetUs = result;
+ });
+ if (!trans.isOk()) {
+ err = trans.isDeadObject() ? DEAD_OBJECT : UNKNOWN_ERROR;
+ }
if (err != OK) {
ALOGE("Failed to get stop time offset (err %d)", err);
@@ -7575,6 +7624,14 @@
}
}
+ int32_t lowLatency = 0;
+ if (params->findInt32("low-latency", &lowLatency)) {
+ status_t err = setLowLatency(lowLatency);
+ if (err != OK) {
+ return err;
+ }
+ }
+
int32_t latency = 0;
if (params->findInt32("latency", &latency) && latency > 0) {
status_t err = setLatency(latency);
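Because the new "low-latency" key is read both at configure time and inside setParameters(), it can be toggled while the decoder is running. A minimal sketch of flipping it from client code, assuming an already-configured video decoder held in a hypothetical sp<MediaCodec> named codec:

    sp<AMessage> params = new AMessage;
    params->setInt32("low-latency", 1);        // non-zero requests low-latency decoding
    status_t err = codec->setParameters(params);
    if (err != OK) {
        ALOGW("decoder rejected low-latency request (err %d)", err);
    }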
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 266a240..dd6f7b4 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -153,7 +153,8 @@
}
if (destination.mType == ICrypto::kDestinationTypeSharedMemory) {
- memcpy(it->mCodecBuffer->base(), destination.mSharedMemory->pointer(), result);
+ memcpy(it->mCodecBuffer->base(),
+ destination.mSharedMemory->unsecurePointer(), result);
}
} else {
// Here we cast CryptoPlugin::SubSample to hardware::cas::native::V1_0::SubSample
@@ -219,7 +220,8 @@
if (dstBuffer.type == BufferType::SHARED_MEMORY) {
memcpy(it->mCodecBuffer->base(),
- (uint8_t*)it->mSharedEncryptedBuffer->pointer(), result);
+ (uint8_t*)it->mSharedEncryptedBuffer->unsecurePointer(),
+ result);
}
}
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 7eab230..a48faca 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -19,8 +19,10 @@
],
cfi: true,
},
-
- shared_libs: ["libmedia"],
+ shared_libs: [
+ "libstagefright_foundation",
+ "libutils"
+ ],
}
cc_library_static {
@@ -58,10 +60,14 @@
"-Wall",
],
+ header_libs: [
+ "libmediadrm_headers",
+ ],
+
shared_libs: [
"libgui",
"liblog",
- "libmedia_omx",
+ "libmedia_codeclist",
"libstagefright_foundation",
"libui",
"libutils",
@@ -96,6 +102,10 @@
"include",
],
+ header_libs: [
+ "libmedia_helper_headers",
+ ],
+
cflags: [
"-Wno-multichar",
"-Werror",
@@ -112,6 +122,53 @@
},
}
+cc_library_shared {
+ name: "libstagefright_framecapture_utils",
+
+ srcs: [
+ "FrameCaptureLayer.cpp",
+ "FrameCaptureProcessor.cpp",
+ ],
+
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libEGL",
+ "libGLESv1_CM",
+ "libGLESv2",
+ "libgui",
+ "liblog",
+ "libprocessgroup",
+ "libstagefright_foundation",
+ "libsync",
+ "libui",
+ "libutils",
+ ],
+
+ static_libs: [
+ "librenderengine",
+ ],
+
+ export_include_dirs: [
+ "include",
+ ],
+
+ cflags: [
+ "-Wno-multichar",
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ "-Wall",
+ ],
+
+ sanitize: {
+        // TODO: re-enable cfi for this lib after b/139945549 is fixed
+ cfi: false,
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+}
cc_library {
name: "libstagefright",
@@ -121,7 +178,6 @@
"ACodecBufferChannel.cpp",
"AHierarchicalStateMachine.cpp",
"AMRWriter.cpp",
- "AudioPlayer.cpp",
"AudioSource.cpp",
"BufferImpl.cpp",
"CallbackDataSource.cpp",
@@ -129,12 +185,7 @@
"CameraSource.cpp",
"CameraSourceTimeLapse.cpp",
"DataConverter.cpp",
- "DataSourceFactory.cpp",
- "DataURISource.cpp",
- "ClearFileSource.cpp",
- "FileSource.cpp",
"FrameDecoder.cpp",
- "HTTPBase.cpp",
"HevcUtils.cpp",
"InterfaceUtils.cpp",
"JPEGSource.cpp",
@@ -151,10 +202,7 @@
"MediaSource.cpp",
"MediaSync.cpp",
"MediaTrack.cpp",
- "http/ClearMediaHTTP.cpp",
- "http/MediaHTTP.cpp",
"MediaMuxer.cpp",
- "NuCachedSource2.cpp",
"NuMediaExtractor.cpp",
"OggWriter.cpp",
"OMXClient.cpp",
@@ -164,32 +212,32 @@
"SimpleDecodingSource.cpp",
"SkipCutBuffer.cpp",
"StagefrightMediaScanner.cpp",
- "StagefrightMetadataRetriever.cpp",
"StagefrightPluginLoader.cpp",
"SurfaceUtils.cpp",
- "Utils.cpp",
"ThrottledSource.cpp",
+ "Utils.cpp",
"VideoFrameSchedulerBase.cpp",
"VideoFrameScheduler.cpp",
],
shared_libs: [
+ "libstagefright_framecapture_utils",
"libaudioutils",
"libbase",
"libbinder",
"libcamera_client",
"libcutils",
+ "libdatasource",
"libdl",
"libdl_android",
- "libdrmframework",
"libgui",
"liblog",
"libmedia",
+ "libmedia_codeclist",
"libmedia_omx",
"libmedia_omx_client",
"libaudioclient",
"libmediametrics",
- "libmediautils",
"libui",
"libutils",
"libmedia_helper",
@@ -206,6 +254,7 @@
],
static_libs: [
+ "libstagefright_esds",
"libstagefright_color_conversion",
"libyuv_static",
"libstagefright_mediafilter",
@@ -213,13 +262,12 @@
"libstagefright_timedtext",
"libogg",
"libwebm",
- "libstagefright_esds",
"libstagefright_id3",
- "libFLAC",
],
header_libs:[
- "libnativeloader-dummy-headers",
+ "libmediadrm_headers",
+ "libnativeloader-headers",
"libstagefright_xmlparser_headers",
"media_ndk_headers",
],
@@ -259,62 +307,3 @@
],
},
}
-
-cc_library_static {
- name: "libstagefright_player2",
-
- srcs: [
- "ClearFileSource.cpp",
- "DataURISource.cpp",
- "HTTPBase.cpp",
- "HevcUtils.cpp",
- "MediaClock.cpp",
- "MediaSource.cpp",
- "NdkUtils.cpp",
- "Utils.cpp",
- "VideoFrameSchedulerBase.cpp",
- "VideoFrameScheduler2.cpp",
- "http/ClearMediaHTTP.cpp",
- ],
-
- shared_libs: [
- "libgui",
- "liblog",
- "libnetd_client",
- "libutils",
- "libstagefright_foundation",
- "libandroid",
- ],
-
- static_libs: [
- "libmedia_player2_util",
- "libmedia2_jni_core",
- ],
-
- export_include_dirs: [
- "include",
- ],
-
- cflags: [
- "-Wno-multichar",
- "-Werror",
- "-Wno-error=deprecated-declarations",
- "-Wall",
- ],
-
- product_variables: {
- debuggable: {
- // enable experiments only in userdebug and eng builds
- cflags: ["-DENABLE_STAGEFRIGHT_EXPERIMENTS"],
- },
- },
-
- sanitize: {
- cfi: true,
- misc_undefined: [
- "unsigned-integer-overflow",
- "signed-integer-overflow",
- ],
- },
-}
-
diff --git a/media/libstagefright/BufferImpl.cpp b/media/libstagefright/BufferImpl.cpp
index b760273..b097324 100644
--- a/media/libstagefright/BufferImpl.cpp
+++ b/media/libstagefright/BufferImpl.cpp
@@ -21,7 +21,7 @@
#include <binder/IMemory.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <utils/NativeHandle.h>
#include "include/SecureBuffer.h"
@@ -32,7 +32,11 @@
// SharedMemoryBuffer
SharedMemoryBuffer::SharedMemoryBuffer(const sp<AMessage> &format, const sp<IMemory> &mem)
- : MediaCodecBuffer(format, new ABuffer(mem->pointer(), mem->size())),
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ : MediaCodecBuffer(format, new ABuffer(mem->unsecurePointer(), mem->size())),
mMemory(mem) {
}
diff --git a/media/libstagefright/CallbackDataSource.cpp b/media/libstagefright/CallbackDataSource.cpp
index 92e6eb9..2f8e6af 100644
--- a/media/libstagefright/CallbackDataSource.cpp
+++ b/media/libstagefright/CallbackDataSource.cpp
@@ -81,7 +81,8 @@
return ERROR_OUT_OF_RANGE;
}
CHECK(numRead >= 0 && (size_t)numRead <= bufferSize);
- memcpy(((uint8_t*)data) + totalNumRead, mMemory->pointer(), numRead);
+ memcpy(((uint8_t*)data) + totalNumRead, mMemory->unsecurePointer(),
+ numRead);
numLeft -= numRead;
totalNumRead += numRead;
}
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 41f5db0..9b3f420 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -89,7 +89,7 @@
void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr,
camera_frame_metadata_t * /* metadata */) {
ALOGV("postData(%d, ptr:%p, size:%zu)",
- msgType, dataPtr->pointer(), dataPtr->size());
+ msgType, dataPtr->unsecurePointer(), dataPtr->size());
sp<CameraSource> source = mSource.promote();
if (source.get() != NULL) {
@@ -966,8 +966,12 @@
// Check if frame contains a VideoNativeHandleMetadata.
if (frame->size() == sizeof(VideoNativeHandleMetadata)) {
- VideoNativeHandleMetadata *metadata =
- (VideoNativeHandleMetadata*)(frame->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoNativeHandleMetadata *metadata =
+ (VideoNativeHandleMetadata*)(frame->unsecurePointer());
if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
handle = metadata->pHandle;
}
@@ -1047,7 +1051,7 @@
Mutex::Autolock autoLock(mLock);
for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
it != mFramesBeingEncoded.end(); ++it) {
- if ((*it)->pointer() == buffer->data()) {
+ if ((*it)->unsecurePointer() == buffer->data()) {
releaseOneRecordingFrame((*it));
mFramesBeingEncoded.erase(it);
++mNumFramesEncoded;
@@ -1102,7 +1106,11 @@
frameTime = *mFrameTimes.begin();
mFrameTimes.erase(mFrameTimes.begin());
mFramesBeingEncoded.push_back(frame);
- *buffer = new MediaBuffer(frame->pointer(), frame->size());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ *buffer = new MediaBuffer(frame->unsecurePointer(), frame->size());
(*buffer)->setObserver(this);
(*buffer)->add_ref();
(*buffer)->meta_data().setInt64(kKeyTime, frameTime);
@@ -1248,7 +1256,7 @@
mMemoryBases.erase(mMemoryBases.begin());
// Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->unsecurePointer());
metadata->eType = kMetadataBufferTypeNativeHandleSource;
metadata->pHandle = handle;
@@ -1296,7 +1304,11 @@
mMemoryBases.erase(mMemoryBases.begin());
// Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->unsecurePointer());
metadata->eType = kMetadataBufferTypeNativeHandleSource;
metadata->pHandle = handle;
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 2a819ad..e0a6eb3 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -245,11 +245,11 @@
ALOGV("createIMemoryCopy");
size_t source_size = source_data->size();
- void* source_pointer = source_data->pointer();
+ void* source_pointer = source_data->unsecurePointer();
sp<MemoryHeapBase> newMemoryHeap = new MemoryHeapBase(source_size);
sp<MemoryBase> newMemory = new MemoryBase(newMemoryHeap, 0, source_size);
- memcpy(newMemory->pointer(), source_pointer, source_size);
+ memcpy(newMemory->unsecurePointer(), source_pointer, source_size);
return newMemory;
}
diff --git a/media/libstagefright/CodecBase.cpp b/media/libstagefright/CodecBase.cpp
index d0610b2..97f38f8 100644
--- a/media/libstagefright/CodecBase.cpp
+++ b/media/libstagefright/CodecBase.cpp
@@ -18,7 +18,7 @@
#define LOG_TAG "CodecBase"
#include <android/hardware/cas/native/1.0/IDescrambler.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/stagefright/CodecBase.h>
#include <utils/Log.h>
diff --git a/media/libstagefright/FrameCaptureLayer.cpp b/media/libstagefright/FrameCaptureLayer.cpp
new file mode 100644
index 0000000..29642be
--- /dev/null
+++ b/media/libstagefright/FrameCaptureLayer.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameCaptureLayer"
+
+#include <include/FrameCaptureLayer.h>
+#include <media/stagefright/FrameCaptureProcessor.h>
+#include <gui/BufferQueue.h>
+#include <gui/GLConsumer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/Surface.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+#include <renderengine/RenderEngine.h>
+#include <utils/Log.h>
+
+namespace android {
+
+static const int64_t kAcquireBufferTimeoutNs = 100000000LL;
+
+ui::Dataspace translateDataspace(ui::Dataspace dataspace) {
+ ui::Dataspace updatedDataspace = dataspace;
+ // translate legacy dataspaces to modern dataspaces
+ switch (dataspace) {
+ case ui::Dataspace::SRGB:
+ updatedDataspace = ui::Dataspace::V0_SRGB;
+ break;
+ case ui::Dataspace::SRGB_LINEAR:
+ updatedDataspace = ui::Dataspace::V0_SRGB_LINEAR;
+ break;
+ case ui::Dataspace::JFIF:
+ updatedDataspace = ui::Dataspace::V0_JFIF;
+ break;
+ case ui::Dataspace::BT601_625:
+ updatedDataspace = ui::Dataspace::V0_BT601_625;
+ break;
+ case ui::Dataspace::BT601_525:
+ updatedDataspace = ui::Dataspace::V0_BT601_525;
+ break;
+ case ui::Dataspace::BT709:
+ updatedDataspace = ui::Dataspace::V0_BT709;
+ break;
+ default:
+ break;
+ }
+
+ return updatedDataspace;
+}
+
+bool isHdrY410(const BufferItem &bi) {
+ ui::Dataspace dataspace = translateDataspace(static_cast<ui::Dataspace>(bi.mDataSpace));
+ // pixel format is HDR Y410 masquerading as RGBA_1010102
+ return (dataspace == ui::Dataspace::BT2020_ITU_PQ &&
+ bi.mGraphicBuffer->getPixelFormat() == HAL_PIXEL_FORMAT_RGBA_1010102);
+}
+
+struct FrameCaptureLayer::BufferLayer : public FrameCaptureProcessor::Layer {
+ BufferLayer(const BufferItem &bi) : mBufferItem(bi) {}
+ void getLayerSettings(
+ const Rect &sourceCrop, uint32_t textureName,
+ renderengine::LayerSettings *layerSettings) override;
+ BufferItem mBufferItem;
+};
+
+void FrameCaptureLayer::BufferLayer::getLayerSettings(
+ const Rect &sourceCrop, uint32_t textureName,
+ renderengine::LayerSettings *layerSettings) {
+ layerSettings->geometry.boundaries = sourceCrop.toFloatRect();
+ layerSettings->alpha = 1.0f;
+
+ layerSettings->sourceDataspace = translateDataspace(
+ static_cast<ui::Dataspace>(mBufferItem.mDataSpace));
+
+ // from BufferLayer
+ layerSettings->source.buffer.buffer = mBufferItem.mGraphicBuffer;
+ layerSettings->source.buffer.isOpaque = true;
+ layerSettings->source.buffer.fence = mBufferItem.mFence;
+ layerSettings->source.buffer.textureName = textureName;
+ layerSettings->source.buffer.usePremultipliedAlpha = false;
+ layerSettings->source.buffer.isY410BT2020 = isHdrY410(mBufferItem);
+
+ // Set filtering to false since the capture itself doesn't involve
+    // any scaling; the metadata retriever JNI scales the bitmap if the
+    // display size is different from the decoded size. If that scaling ever
+    // needs to be handled on the server side, consider enabling this based
+    // on the display size vs. the decoded size.
+ const bool useFiltering = false;
+ layerSettings->source.buffer.useTextureFiltering = useFiltering;
+
+ float textureMatrix[16];
+ GLConsumer::computeTransformMatrix(
+ textureMatrix, mBufferItem.mGraphicBuffer,
+ mBufferItem.mCrop, mBufferItem.mTransform, useFiltering);
+
+ // Flip y-coordinates because GLConsumer expects OpenGL convention.
+ mat4 tr = mat4::translate(vec4(.5, .5, 0, 1)) * mat4::scale(vec4(1, -1, 1, 1)) *
+ mat4::translate(vec4(-.5, -.5, 0, 1));
+
+ layerSettings->source.buffer.textureTransform =
+ mat4(static_cast<const float*>(textureMatrix)) * tr;
+}
+
+status_t FrameCaptureLayer::init() {
+ if (FrameCaptureProcessor::getInstance() == nullptr) {
+ ALOGE("failed to get capture processor");
+ return ERROR_UNSUPPORTED;
+ }
+
+    // Mimic surfaceflinger's BufferQueueLayer::onFirstRef() to create a
+    // BufferQueue for the decoder's output
+ sp<IGraphicBufferProducer> producer;
+ sp<IGraphicBufferConsumer> consumer;
+
+ BufferQueue::createBufferQueue(&producer, &consumer);
+ // We don't need HW_COMPOSER usage since we're not using hwc to compose.
+ // The buffer is only used as a GL texture.
+ consumer->setConsumerUsageBits(GraphicBuffer::USAGE_HW_TEXTURE);
+ consumer->setConsumerName(String8("FrameDecoder"));
+
+ status_t err = consumer->consumerConnect(
+ new BufferQueue::ProxyConsumerListener(this), false);
+ if (NO_ERROR != err) {
+ ALOGE("Error connecting to BufferQueue: %s (%d)", strerror(-err), err);
+ return err;
+ }
+
+ mConsumer = consumer;
+ mSurface = new Surface(producer);
+
+ return OK;
+}
+
+status_t FrameCaptureLayer::capture(const ui::PixelFormat reqPixelFormat,
+ const Rect &sourceCrop, sp<GraphicBuffer> *outBuffer) {
+ ALOGV("capture: reqPixelFormat %d, crop {%d, %d, %d, %d}", reqPixelFormat,
+ sourceCrop.left, sourceCrop.top, sourceCrop.right, sourceCrop.bottom);
+
+ BufferItem bi;
+ status_t err = acquireBuffer(&bi);
+ if (err != OK) {
+ return err;
+ }
+
+ // create out buffer
+ const uint32_t usage =
+ GRALLOC_USAGE_SW_READ_OFTEN | GRALLOC_USAGE_SW_WRITE_OFTEN |
+ GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE;
+ sp<GraphicBuffer> buffer = new GraphicBuffer(
+ sourceCrop.getWidth(), sourceCrop.getHeight(),
+ static_cast<android_pixel_format>(reqPixelFormat),
+ 1, usage, std::string("thumbnail"));
+
+ err = FrameCaptureProcessor::getInstance()->capture(
+ new BufferLayer(bi), sourceCrop, buffer);
+ if (err == OK) {
+ *outBuffer = buffer;
+ }
+
+ (void)releaseBuffer(bi);
+ return err;
+}
+
+FrameCaptureLayer::FrameCaptureLayer() : mFrameAvailable(false) {}
+
+void FrameCaptureLayer::onFrameAvailable(const BufferItem& /*item*/) {
+ ALOGV("onFrameAvailable");
+ Mutex::Autolock _lock(mLock);
+
+ mFrameAvailable = true;
+ mCondition.signal();
+}
+
+void FrameCaptureLayer::onBuffersReleased() {
+ ALOGV("onBuffersReleased");
+ Mutex::Autolock _lock(mLock);
+
+ uint64_t mask = 0;
+ mConsumer->getReleasedBuffers(&mask);
+ for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
+ if (mask & (1ULL << i)) {
+ mSlotToBufferMap[i] = nullptr;
+ }
+ }
+}
+
+void FrameCaptureLayer::onSidebandStreamChanged() {
+ ALOGV("onSidebandStreamChanged");
+}
+
+status_t FrameCaptureLayer::acquireBuffer(BufferItem *bi) {
+ ALOGV("acquireBuffer");
+ Mutex::Autolock _lock(mLock);
+
+ if (!mFrameAvailable) {
+ // The output buffer is already released to the codec at this point.
+ // Use a small timeout of 100ms in case the buffer hasn't arrived
+ // at the consumer end of the output surface yet.
+ if (mCondition.waitRelative(mLock, kAcquireBufferTimeoutNs) != OK) {
+ ALOGE("wait for buffer timed out");
+ return TIMED_OUT;
+ }
+ }
+ mFrameAvailable = false;
+
+ status_t err = mConsumer->acquireBuffer(bi, 0);
+ if (err != OK) {
+ ALOGE("failed to acquire buffer!");
+ return err;
+ }
+
+ if (bi->mGraphicBuffer != nullptr) {
+ mSlotToBufferMap[bi->mSlot] = bi->mGraphicBuffer;
+ } else {
+ bi->mGraphicBuffer = mSlotToBufferMap[bi->mSlot];
+ }
+
+ if (bi->mGraphicBuffer == nullptr) {
+ ALOGE("acquired null buffer!");
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+status_t FrameCaptureLayer::releaseBuffer(const BufferItem &bi) {
+ ALOGV("releaseBuffer");
+ Mutex::Autolock _lock(mLock);
+
+ return mConsumer->releaseBuffer(bi.mSlot, bi.mFrameNumber,
+ EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
+}
+
+} // namespace android
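Taken together, the new class is used in two steps: init() wires up a BufferQueue consumer and exposes the producer side as a Surface for the decoder, and capture() renders the most recently queued buffer into a caller-supplied GraphicBuffer via FrameCaptureProcessor. A sketch of a caller, mirroring how VideoFrameDecoder drives it later in this change (width/height and the early return are illustrative):

    sp<FrameCaptureLayer> layer = new FrameCaptureLayer();
    if (layer->init() != OK) {
        return;   // capture processor unavailable; fall back to byte-buffer mode
    }
    sp<Surface> surface = layer->getSurface();   // hand this to the decoder as its output

    // ... decode one frame into 'surface' ...

    sp<GraphicBuffer> outBuffer;
    status_t err = layer->capture(ui::PixelFormat::RGBA_8888,
                                  Rect(0, 0, width, height), &outBuffer);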
diff --git a/media/libstagefright/FrameCaptureProcessor.cpp b/media/libstagefright/FrameCaptureProcessor.cpp
new file mode 100644
index 0000000..c517e33
--- /dev/null
+++ b/media/libstagefright/FrameCaptureProcessor.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameCaptureProcessor"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/FrameCaptureProcessor.h>
+#include <media/stagefright/MediaErrors.h>
+#include <renderengine/RenderEngine.h>
+#include <ui/Fence.h>
+#include <ui/PixelFormat.h>
+#include <utils/Log.h>
+
+namespace android {
+
+//static
+Mutex FrameCaptureProcessor::sLock;
+//static
+sp<FrameCaptureProcessor> FrameCaptureProcessor::sInstance;
+
+//static
+sp<FrameCaptureProcessor> FrameCaptureProcessor::getInstance() {
+ Mutex::Autolock _l(sLock);
+ if (sInstance == nullptr) {
+ sInstance = new FrameCaptureProcessor();
+ sInstance->createRenderEngine();
+ }
+    // init only once; if it failed, nullptr will be returned on later calls.
+ return (sInstance->initCheck() == OK) ? sInstance : nullptr;
+}
+
+//static
+status_t FrameCaptureProcessor::PostAndAwaitResponse(
+ const sp<AMessage> &msg, sp<AMessage> *response) {
+ status_t err = msg->postAndAwaitResponse(response);
+
+ if (err != OK) {
+ return err;
+ }
+
+ if (!(*response)->findInt32("err", &err)) {
+ err = OK;
+ }
+
+ return err;
+}
+
+//static
+void FrameCaptureProcessor::PostReplyWithError(
+ const sp<AReplyToken> &replyID, status_t err) {
+ sp<AMessage> response = new AMessage;
+ if (err != OK) {
+ response->setInt32("err", err);
+ }
+ response->postReply(replyID);
+}
+
+FrameCaptureProcessor::FrameCaptureProcessor()
+ : mInitStatus(NO_INIT), mTextureName(0) {}
+
+FrameCaptureProcessor::~FrameCaptureProcessor() {
+ if (mLooper != nullptr) {
+ mLooper->unregisterHandler(id());
+ mLooper->stop();
+ }
+}
+
+void FrameCaptureProcessor::createRenderEngine() {
+ // this method should only be called once, immediately after ctor
+ CHECK(mInitStatus == NO_INIT);
+
+ mLooper = new ALooper();
+ mLooper->setName("capture_looper");
+ mLooper->start(); // default priority
+ mLooper->registerHandler(this);
+
+ sp<AMessage> response;
+ status_t err = PostAndAwaitResponse(new AMessage(kWhatCreate, this), &response);
+ if (err != OK) {
+ mInitStatus = ERROR_UNSUPPORTED;
+
+ mLooper->unregisterHandler(id());
+ mLooper->stop();
+ mLooper.clear();
+ return;
+ }
+
+ // only need one texture name
+ mRE->genTextures(1, &mTextureName);
+
+ mInitStatus = OK;
+}
+
+status_t FrameCaptureProcessor::capture(
+ const sp<Layer> &layer, const Rect &sourceCrop, const sp<GraphicBuffer> &buffer) {
+ sp<AMessage> msg = new AMessage(kWhatCapture, this);
+ msg->setObject("layer", layer);
+ msg->setRect("crop", sourceCrop.left, sourceCrop.top, sourceCrop.right, sourceCrop.bottom);
+ msg->setObject("buffer", buffer);
+ sp<AMessage> response;
+ return PostAndAwaitResponse(msg, &response);
+}
+
+status_t FrameCaptureProcessor::onCreate() {
+ mRE = renderengine::RenderEngine::create(
+ renderengine::RenderEngineCreationArgs::Builder()
+ .setPixelFormat(static_cast<int>(ui::PixelFormat::RGBA_8888))
+ .setImageCacheSize(2 /*maxFrameBufferAcquiredBuffers*/)
+ .setUseColorManagerment(true)
+ .setEnableProtectedContext(false)
+ .setPrecacheToneMapperShaderOnly(true)
+ .setContextPriority(renderengine::RenderEngine::ContextPriority::LOW)
+ .build());
+
+ if (mRE == nullptr) {
+ return ERROR_UNSUPPORTED;
+ }
+ return OK;
+}
+
+status_t FrameCaptureProcessor::onCapture(const sp<Layer> &layer,
+ const Rect &sourceCrop, const sp<GraphicBuffer> &buffer) {
+ renderengine::DisplaySettings clientCompositionDisplay;
+ std::vector<renderengine::LayerSettings> clientCompositionLayers;
+
+ clientCompositionDisplay.physicalDisplay = sourceCrop;
+ clientCompositionDisplay.clip = sourceCrop;
+
+ clientCompositionDisplay.outputDataspace = ui::Dataspace::V0_SRGB;
+ clientCompositionDisplay.maxLuminance = sDefaultMaxLumiance;
+ clientCompositionDisplay.clearRegion = Region::INVALID_REGION;
+
+ // from Layer && BufferLayer
+ renderengine::LayerSettings layerSettings;
+
+ layer->getLayerSettings(sourceCrop, mTextureName, &layerSettings);
+
+ clientCompositionLayers.push_back(layerSettings);
+
+ // Use an empty fence for the buffer fence, since we just created the buffer so
+ // there is no need for synchronization with the GPU.
+ base::unique_fd bufferFence;
+ base::unique_fd drawFence;
+ mRE->useProtectedContext(false);
+ status_t err = mRE->drawLayers(clientCompositionDisplay, clientCompositionLayers, buffer.get(),
+ /*useFramebufferCache=*/false, std::move(bufferFence), &drawFence);
+
+ sp<Fence> fence = new Fence(std::move(drawFence));
+
+ if (err != OK) {
+ ALOGE("drawLayers returned err %d", err);
+ return err;
+ }
+
+ err = fence->wait(500);
+ if (err != OK) {
+ ALOGW("wait for fence returned err %d", err);
+ }
+ return OK;
+}
+
+void FrameCaptureProcessor::onMessageReceived(const sp<AMessage> &msg) {
+ switch (msg->what()) {
+ case kWhatCreate:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ status_t err = onCreate();
+
+ PostReplyWithError(replyID, err);
+ break;
+ }
+ case kWhatCapture:
+ {
+ sp<AReplyToken> replyID;
+ CHECK(msg->senderAwaitsResponse(&replyID));
+
+ sp<RefBase> layerObj, bufferObj;
+ int32_t left, top, right, bottom;
+ CHECK(msg->findObject("layer", &layerObj));
+ CHECK(msg->findRect("crop", &left, &top, &right, &bottom));
+ CHECK(msg->findObject("buffer", &bufferObj));
+
+ sp<GraphicBuffer> buffer = static_cast<GraphicBuffer*>(bufferObj.get());
+ sp<Layer> layer = static_cast<Layer*>(layerObj.get());
+
+ PostReplyWithError(replyID,
+ onCapture(layer, Rect(left, top, right, bottom), buffer));
+
+ break;
+ }
+ default:
+ TRESPASS();
+ }
+}
+
+} // namespace android
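FrameCaptureProcessor serializes all RenderEngine work onto its own ALooper thread: each public entry point posts an AMessage to the handler and blocks on the reply through PostAndAwaitResponse(), which maps a missing "err" field to OK. A compact sketch of that request/reply convention in isolation (hypothetical message and handler, not the class above):

    sp<AMessage> msg = new AMessage(kWhatCapture, handler);
    msg->setObject("buffer", buffer);

    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && !response->findInt32("err", &err)) {
        err = OK;   // reply carried no error field: treat the request as successful
    }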
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index c6ec6de..d75b317 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -18,12 +18,12 @@
#define LOG_TAG "FrameDecoder"
#include "include/FrameDecoder.h"
+#include "include/FrameCaptureLayer.h"
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <gui/Surface.h>
-#include <gui/SurfaceComposerClient.h>
#include <inttypes.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/IMediaSource.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/avc_utils.h>
@@ -31,6 +31,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/FrameCaptureProcessor.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaDefs.h>
@@ -43,6 +44,7 @@
static const int64_t kBufferTimeOutUs = 10000LL; // 10 msec
static const size_t kRetryCount = 50; // must be >0
+static const int64_t kDefaultSampleDurationUs = 33333LL; // 33ms
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
@@ -98,7 +100,7 @@
ALOGE("not enough memory for VideoFrame size=%zu", size);
return NULL;
}
- VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->pointer());
+ VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->unsecurePointer());
frameCopy->init(frame, iccData, iccSize);
return frameMem;
@@ -199,7 +201,19 @@
tileWidth = tileHeight = 0;
}
}
- return allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp);
+
+ sp<IMemory> metaMem = allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp);
+
+ // try to fill sequence meta's duration based on average frame rate,
+    // try to fill the sequence meta's duration based on the average frame rate,
+    // defaulting to 33ms if the frame rate is unavailable.
+ VideoFrame* meta = static_cast<VideoFrame*>(metaMem->unsecurePointer());
+ if (trackMeta->findInt32(kKeyFrameRate, &frameRate) && frameRate > 0) {
+ meta->mDurationUs = 1000000LL / frameRate;
+ } else {
+ meta->mDurationUs = kDefaultSampleDurationUs;
+ }
+ return metaMem;
}
FrameDecoder::FrameDecoder(
@@ -443,7 +457,8 @@
mFrame(NULL),
mIsAvcOrHevc(false),
mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
- mTargetTimeUs(-1LL) {
+ mTargetTimeUs(-1LL),
+ mDefaultSampleDurationUs(0) {
}
sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
@@ -498,7 +513,7 @@
}
if (isHDR(videoFormat)) {
- *window = initSurfaceControl();
+ *window = initSurface();
if (*window == NULL) {
ALOGE("Failed to init surface control for HDR, fallback to non-hdr");
} else {
@@ -506,6 +521,13 @@
}
}
+ int32_t frameRate;
+ if (trackMeta()->findInt32(kKeyFrameRate, &frameRate) && frameRate > 0) {
+ mDefaultSampleDurationUs = 1000000LL / frameRate;
+ } else {
+ mDefaultSampleDurationUs = kDefaultSampleDurationUs;
+ }
+
return videoFormat;
}
@@ -526,6 +548,12 @@
// option, in which case we need to actually decode to targetTimeUs.
*flags |= MediaCodec::BUFFER_FLAG_EOS;
}
+ int64_t durationUs;
+ if (sampleMeta.findInt64(kKeyDuration, &durationUs)) {
+ mSampleDurations.push_back(durationUs);
+ } else {
+ mSampleDurations.push_back(mDefaultSampleDurationUs);
+ }
return OK;
}
@@ -533,6 +561,11 @@
const sp<MediaCodecBuffer> &videoFrameBuffer,
const sp<AMessage> &outputFormat,
int64_t timeUs, bool *done) {
+ int64_t durationUs = mDefaultSampleDurationUs;
+ if (!mSampleDurations.empty()) {
+ durationUs = *mSampleDurations.begin();
+ mSampleDurations.erase(mSampleDurations.begin());
+ }
bool shouldOutput = (mTargetTimeUs < 0LL) || (timeUs >= mTargetTimeUs);
// If this is not the target frame, skip color convert.
@@ -557,7 +590,7 @@
}
if (!outputFormat->findInt32("stride", &stride)) {
- if (mSurfaceControl == NULL) {
+ if (mCaptureLayer == NULL) {
ALOGE("format must have stride for byte buffer mode: %s",
outputFormat->debugString().c_str());
return ERROR_MALFORMED;
@@ -581,14 +614,16 @@
0,
0,
dstBpp(),
- mSurfaceControl != nullptr /*allocRotated*/);
- mFrame = static_cast<VideoFrame*>(frameMem->pointer());
+ mCaptureLayer != nullptr /*allocRotated*/);
+ mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
setFrame(frameMem);
}
- if (mSurfaceControl != nullptr) {
- return captureSurfaceControl();
+ mFrame->mDurationUs = durationUs;
+
+ if (mCaptureLayer != nullptr) {
+ return captureSurface();
}
ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
@@ -621,70 +656,26 @@
return ERROR_UNSUPPORTED;
}
-sp<Surface> VideoFrameDecoder::initSurfaceControl() {
- sp<SurfaceComposerClient> client = new SurfaceComposerClient();
- if (client->initCheck() != NO_ERROR) {
- ALOGE("failed to get SurfaceComposerClient");
- return NULL;
+sp<Surface> VideoFrameDecoder::initSurface() {
+    // create the capture layer (the consumer-side listener), and hold an sp so
+    // that it stays alive for as long as the decoder uses its surface.
+ sp<FrameCaptureLayer> captureLayer = new FrameCaptureLayer();
+ if (captureLayer->init() != OK) {
+ ALOGE("failed to init capture layer");
+ return nullptr;
}
+ mCaptureLayer = captureLayer;
- // create a container layer to hold the capture layer, so that we can
- // use full frame drop. If without the container, the crop will be set
- // to display size.
- sp<SurfaceControl> parent = client->createSurface(
- String8("parent"),
- 0 /* width */, 0 /* height */,
- PIXEL_FORMAT_RGBA_8888,
- ISurfaceComposerClient::eFXSurfaceContainer );
-
- if (!parent) {
- ALOGE("failed to get surface control parent");
- return NULL;
- }
-
- // create the surface with unknown size 1x1 for now, real size will
- // be set before the capture when we have output format info.
- sp<SurfaceControl> surfaceControl = client->createSurface(
- String8("thumbnail"),
- 1 /* width */, 1 /* height */,
- PIXEL_FORMAT_RGBA_8888,
- ISurfaceComposerClient::eFXSurfaceBufferQueue,
- parent.get());
-
- if (!surfaceControl) {
- ALOGE("failed to get surface control");
- return NULL;
- }
-
- SurfaceComposerClient::Transaction t;
- t.hide(parent)
- .show(surfaceControl)
- .apply(true);
-
- mSurfaceControl = surfaceControl;
- mParent = parent;
-
- return surfaceControl->getSurface();
+ return captureLayer->getSurface();
}
-status_t VideoFrameDecoder::captureSurfaceControl() {
- // set the layer size to the output size before the capture
- SurfaceComposerClient::Transaction()
- .setSize(mSurfaceControl, mFrame->mWidth, mFrame->mHeight)
- .apply(true);
-
+status_t VideoFrameDecoder::captureSurface() {
sp<GraphicBuffer> outBuffer;
- status_t err = ScreenshotClient::captureChildLayers(
- mParent->getHandle(),
- ui::Dataspace::V0_SRGB,
- captureFormat(),
- Rect(0, 0, mFrame->mWidth, mFrame->mHeight),
- {},
- 1.0f /*frameScale*/,
- &outBuffer);
+ status_t err = mCaptureLayer->capture(
+ captureFormat(), Rect(0, 0, mFrame->mWidth, mFrame->mHeight), &outBuffer);
if (err != OK) {
- ALOGE("failed to captureLayers: err %d", err);
+ ALOGE("failed to capture layer (err %d)", err);
return err;
}
@@ -861,7 +852,7 @@
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp());
- mFrame = static_cast<VideoFrame*>(frameMem->pointer());
+ mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
setFrame(frameMem);
}
@@ -883,12 +874,6 @@
}
converter.setSrcColorSpace(standard, range, transfer);
- int32_t dstLeft, dstTop, dstRight, dstBottom;
- dstLeft = mTilesDecoded % mGridCols * width;
- dstTop = mTilesDecoded / mGridCols * height;
- dstRight = dstLeft + width - 1;
- dstBottom = dstTop + height - 1;
-
int32_t crop_left, crop_top, crop_right, crop_bottom;
if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
crop_left = crop_top = 0;
@@ -896,15 +881,25 @@
crop_bottom = height - 1;
}
+ int32_t crop_width, crop_height;
+ crop_width = crop_right - crop_left + 1;
+ crop_height = crop_bottom - crop_top + 1;
+
+ int32_t dstLeft, dstTop, dstRight, dstBottom;
+ dstLeft = mTilesDecoded % mGridCols * crop_width;
+ dstTop = mTilesDecoded / mGridCols * crop_height;
+ dstRight = dstLeft + crop_width - 1;
+ dstBottom = dstTop + crop_height - 1;
+
// apply crop on bottom-right
// TODO: need to move this into the color converter itself.
if (dstRight >= mWidth) {
- crop_right = mWidth - dstLeft - 1;
- dstRight = dstLeft + crop_right;
+ crop_right = crop_left + mWidth - dstLeft - 1;
+ dstRight = mWidth - 1;
}
if (dstBottom >= mHeight) {
- crop_bottom = mHeight - dstTop - 1;
- dstBottom = dstTop + crop_bottom;
+ crop_bottom = crop_top + mHeight - dstTop - 1;
+ dstBottom = mHeight - 1;
}
*done = (++mTilesDecoded >= mTargetTiles);
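Worked example of the revised tile placement (illustrative numbers): with mGridCols = 4, a codec crop of 512x384 and mTilesDecoded = 5, the tile lands at dstLeft = 5 % 4 * 512 = 512 and dstTop = 5 / 4 * 384 = 384, i.e. column 1 of row 1 (zero-based); the trailing clamps then shrink crop_right/crop_bottom only for tiles that would spill past mWidth or mHeight.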
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index f130c9b..bf4e7de 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2213,8 +2213,10 @@
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
mMeta->findData(kKeyHVCC, &type, &data, &size);
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
- || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+ mMeta->findData(kKeyDVCC, &type, &data, &size);
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
+ !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
if (mMeta->findData(kKeyESDS, &type, &data, &size)) {
ESDS esds(data, size);
if (esds.getCodecSpecificInfo(&data, &size) == OK &&
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index ae0fa3c..14564c9 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -21,13 +21,14 @@
#include <inttypes.h>
#include <stdlib.h>
-#include "include/SecureBuffer.h"
-#include "include/SharedMemoryBuffer.h"
#include "include/SoftwareRenderer.h"
#include "StagefrightPluginLoader.h"
#include <android/hardware/cas/native/1.0/IDescrambler.h>
+#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
+#include <android/media/BnResourceManagerClient.h>
+#include <android/media/IResourceManagerService.h>
#include <binder/IMemory.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
@@ -35,11 +36,11 @@
#include <cutils/properties.h>
#include <gui/BufferQueue.h>
#include <gui/Surface.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/IOMX.h>
-#include <media/IResourceManagerService.h>
#include <media/MediaCodecBuffer.h>
#include <media/MediaAnalyticsItem.h>
+#include <media/MediaResource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -48,6 +49,7 @@
#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/ACodec.h>
+#include <media/stagefright/BatteryChecker.h>
#include <media/stagefright/BufferProducerWrapper.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
@@ -62,6 +64,10 @@
namespace android {
+using ::android::binder::Status;
+using ::android::media::BnResourceManagerClient;
+using ::android::media::IResourceManagerService;
+
// key for media statistics
static const char *kCodecKeyName = "codec";
// attrs for media statistics
@@ -116,18 +122,18 @@
static const int kMaxRetry = 2;
static const int kMaxReclaimWaitTimeInUs = 500000; // 0.5s
static const int kNumBuffersAlign = 16;
-static const int kBatteryStatsTimeoutUs = 3000000ll; // 3 seconds
////////////////////////////////////////////////////////////////////////////////
struct ResourceManagerClient : public BnResourceManagerClient {
explicit ResourceManagerClient(MediaCodec* codec) : mMediaCodec(codec) {}
- virtual bool reclaimResource() {
+ Status reclaimResource(bool* _aidl_return) override {
sp<MediaCodec> codec = mMediaCodec.promote();
if (codec == NULL) {
// codec is already gone.
- return true;
+ *_aidl_return = true;
+ return Status::ok();
}
status_t err = codec->reclaim();
if (err == WOULD_BLOCK) {
@@ -139,22 +145,23 @@
if (err != OK) {
ALOGW("ResourceManagerClient failed to release codec with err %d", err);
}
- return (err == OK);
+ *_aidl_return = (err == OK);
+ return Status::ok();
}
- virtual String8 getName() {
- String8 ret;
+ Status getName(::std::string* _aidl_return) override {
+ _aidl_return->clear();
sp<MediaCodec> codec = mMediaCodec.promote();
if (codec == NULL) {
// codec is already gone.
- return ret;
+ return Status::ok();
}
AString name;
if (codec->getName(&name) == OK) {
- ret.setTo(name.c_str());
+ *_aidl_return = name.c_str();
}
- return ret;
+ return Status::ok();
}
protected:
@@ -166,6 +173,35 @@
DISALLOW_EVIL_CONSTRUCTORS(ResourceManagerClient);
};
+struct MediaCodec::ResourceManagerServiceProxy : public IBinder::DeathRecipient {
+ ResourceManagerServiceProxy(pid_t pid, uid_t uid);
+ ~ResourceManagerServiceProxy();
+
+ void init();
+
+ // implements DeathRecipient
+ virtual void binderDied(const wp<IBinder>& /*who*/);
+
+ void addResource(
+ int64_t clientId,
+ const sp<IResourceManagerClient> &client,
+ const std::vector<MediaResourceParcel> &resources);
+
+ void removeResource(
+ int64_t clientId,
+ const std::vector<MediaResourceParcel> &resources);
+
+ void removeClient(int64_t clientId);
+
+ bool reclaimResource(const std::vector<MediaResourceParcel> &resources);
+
+private:
+ Mutex mLock;
+ sp<android::media::IResourceManagerService> mService;
+ pid_t mPid;
+ uid_t mUid;
+};
+
MediaCodec::ResourceManagerServiceProxy::ResourceManagerServiceProxy(
pid_t pid, uid_t uid)
: mPid(pid), mUid(uid) {
@@ -200,7 +236,7 @@
void MediaCodec::ResourceManagerServiceProxy::addResource(
int64_t clientId,
const sp<IResourceManagerClient> &client,
- const Vector<MediaResource> &resources) {
+ const std::vector<MediaResourceParcel> &resources) {
Mutex::Autolock _l(mLock);
if (mService == NULL) {
return;
@@ -210,7 +246,7 @@
void MediaCodec::ResourceManagerServiceProxy::removeResource(
int64_t clientId,
- const Vector<MediaResource> &resources) {
+ const std::vector<MediaResourceParcel> &resources) {
Mutex::Autolock _l(mLock);
if (mService == NULL) {
return;
@@ -227,12 +263,14 @@
}
bool MediaCodec::ResourceManagerServiceProxy::reclaimResource(
- const Vector<MediaResource> &resources) {
+ const std::vector<MediaResourceParcel> &resources) {
Mutex::Autolock _l(mLock);
if (mService == NULL) {
return false;
}
- return mService->reclaimResource(mPid, resources);
+ bool success;
+ Status status = mService->reclaimResource(mPid, resources, &success);
+ return status.isOk() && success;
}
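For context on the reclaimResource change above: with the AIDL-generated IResourceManagerService, transport and service failures come back through binder::Status while the boolean result comes back through an out-parameter, so callers must check both. A minimal sketch of the calling pattern (hypothetical caller, same types as in this hunk):

// Hedged sketch of the Status + out-parameter pattern the proxy adopts above.
bool tryReclaim(const sp<IResourceManagerService> &service, pid_t pid,
                const std::vector<MediaResourceParcel> &resources) {
    bool success = false;
    ::android::binder::Status status = service->reclaimResource(pid, resources, &success);
    // A dead binder or a service-side exception is reported via Status, not via the bool.
    return status.isOk() && success;
}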
////////////////////////////////////////////////////////////////////////////////
@@ -506,7 +544,7 @@
sp<IOMX> omx = client.interface();
sp<IGraphicBufferProducer> bufferProducer;
- sp<IGraphicBufferSource> bufferSource;
+ sp<hardware::media::omx::V1_0::IGraphicBufferSource> bufferSource;
status_t err = omx->createInputSurface(&bufferProducer, &bufferSource);
@@ -527,7 +565,7 @@
mFlags(0),
mStickyError(OK),
mSoftRenderer(NULL),
- mAnalyticsItem(NULL),
+ mMetricsHandle(0),
mIsVideo(false),
mVideoWidth(0),
mVideoHeight(0),
@@ -539,10 +577,7 @@
mHaveInputSurface(false),
mHavePendingInputBuffers(false),
mCpuBoostRequested(false),
- mLatencyUnknown(0),
- mLastActivityTimeUs(-1ll),
- mBatteryStatNotified(false),
- mBatteryCheckerGeneration(0) {
+ mLatencyUnknown(0) {
if (uid == kNoUid) {
mUid = IPCThreadState::self()->getCallingUid();
} else {
@@ -551,19 +586,19 @@
mResourceManagerClient = new ResourceManagerClient(this);
mResourceManagerService = new ResourceManagerServiceProxy(pid, mUid);
- initAnalyticsItem();
+ initMediametrics();
}
MediaCodec::~MediaCodec() {
CHECK_EQ(mState, UNINITIALIZED);
mResourceManagerService->removeClient(getId(mResourceManagerClient));
- flushAnalyticsItem();
+ flushMediametrics();
}
-void MediaCodec::initAnalyticsItem() {
- if (mAnalyticsItem == NULL) {
- mAnalyticsItem = MediaAnalyticsItem::create(kCodecKeyName);
+void MediaCodec::initMediametrics() {
+ if (mMetricsHandle == 0) {
+ mMetricsHandle = mediametrics_create(kCodecKeyName);
}
mLatencyHist.setup(kLatencyHistBuckets, kLatencyHistWidth, kLatencyHistFloor);
@@ -577,38 +612,39 @@
}
}
-void MediaCodec::updateAnalyticsItem() {
- ALOGV("MediaCodec::updateAnalyticsItem");
- if (mAnalyticsItem == NULL) {
+void MediaCodec::updateMediametrics() {
+ ALOGV("MediaCodec::updateMediametrics");
+ if (mMetricsHandle == 0) {
return;
}
+
if (mLatencyHist.getCount() != 0 ) {
- mAnalyticsItem->setInt64(kCodecLatencyMax, mLatencyHist.getMax());
- mAnalyticsItem->setInt64(kCodecLatencyMin, mLatencyHist.getMin());
- mAnalyticsItem->setInt64(kCodecLatencyAvg, mLatencyHist.getAvg());
- mAnalyticsItem->setInt64(kCodecLatencyCount, mLatencyHist.getCount());
+ mediametrics_setInt64(mMetricsHandle, kCodecLatencyMax, mLatencyHist.getMax());
+ mediametrics_setInt64(mMetricsHandle, kCodecLatencyMin, mLatencyHist.getMin());
+ mediametrics_setInt64(mMetricsHandle, kCodecLatencyAvg, mLatencyHist.getAvg());
+ mediametrics_setInt64(mMetricsHandle, kCodecLatencyCount, mLatencyHist.getCount());
if (kEmitHistogram) {
// and the histogram itself
std::string hist = mLatencyHist.emit();
- mAnalyticsItem->setCString(kCodecLatencyHist, hist.c_str());
+ mediametrics_setCString(mMetricsHandle, kCodecLatencyHist, hist.c_str());
}
}
if (mLatencyUnknown > 0) {
- mAnalyticsItem->setInt64(kCodecLatencyUnknown, mLatencyUnknown);
+ mediametrics_setInt64(mMetricsHandle, kCodecLatencyUnknown, mLatencyUnknown);
}
#if 0
// enable for short term, only while debugging
- updateEphemeralAnalytics(mAnalyticsItem);
+ updateEphemeralMediametrics(mMetricsHandle);
#endif
}
-void MediaCodec::updateEphemeralAnalytics(MediaAnalyticsItem *item) {
- ALOGD("MediaCodec::updateEphemeralAnalytics()");
+void MediaCodec::updateEphemeralMediametrics(mediametrics_handle_t item) {
+ ALOGD("MediaCodec::updateEphemeralMediametrics()");
- if (item == NULL) {
+ if (item == 0) {
return;
}
@@ -631,28 +667,27 @@
// spit the data (if any) into the supplied analytics record
if (recentHist.getCount()!= 0 ) {
- item->setInt64(kCodecRecentLatencyMax, recentHist.getMax());
- item->setInt64(kCodecRecentLatencyMin, recentHist.getMin());
- item->setInt64(kCodecRecentLatencyAvg, recentHist.getAvg());
- item->setInt64(kCodecRecentLatencyCount, recentHist.getCount());
+ mediametrics_setInt64(item, kCodecRecentLatencyMax, recentHist.getMax());
+ mediametrics_setInt64(item, kCodecRecentLatencyMin, recentHist.getMin());
+ mediametrics_setInt64(item, kCodecRecentLatencyAvg, recentHist.getAvg());
+ mediametrics_setInt64(item, kCodecRecentLatencyCount, recentHist.getCount());
if (kEmitHistogram) {
// and the histogram itself
std::string hist = recentHist.emit();
- item->setCString(kCodecRecentLatencyHist, hist.c_str());
+ mediametrics_setCString(item, kCodecRecentLatencyHist, hist.c_str());
}
}
}
-void MediaCodec::flushAnalyticsItem() {
- updateAnalyticsItem();
- if (mAnalyticsItem != NULL) {
- // don't log empty records
- if (mAnalyticsItem->count() > 0) {
- mAnalyticsItem->selfrecord();
+void MediaCodec::flushMediametrics() {
+ updateMediametrics();
+ if (mMetricsHandle != 0) {
+ if (mediametrics_count(mMetricsHandle) > 0) {
+ mediametrics_selfRecord(mMetricsHandle);
}
- delete mAnalyticsItem;
- mAnalyticsItem = NULL;
+ mediametrics_delete(mMetricsHandle);
+ mMetricsHandle = 0;
}
}
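The renamed methods above move from the MediaAnalyticsItem object API to the C-style mediametrics handle API. As a rough sketch of the handle lifecycle implied by this hunk (using the key constants defined earlier in this file; the codec name and latency value are my own placeholders):

// create -> set fields -> self-record (only if non-empty) -> delete, mirroring
// initMediametrics()/updateMediametrics()/flushMediametrics() above.
mediametrics_handle_t handle = mediametrics_create(kCodecKeyName);
mediametrics_setCString(handle, kCodecCodec, "c2.android.avc.decoder");  // hypothetical value
mediametrics_setInt64(handle, kCodecLatencyMax, 12345);                  // hypothetical value
if (mediametrics_count(handle) > 0) {
    mediametrics_selfRecord(handle);   // don't log empty records
}
mediametrics_delete(handle);
handle = 0;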
@@ -755,7 +790,11 @@
return;
}
- scheduleBatteryCheckerIfNeeded();
+ if (mBatteryChecker != nullptr) {
+ mBatteryChecker->onCodecActivity([this] () {
+ addResource(MediaResource::VideoBatteryResource());
+ });
+ }
const int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
BufferFlightTiming_t startdata = { presentationUs, nowNs };
@@ -791,7 +830,11 @@
return;
}
- scheduleBatteryCheckerIfNeeded();
+ if (mBatteryChecker != nullptr) {
+ mBatteryChecker->onCodecActivity([this] () {
+ addResource(MediaResource::VideoBatteryResource());
+ });
+ }
BufferFlightTiming_t startdata;
bool valid = false;
@@ -976,18 +1019,19 @@
// ".secure"
msg->setString("name", name);
- if (mAnalyticsItem != NULL) {
- mAnalyticsItem->setCString(kCodecCodec, name.c_str());
- mAnalyticsItem->setCString(kCodecMode, mIsVideo ? kCodecModeVideo : kCodecModeAudio);
+ if (mMetricsHandle != 0) {
+ mediametrics_setCString(mMetricsHandle, kCodecCodec, name.c_str());
+ mediametrics_setCString(mMetricsHandle, kCodecMode,
+ mIsVideo ? kCodecModeVideo : kCodecModeAudio);
+ }
+
+ if (mIsVideo) {
+ mBatteryChecker = new BatteryChecker(new AMessage(kWhatCheckBatteryStats, this));
}
status_t err;
- Vector<MediaResource> resources;
- MediaResource::Type type =
- secureCodec ? MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
- MediaResource::SubType subtype =
- mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
- resources.push_back(MediaResource(type, subtype, 1));
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource::CodecResource(secureCodec, mIsVideo));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -1035,16 +1079,17 @@
uint32_t flags) {
sp<AMessage> msg = new AMessage(kWhatConfigure, this);
- if (mAnalyticsItem != NULL) {
+ if (mMetricsHandle != 0) {
int32_t profile = 0;
if (format->findInt32("profile", &profile)) {
- mAnalyticsItem->setInt32(kCodecProfile, profile);
+ mediametrics_setInt32(mMetricsHandle, kCodecProfile, profile);
}
int32_t level = 0;
if (format->findInt32("level", &level)) {
- mAnalyticsItem->setInt32(kCodecLevel, level);
+ mediametrics_setInt32(mMetricsHandle, kCodecLevel, level);
}
- mAnalyticsItem->setInt32(kCodecEncoder, (flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
+ mediametrics_setInt32(mMetricsHandle, kCodecEncoder,
+ (flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
}
if (mIsVideo) {
@@ -1054,17 +1099,17 @@
mRotationDegrees = 0;
}
- if (mAnalyticsItem != NULL) {
- mAnalyticsItem->setInt32(kCodecWidth, mVideoWidth);
- mAnalyticsItem->setInt32(kCodecHeight, mVideoHeight);
- mAnalyticsItem->setInt32(kCodecRotation, mRotationDegrees);
+ if (mMetricsHandle != 0) {
+ mediametrics_setInt32(mMetricsHandle, kCodecWidth, mVideoWidth);
+ mediametrics_setInt32(mMetricsHandle, kCodecHeight, mVideoHeight);
+ mediametrics_setInt32(mMetricsHandle, kCodecRotation, mRotationDegrees);
int32_t maxWidth = 0;
if (format->findInt32("max-width", &maxWidth)) {
- mAnalyticsItem->setInt32(kCodecMaxWidth, maxWidth);
+ mediametrics_setInt32(mMetricsHandle, kCodecMaxWidth, maxWidth);
}
int32_t maxHeight = 0;
if (format->findInt32("max-height", &maxHeight)) {
- mAnalyticsItem->setInt32(kCodecMaxHeight, maxHeight);
+ mediametrics_setInt32(mMetricsHandle, kCodecMaxHeight, maxHeight);
}
}
@@ -1086,8 +1131,8 @@
} else {
msg->setPointer("descrambler", descrambler.get());
}
- if (mAnalyticsItem != NULL) {
- mAnalyticsItem->setInt32(kCodecCrypto, 1);
+ if (mMetricsHandle != 0) {
+ mediametrics_setInt32(mMetricsHandle, kCodecCrypto, 1);
}
} else if (mFlags & kFlagIsSecure) {
ALOGW("Crypto or descrambler should be given for secure codec");
@@ -1097,15 +1142,11 @@
mConfigureMsg = msg;
status_t err;
- Vector<MediaResource> resources;
- MediaResource::Type type = (mFlags & kFlagIsSecure) ?
- MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
- MediaResource::SubType subtype =
- mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
- resources.push_back(MediaResource(type, subtype, 1));
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
// Don't know the buffer size at this point, but it's fine to use 1 because
// the reclaimResource call doesn't consider the requester's buffer size for now.
- resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
+ resources.push_back(MediaResource::GraphicMemoryResource(1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -1230,18 +1271,16 @@
return size;
}
-void MediaCodec::addResource(
- MediaResource::Type type, MediaResource::SubType subtype, uint64_t value) {
- Vector<MediaResource> resources;
- resources.push_back(MediaResource(type, subtype, value));
+void MediaCodec::addResource(const MediaResourceParcel &resource) {
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(resource);
mResourceManagerService->addResource(
getId(mResourceManagerClient), mResourceManagerClient, resources);
}
-void MediaCodec::removeResource(
- MediaResource::Type type, MediaResource::SubType subtype, uint64_t value) {
- Vector<MediaResource> resources;
- resources.push_back(MediaResource(type, subtype, value));
+void MediaCodec::removeResource(const MediaResourceParcel &resource) {
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(resource);
mResourceManagerService->removeResource(getId(mResourceManagerClient), resources);
}
@@ -1249,15 +1288,11 @@
sp<AMessage> msg = new AMessage(kWhatStart, this);
status_t err;
- Vector<MediaResource> resources;
- MediaResource::Type type = (mFlags & kFlagIsSecure) ?
- MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
- MediaResource::SubType subtype =
- mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
- resources.push_back(MediaResource(type, subtype, 1));
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
// Don't know the buffer size at this point, but it's fine to use 1 because
// the reclaimResource call doesn't consider the requester's buffer size for now.
- resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
+ resources.push_back(MediaResource::GraphicMemoryResource(1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -1552,22 +1587,22 @@
return OK;
}
-status_t MediaCodec::getMetrics(MediaAnalyticsItem * &reply) {
+status_t MediaCodec::getMetrics(mediametrics_handle_t &reply) {
- reply = NULL;
+ reply = 0;
// shouldn't happen, but be safe
- if (mAnalyticsItem == NULL) {
+ if (mMetricsHandle == 0) {
return UNKNOWN_ERROR;
}
// update any in-flight data that's not carried within the record
- updateAnalyticsItem();
+ updateMediametrics();
// send it back to the caller.
- reply = mAnalyticsItem->dup();
+ reply = mediametrics_dup(mMetricsHandle);
- updateEphemeralAnalytics(reply);
+ updateEphemeralMediametrics(reply);
return OK;
}
@@ -1699,26 +1734,33 @@
totalPixel = width * height;
}
if (totalPixel >= 1920 * 1080) {
- addResource(MediaResource::kCpuBoost,
- MediaResource::kUnspecifiedSubType, 1);
+ addResource(MediaResource::CpuBoostResource());
mCpuBoostRequested = true;
}
}
}
-void MediaCodec::scheduleBatteryCheckerIfNeeded() {
- if (!mIsVideo || !isExecuting()) {
+BatteryChecker::BatteryChecker(const sp<AMessage> &msg, int64_t timeoutUs)
+ : mTimeoutUs(timeoutUs)
+ , mLastActivityTimeUs(-1ll)
+ , mBatteryStatNotified(false)
+ , mBatteryCheckerGeneration(0)
+ , mIsExecuting(false)
+ , mBatteryCheckerMsg(msg) {}
+
+void BatteryChecker::onCodecActivity(std::function<void()> batteryOnCb) {
+ if (!isExecuting()) {
// ignore if not executing
return;
}
if (!mBatteryStatNotified) {
- addResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1);
+ batteryOnCb();
mBatteryStatNotified = true;
- sp<AMessage> msg = new AMessage(kWhatCheckBatteryStats, this);
+ sp<AMessage> msg = mBatteryCheckerMsg->dup();
msg->setInt32("generation", mBatteryCheckerGeneration);
// post checker and clear last activity time
- msg->post(kBatteryStatsTimeoutUs);
+ msg->post(mTimeoutUs);
mLastActivityTimeUs = -1ll;
} else {
// update last activity time
@@ -1726,7 +1768,8 @@
}
}
-void MediaCodec::onBatteryChecker(const sp<AMessage> &msg) {
+void BatteryChecker::onCheckBatteryTimer(
+ const sp<AMessage> &msg, std::function<void()> batteryOffCb) {
// ignore if this checker already expired because the client resource was removed
int32_t generation;
if (!msg->findInt32("generation", &generation)
@@ -1736,15 +1779,20 @@
if (mLastActivityTimeUs < 0ll) {
// timed out inactive, do not repost checker
- removeResource(MediaResource::kBattery, MediaResource::kVideoCodec, 1);
+ batteryOffCb();
mBatteryStatNotified = false;
} else {
// repost checker and clear last activity time
- msg->post(kBatteryStatsTimeoutUs + mLastActivityTimeUs - ALooper::GetNowUs());
+ msg->post(mTimeoutUs + mLastActivityTimeUs - ALooper::GetNowUs());
mLastActivityTimeUs = -1ll;
}
}
+void BatteryChecker::onClientRemoved() {
+ mBatteryStatNotified = false;
+ mBatteryCheckerGeneration++;
+}
+
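Putting the BatteryChecker pieces together: the class no longer touches the resource manager itself, it only decides when to fire the callbacks its owner supplies. A minimal sketch of how MediaCodec drives it, based on the hunks in this change (assuming the refcounted mBatteryChecker member declared in the header; the lambdas are the same ones shown above):

// Created once the codec is known to be video (see the init path earlier in this diff).
sp<BatteryChecker> checker = new BatteryChecker(new AMessage(kWhatCheckBatteryStats, this));

// On every queued/dequeued video buffer: turns the battery stat on the first time only
// and posts the timeout checker message.
checker->onCodecActivity([this] () {
    addResource(MediaResource::VideoBatteryResource());
});

// In the kWhatCheckBatteryStats handler: turns the stat off again after a quiet timeout,
// or reposts the checker if there was recent activity.
checker->onCheckBatteryTimer(msg, [this] () {
    removeResource(MediaResource::VideoBatteryResource());
});

// On release/reset: clears the notified flag and invalidates any pending checker messages.
checker->onClientRemoved();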
////////////////////////////////////////////////////////////////////////////////
void MediaCodec::cancelPendingDequeueOperations() {
@@ -1867,10 +1915,11 @@
case CONFIGURING:
{
if (actionCode == ACTION_CODE_FATAL) {
- mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
- flushAnalyticsItem();
- initAnalyticsItem();
+ mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+ mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+ stateString(mState).c_str());
+ flushMediametrics();
+ initMediametrics();
}
setState(actionCode == ACTION_CODE_FATAL ?
UNINITIALIZED : INITIALIZED);
@@ -1880,10 +1929,11 @@
case STARTING:
{
if (actionCode == ACTION_CODE_FATAL) {
- mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
- flushAnalyticsItem();
- initAnalyticsItem();
+ mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+ mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+ stateString(mState).c_str());
+ flushMediametrics();
+ initMediametrics();
}
setState(actionCode == ACTION_CODE_FATAL ?
UNINITIALIZED : CONFIGURED);
@@ -1921,10 +1971,11 @@
case FLUSHING:
{
if (actionCode == ACTION_CODE_FATAL) {
- mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
- flushAnalyticsItem();
- initAnalyticsItem();
+ mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+ mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+ stateString(mState).c_str());
+ flushMediametrics();
+ initMediametrics();
setState(UNINITIALIZED);
} else {
@@ -1954,10 +2005,11 @@
setState(INITIALIZED);
break;
default:
- mAnalyticsItem->setInt32(kCodecError, err);
- mAnalyticsItem->setCString(kCodecErrorState, stateString(mState).c_str());
- flushAnalyticsItem();
- initAnalyticsItem();
+ mediametrics_setInt32(mMetricsHandle, kCodecError, err);
+ mediametrics_setCString(mMetricsHandle, kCodecErrorState,
+ stateString(mState).c_str());
+ flushMediametrics();
+ initMediametrics();
setState(UNINITIALIZED);
break;
}
@@ -2014,7 +2066,8 @@
CHECK(msg->findString("componentName", &mComponentName));
if (mComponentName.c_str()) {
- mAnalyticsItem->setCString(kCodecCodec, mComponentName.c_str());
+ mediametrics_setCString(mMetricsHandle, kCodecCodec,
+ mComponentName.c_str());
}
const char *owner = mCodecInfo->getOwnerName();
@@ -2026,20 +2079,17 @@
}
mOwnerName = owner;
- MediaResource::Type resourceType;
if (mComponentName.endsWith(".secure")) {
mFlags |= kFlagIsSecure;
- resourceType = MediaResource::kSecureCodec;
- mAnalyticsItem->setInt32(kCodecSecure, 1);
+ mediametrics_setInt32(mMetricsHandle, kCodecSecure, 1);
} else {
mFlags &= ~kFlagIsSecure;
- resourceType = MediaResource::kNonSecureCodec;
- mAnalyticsItem->setInt32(kCodecSecure, 0);
+ mediametrics_setInt32(mMetricsHandle, kCodecSecure, 0);
}
if (mIsVideo) {
// audio codec is currently ignored.
- addResource(resourceType, MediaResource::kVideoCodec, 1);
+ addResource(MediaResource::CodecResource(mFlags & kFlagIsSecure, mIsVideo));
}
(new AMessage)->postReply(mReplyID);
@@ -2082,14 +2132,15 @@
(new AMessage)->postReply(mReplyID);
// augment our media metrics info, now that we know more things
- if (mAnalyticsItem != NULL) {
+ if (mMetricsHandle != 0) {
sp<AMessage> format;
if (mConfigureMsg != NULL &&
mConfigureMsg->findMessage("format", &format)) {
// format includes: mime
AString mime;
if (format->findString("mime", &mime)) {
- mAnalyticsItem->setCString(kCodecMime, mime.c_str());
+ mediametrics_setCString(mMetricsHandle, kCodecMime,
+ mime.c_str());
}
}
}
@@ -2159,10 +2210,8 @@
CHECK_EQ(mState, STARTING);
if (mIsVideo) {
- addResource(
- MediaResource::kGraphicMemory,
- MediaResource::kUnspecifiedSubType,
- getGraphicBufferSize());
+ addResource(MediaResource::GraphicMemoryResource(
+ getGraphicBufferSize()));
}
setState(STARTED);
(new AMessage)->postReply(mReplyID);
@@ -2382,8 +2431,10 @@
mFlags &= ~kFlagIsComponentAllocated;
// off since we're removing all resources including the battery on
- mBatteryStatNotified = false;
- mBatteryCheckerGeneration++;
+ if (mBatteryChecker != nullptr) {
+ mBatteryChecker->onClientRemoved();
+ }
+
mResourceManagerService->removeClient(getId(mResourceManagerClient));
(new AMessage)->postReply(mReplyID);
@@ -3097,7 +3148,11 @@
case kWhatCheckBatteryStats:
{
- onBatteryChecker(msg);
+ if (mBatteryChecker != nullptr) {
+ mBatteryChecker->onCheckBatteryTimer(msg, [this] () {
+ removeResource(MediaResource::VideoBatteryResource());
+ });
+ }
break;
}
@@ -3197,6 +3252,10 @@
mState = newState;
+ if (mBatteryChecker != nullptr) {
+ mBatteryChecker->setExecuting(isExecuting());
+ }
+
cancelPendingDequeueOperations();
}
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
index dd7c3e6..6b5b50e 100644
--- a/media/libstagefright/MediaCodecListOverrides.cpp
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -22,7 +22,7 @@
#include <cutils/properties.h>
#include <gui/Surface.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/IMediaCodecList.h>
#include <media/MediaCodecInfo.h>
#include <media/MediaResourcePolicy.h>
@@ -264,7 +264,9 @@
}
}
}
- global_results->add(kPolicySupportsMultipleSecureCodecs, supportMultipleSecureCodecs);
+ global_results->add(
+ MediaResourcePolicy::kPolicySupportsMultipleSecureCodecs().c_str(),
+ supportMultipleSecureCodecs);
}
static AString globalResultsToXml(const CodecSettings &results) {
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 50e454c..7243b82 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -22,7 +22,7 @@
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaBufferHolder.h>
#include <media/MediaCodecBuffer.h>
#include <media/MediaSource.h>
diff --git a/media/libstagefright/NdkUtils.cpp b/media/libstagefright/NdkUtils.cpp
deleted file mode 100644
index 904fe72..0000000
--- a/media/libstagefright/NdkUtils.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-
-#include <media/stagefright/NdkUtils.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-namespace android {
-
-sp<MetaData> convertMediaFormatWrapperToMetaData(const sp<AMediaFormatWrapper> &fmt) {
- sp<AMessage> msg = fmt->toAMessage();
- sp<MetaData> meta = new MetaData;
- convertMessageToMetaData(msg, meta);
- return meta;
-}
-
-} // namespace android
-
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 680d426..66fb4b0 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -22,13 +22,13 @@
#include "include/ESDS.h"
+#include <datasource/DataSourceFactory.h>
+#include <datasource/FileSource.h>
#include <media/DataSource.h>
#include <media/MediaSource.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSourceFactory.h>
-#include <media/stagefright/FileSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
@@ -36,6 +36,7 @@
#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
namespace android {
@@ -81,7 +82,7 @@
}
sp<DataSource> dataSource =
- DataSourceFactory::CreateFromURI(httpService, path, headers);
+ DataSourceFactory::getInstance()->CreateFromURI(httpService, path, headers);
if (dataSource == NULL) {
return -ENOENT;
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 8b6262f..771dfea 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -20,7 +20,7 @@
#include <gui/Surface.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/foundation/ALooper.h>
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index cf4edae..ce73676 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -158,7 +158,11 @@
if (mRetriever->setDataSource(fd, 0, size) == OK) {
sp<IMemory> mem = mRetriever->extractAlbumArt();
if (mem != NULL) {
- MediaAlbumArt *art = static_cast<MediaAlbumArt *>(mem->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ MediaAlbumArt *art = static_cast<MediaAlbumArt *>(mem->unsecurePointer());
return art->clone();
}
}
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 96818eb..c1b270c 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -7,6 +7,9 @@
"include-annotation": "android.platform.test.annotations.RequiresDevice"
}
]
+ },
+ {
+ "name": "BatteryChecker_test"
}
]
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 135151f..ac4d087 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -227,6 +227,68 @@
}
}
+static void parseDolbyVisionProfileLevelFromDvcc(const uint8_t *ptr, size_t size, sp<AMessage> &format) {
+ // dv_major.dv_minor Should be 1.0 or 2.1
+ if (size != 24 || ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1))) {
+ ALOGV("Size %zu, dv_major %d, dv_minor %d", size, ptr[0], ptr[1]);
+ return;
+ }
+
+ const uint8_t profile = ptr[2] >> 1;
+ const uint8_t level = ((ptr[2] & 0x1) << 5) | ((ptr[3] >> 3) & 0x1f);
+ const uint8_t rpu_present_flag = (ptr[3] >> 2) & 0x01;
+ const uint8_t el_present_flag = (ptr[3] >> 1) & 0x01;
+ const uint8_t bl_present_flag = (ptr[3] & 0x01);
+ const int32_t bl_compatibility_id = (int32_t)(ptr[4] >> 4);
+
+ ALOGV("profile-level-compatibility value in dv(c|v)c box %d-%d-%d",
+ profile, level, bl_compatibility_id);
+
+ // All Dolby Vision profiles will have profile and level info in MediaFormat
+ // Profiles 8 and 9 will have bl_compatibility_id too.
+ const static ALookup<uint8_t, OMX_VIDEO_DOLBYVISIONPROFILETYPE> profiles{
+ {1, OMX_VIDEO_DolbyVisionProfileDvavPen},
+ {3, OMX_VIDEO_DolbyVisionProfileDvheDen},
+ {4, OMX_VIDEO_DolbyVisionProfileDvheDtr},
+ {5, OMX_VIDEO_DolbyVisionProfileDvheStn},
+ {6, OMX_VIDEO_DolbyVisionProfileDvheDth},
+ {7, OMX_VIDEO_DolbyVisionProfileDvheDtb},
+ {8, OMX_VIDEO_DolbyVisionProfileDvheSt},
+ {9, OMX_VIDEO_DolbyVisionProfileDvavSe},
+ {10, OMX_VIDEO_DolbyVisionProfileDvav110},
+ };
+
+ const static ALookup<uint8_t, OMX_VIDEO_DOLBYVISIONLEVELTYPE> levels{
+ {0, OMX_VIDEO_DolbyVisionLevelUnknown},
+ {1, OMX_VIDEO_DolbyVisionLevelHd24},
+ {2, OMX_VIDEO_DolbyVisionLevelHd30},
+ {3, OMX_VIDEO_DolbyVisionLevelFhd24},
+ {4, OMX_VIDEO_DolbyVisionLevelFhd30},
+ {5, OMX_VIDEO_DolbyVisionLevelFhd60},
+ {6, OMX_VIDEO_DolbyVisionLevelUhd24},
+ {7, OMX_VIDEO_DolbyVisionLevelUhd30},
+ {8, OMX_VIDEO_DolbyVisionLevelUhd48},
+ {9, OMX_VIDEO_DolbyVisionLevelUhd60},
+ };
+ // set rpuAssoc
+ if (rpu_present_flag && el_present_flag && !bl_present_flag) {
+ format->setInt32("rpuAssoc", 1);
+ }
+ // set profile & level if they are recognized
+ OMX_VIDEO_DOLBYVISIONPROFILETYPE codecProfile;
+ OMX_VIDEO_DOLBYVISIONLEVELTYPE codecLevel;
+ if (profiles.map(profile, &codecProfile)) {
+ format->setInt32("profile", codecProfile);
+ if (codecProfile == OMX_VIDEO_DolbyVisionProfileDvheSt ||
+ codecProfile == OMX_VIDEO_DolbyVisionProfileDvavSe) {
+ format->setInt32("bl_compatibility_id", bl_compatibility_id);
+ }
+ if (levels.map(level, &codecLevel)) {
+ format->setInt32("level", codecLevel);
+ }
+ }
+}
+
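To make the bit layout above concrete, here is a small hand-worked example with a hypothetical dvcc payload (values chosen by me, not from the source): a 24-byte box advertising profile 8 (dvheSt) at level 5 (Fhd60) with base-layer compatibility id 2.

// Hypothetical 24-byte dvcc payload, decoded with the same bit arithmetic as above.
const uint8_t dvcc[24] = {
    0x01, 0x00,  // dv_major.dv_minor = 1.0
    0x10,        // profile = 0x10 >> 1 = 8 -> OMX_VIDEO_DolbyVisionProfileDvheSt
    0x2D,        // level   = ((0x10 & 1) << 5) | ((0x2D >> 3) & 0x1f) = 5 -> OMX_VIDEO_DolbyVisionLevelFhd60
                 // rpu/el/bl present = (0x2D >> 2) & 1, (0x2D >> 1) & 1, 0x2D & 1 = 1, 0, 1
    0x20,        // bl_compatibility_id = 0x20 >> 4 = 2
    // remaining 19 bytes reserved / zero
};
// parseDolbyVisionProfileLevelFromDvcc() would set "profile", "level" and
// "bl_compatibility_id" on the format, but not "rpuAssoc" (el_present_flag is 0 here).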
static void parseH263ProfileLevelFromD263(const uint8_t *ptr, size_t size, sp<AMessage> &format) {
if (size < 7) {
return;
@@ -1411,6 +1473,12 @@
msg->setBuffer("csd-0", buffer);
}
+ if (meta->findData(kKeyDVCC, &type, &data, &size)) {
+ const uint8_t *ptr = (const uint8_t *)data;
+ ALOGV("DV: calling parseDolbyVisionProfileLevelFromDvcc with data size %zu", size);
+ parseDolbyVisionProfileLevelFromDvcc(ptr, size, msg);
+ }
+
*format = msg;
return OK;
@@ -1839,6 +1907,32 @@
meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
} else if (mime == MEDIA_MIMETYPE_VIDEO_AV1) {
meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
+ } else if (mime == MEDIA_MIMETYPE_VIDEO_DOLBY_VISION) {
+ if (msg->findBuffer("csd-2", &csd2)) {
+ meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
+
+ // Copy at most dvcc_size bytes; csd-2 may be smaller than the scratch buffer.
+ size_t dvcc_size = 1024;
+ uint8_t dvcc[dvcc_size];
+ memcpy(dvcc, csd2->data(), csd2->size() < dvcc_size ? csd2->size() : dvcc_size);
+ const uint8_t profile = dvcc[2] >> 1;
+
+ if (profile > 1 && profile < 9) {
+ std::vector<uint8_t> hvcc(csd0size + 1024);
+ size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
+ meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
+ } else if (DolbyVisionProfileDvav110 == profile) {
+ meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
+ } else {
+ sp<ABuffer> csd1;
+ if (msg->findBuffer("csd-1", &csd1)) {
+ std::vector<char> avcc(csd0size + csd1->size() + 1024);
+ size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
+ meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
+ }
+ }
+ } else {
+ ALOGW("We need csd-2!!. %s", msg->debugString().c_str());
+ }
} else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
} else if (mime == MEDIA_MIMETYPE_AUDIO_OPUS) {
@@ -1885,8 +1979,18 @@
meta->setData(kKeyStreamHeader, 'mdat', csd0->data(), csd0->size());
} else if (msg->findBuffer("d263", &csd0)) {
meta->setData(kKeyD263, kTypeD263, csd0->data(), csd0->size());
- }
+ } else if (mime == MEDIA_MIMETYPE_VIDEO_DOLBY_VISION && msg->findBuffer("csd-2", &csd2)) {
+ meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
+ // Remove CSD-2 from the data here to avoid duplicate data in meta
+ meta->remove(kKeyOpaqueCSD2);
+
+ if (msg->findBuffer("csd-avc", &csd0)) {
+ meta->setData(kKeyAVCC, kTypeAVCC, csd0->data(), csd0->size());
+ } else if (msg->findBuffer("csd-hevc", &csd0)) {
+ meta->setData(kKeyHVCC, kTypeHVCC, csd0->data(), csd0->size());
+ }
+ }
// XXX TODO add whatever other keys there are
#if 0
@@ -1895,22 +1999,6 @@
#endif
}
-AString MakeUserAgent() {
- AString ua;
- ua.append("stagefright/1.2 (Linux;Android ");
-
-#if (PROPERTY_VALUE_MAX < 8)
-#error "PROPERTY_VALUE_MAX must be at least 8"
-#endif
-
- char value[PROPERTY_VALUE_MAX];
- property_get("ro.build.version.release", value, "Unknown");
- ua.append(value);
- ua.append(")");
-
- return ua;
-}
-
status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink,
const sp<MetaData>& meta)
{
@@ -2099,39 +2187,6 @@
return AudioSystem::isOffloadSupported(info);
}
-AString uriDebugString(const AString &uri, bool incognito) {
- if (incognito) {
- return AString("<URI suppressed>");
- }
-
- if (property_get_bool("media.stagefright.log-uri", false)) {
- return uri;
- }
-
- // find scheme
- AString scheme;
- const char *chars = uri.c_str();
- for (size_t i = 0; i < uri.size(); i++) {
- const char c = chars[i];
- if (!isascii(c)) {
- break;
- } else if (isalpha(c)) {
- continue;
- } else if (i == 0) {
- // first character must be a letter
- break;
- } else if (isdigit(c) || c == '+' || c == '.' || c =='-') {
- continue;
- } else if (c != ':') {
- break;
- }
- scheme = AString(uri, 0, i);
- scheme.append("://<suppressed>");
- return scheme;
- }
- return AString("<no-scheme URI suppressed>");
-}
-
HLSTime::HLSTime(const sp<AMessage>& meta) :
mSeq(-1),
mTimeUs(-1LL),
@@ -2230,36 +2285,4 @@
}
}
-AString nameForFd(int fd) {
- const size_t SIZE = 256;
- char buffer[SIZE];
- AString result;
- snprintf(buffer, SIZE, "/proc/%d/fd/%d", getpid(), fd);
- struct stat s;
- if (lstat(buffer, &s) == 0) {
- if ((s.st_mode & S_IFMT) == S_IFLNK) {
- char linkto[256];
- int len = readlink(buffer, linkto, sizeof(linkto));
- if(len > 0) {
- if(len > 255) {
- linkto[252] = '.';
- linkto[253] = '.';
- linkto[254] = '.';
- linkto[255] = 0;
- } else {
- linkto[len] = 0;
- }
- result.append(linkto);
- }
- } else {
- result.append("unexpected type for ");
- result.append(buffer);
- }
- } else {
- result.append("couldn't open ");
- result.append(buffer);
- }
- return result;
-}
-
} // namespace android
diff --git a/media/libstagefright/VideoFrameScheduler2.cpp b/media/libstagefright/VideoFrameScheduler2.cpp
deleted file mode 100644
index 23671f2..0000000
--- a/media/libstagefright/VideoFrameScheduler2.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "VideoFrameScheduler2"
-#include <utils/Log.h>
-#define ATRACE_TAG ATRACE_TAG_VIDEO
-#include <utils/Mutex.h>
-#include <utils/Thread.h>
-#include <utils/Trace.h>
-
-#include <algorithm>
-#include <jni.h>
-#include <math.h>
-
-#include <android/choreographer.h>
-#include <android/looper.h>
-#include <media/stagefright/VideoFrameScheduler2.h>
-#include <mediaplayer2/JavaVMHelper.h>
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AUtils.h>
-
-namespace android {
-
-static void getVsyncOffset(nsecs_t* appVsyncOffsetPtr, nsecs_t* sfVsyncOffsetPtr);
-
-/* ======================================================================= */
-/* VsyncTracker */
-/* ======================================================================= */
-
-class VsyncTracker : public RefBase{
-public:
- VsyncTracker();
- ~VsyncTracker() {}
- nsecs_t getVsyncPeriod();
- nsecs_t getVsyncTime(nsecs_t periodOffset);
- void addSample(nsecs_t timestamp);
-
-private:
- static const int kMaxSamples = 32;
- static const int kMinSamplesForUpdate = 6;
- int mNumSamples;
- int mFirstSample;
- nsecs_t mReferenceTime;
- nsecs_t mPhase;
- nsecs_t mPeriod;
- nsecs_t mTimestampSamples[kMaxSamples];
- Mutex mLock;
-
- void updateModelLocked();
-};
-
-VsyncTracker::VsyncTracker()
- : mNumSamples(0),
- mFirstSample(0),
- mReferenceTime(0),
- mPhase(0),
- mPeriod(0) {
- for (int i = 0; i < kMaxSamples; i++) {
- mTimestampSamples[i] = 0;
- }
-}
-
-nsecs_t VsyncTracker::getVsyncPeriod() {
- Mutex::Autolock dataLock(mLock);
- return mPeriod;
-}
-
-nsecs_t VsyncTracker::getVsyncTime(nsecs_t periodOffset) {
- Mutex::Autolock dataLock(mLock);
- const nsecs_t now = systemTime();
- nsecs_t phase = mReferenceTime + mPhase;
-
- // result = (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase
- // prevent overflow
- nsecs_t result = (now - phase) / mPeriod;
- if (result > LONG_LONG_MAX - periodOffset - 1) {
- return LONG_LONG_MAX;
- } else {
- result += periodOffset + 1;
- }
- if (result > LONG_LONG_MAX / mPeriod) {
- return LONG_LONG_MAX;
- } else {
- result *= mPeriod;
- }
- if (result > LONG_LONG_MAX - phase) {
- return LONG_LONG_MAX;
- } else {
- result += phase;
- }
-
- return result;
-}
-
-void VsyncTracker::addSample(nsecs_t timestamp) {
- Mutex::Autolock dataLock(mLock);
- if (mNumSamples == 0) {
- mPhase = 0;
- mReferenceTime = timestamp;
- }
- int idx = (mFirstSample + mNumSamples) % kMaxSamples;
- mTimestampSamples[idx] = timestamp;
- if (mNumSamples < kMaxSamples) {
- mNumSamples++;
- } else {
- mFirstSample = (mFirstSample + 1) % kMaxSamples;
- }
- updateModelLocked();
-}
-
-void VsyncTracker::updateModelLocked() {
- if (mNumSamples < kMinSamplesForUpdate) {
- return;
- }
- nsecs_t durationSum = 0;
- nsecs_t minDuration = LONG_MAX;
- nsecs_t maxDuration = 0;
-
- for (int i = 1; i < mNumSamples; i++) {
- int idx = (mFirstSample + i) % kMaxSamples;
- int prev = (idx + kMaxSamples - 1) % kMaxSamples;
- long duration = mTimestampSamples[idx] - mTimestampSamples[prev];
- durationSum += duration;
- if (minDuration > duration) { minDuration = duration; }
- if (maxDuration < duration) { maxDuration = duration; }
- }
-
- durationSum -= (minDuration + maxDuration);
- mPeriod = durationSum / (mNumSamples - 3);
-
- double sampleAvgX = 0.0;
- double sampleAvgY = 0.0;
- double scale = 2.0 * M_PI / (double) mPeriod;
-
- for (int i = 1; i < mNumSamples; i++) {
- int idx = (mFirstSample + i) % kMaxSamples;
- long sample = mTimestampSamples[idx] - mReferenceTime;
- double samplePhase = (double) (sample % mPeriod) * scale;
- sampleAvgX += cos(samplePhase);
- sampleAvgY += sin(samplePhase);
- }
-
- sampleAvgX /= (double) mNumSamples - 1.0;
- sampleAvgY /= (double) mNumSamples - 1.0;
- mPhase = (long) (atan2(sampleAvgY, sampleAvgX) / scale);
-}
-
-static void frameCallback(int64_t frameTimeNanos, void* data) {
- if (data == NULL) {
- return;
- }
- sp<VsyncTracker> vsyncTracker(static_cast<VsyncTracker*>(data));
- vsyncTracker->addSample(frameTimeNanos);
- AChoreographer_postFrameCallback64(AChoreographer_getInstance(),
- frameCallback, static_cast<void*>(vsyncTracker.get()));
-}
-
-/* ======================================================================= */
-/* JNI */
-/* ======================================================================= */
-
-static void getVsyncOffset(nsecs_t* appVsyncOffsetPtr, nsecs_t* sfVsyncOffsetPtr) {
- static const nsecs_t kOneMillisecInNanosec = 1000000;
- static const nsecs_t kOneSecInNanosec = kOneMillisecInNanosec * 1000;
-
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass jDisplayManagerGlobalCls = env->FindClass(
- "android/hardware/display/DisplayManagerGlobal");
- jclass jDisplayCls = env->FindClass("android/view/Display");
-
- jmethodID jGetInstance = env->GetStaticMethodID(jDisplayManagerGlobalCls,
- "getInstance", "()Landroid/hardware/display/DisplayManagerGlobal;");
- jobject javaDisplayManagerGlobalObj = env->CallStaticObjectMethod(
- jDisplayManagerGlobalCls, jGetInstance);
-
- jfieldID jDEFAULT_DISPLAY = env->GetStaticFieldID(jDisplayCls, "DEFAULT_DISPLAY", "I");
- jint DEFAULT_DISPLAY = env->GetStaticIntField(jDisplayCls, jDEFAULT_DISPLAY);
-
- jmethodID jgetRealDisplay = env->GetMethodID(jDisplayManagerGlobalCls,
- "getRealDisplay", "(I)Landroid/view/Display;");
- jobject javaDisplayObj = env->CallObjectMethod(
- javaDisplayManagerGlobalObj, jgetRealDisplay, DEFAULT_DISPLAY);
-
- jmethodID jGetRefreshRate = env->GetMethodID(jDisplayCls, "getRefreshRate", "()F");
- jfloat javaRefreshRate = env->CallFloatMethod(javaDisplayObj, jGetRefreshRate);
- nsecs_t vsyncPeriod = (nsecs_t) (kOneSecInNanosec / (float) javaRefreshRate);
-
- jmethodID jGetAppVsyncOffsetNanos = env->GetMethodID(
- jDisplayCls, "getAppVsyncOffsetNanos", "()J");
- jlong javaAppVsyncOffset = env->CallLongMethod(javaDisplayObj, jGetAppVsyncOffsetNanos);
- *appVsyncOffsetPtr = (nsecs_t) javaAppVsyncOffset;
-
- jmethodID jGetPresentationDeadlineNanos = env->GetMethodID(
- jDisplayCls, "getPresentationDeadlineNanos", "()J");
- jlong javaPresentationDeadline = env->CallLongMethod(
- javaDisplayObj, jGetPresentationDeadlineNanos);
-
- *sfVsyncOffsetPtr = vsyncPeriod - ((nsecs_t) javaPresentationDeadline - kOneMillisecInNanosec);
-}
-
-/* ======================================================================= */
-/* Choreographer Thread */
-/* ======================================================================= */
-
-struct ChoreographerThread : public Thread {
- ChoreographerThread(bool canCallJava);
- status_t init(void* data);
- virtual status_t readyToRun() override;
- virtual bool threadLoop() override;
-
-protected:
- virtual ~ChoreographerThread() {}
-
-private:
- DISALLOW_EVIL_CONSTRUCTORS(ChoreographerThread);
- void* mData;
-};
-
-ChoreographerThread::ChoreographerThread(bool canCallJava) : Thread(canCallJava) {
-}
-
-status_t ChoreographerThread::init(void* data) {
- if (data == NULL) {
- return NO_INIT;
- }
- mData = data;
- return OK;
-}
-
-status_t ChoreographerThread::readyToRun() {
- ALooper_prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
- if (AChoreographer_getInstance() == NULL) {
- return NO_INIT;
- }
- AChoreographer_postFrameCallback64(AChoreographer_getInstance(), frameCallback, mData);
- return OK;
-}
-
-bool ChoreographerThread::threadLoop() {
- ALooper_pollOnce(-1, nullptr, nullptr, nullptr);
- return true;
-}
-
-/* ======================================================================= */
-/* Frame Scheduler */
-/* ======================================================================= */
-
-VideoFrameScheduler2::VideoFrameScheduler2() : VideoFrameSchedulerBase() {
-
- getVsyncOffset(&mAppVsyncOffset, &mSfVsyncOffset);
-
- Mutex::Autolock threadLock(mLock);
- mChoreographerThread = new ChoreographerThread(true);
-
- mVsyncTracker = new VsyncTracker();
- if (mChoreographerThread->init(static_cast<void*>(mVsyncTracker.get())) != OK) {
- mChoreographerThread.clear();
- }
- if (mChoreographerThread != NULL && mChoreographerThread->run("Choreographer") != OK) {
- mChoreographerThread.clear();
- }
-}
-
-void VideoFrameScheduler2::updateVsync() {
- mVsyncTime = 0;
- mVsyncPeriod = 0;
-
- if (mVsyncTracker != NULL) {
- mVsyncPeriod = mVsyncTracker->getVsyncPeriod();
- mVsyncTime = mVsyncTracker->getVsyncTime(mSfVsyncOffset - mAppVsyncOffset);
- }
- mVsyncRefreshAt = systemTime(SYSTEM_TIME_MONOTONIC) + kVsyncRefreshPeriod;
-}
-
-void VideoFrameScheduler2::release() {
- // Do not change order
- {
- Mutex::Autolock threadLock(mLock);
- mChoreographerThread->requestExitAndWait();
- mChoreographerThread.clear();
- }
-
- mVsyncTracker.clear();
-}
-
-VideoFrameScheduler2::~VideoFrameScheduler2() {
- release();
-}
-
-} // namespace android
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
index db67034..6719bab 100644
--- a/media/libstagefright/bqhelper/Android.bp
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -27,7 +27,6 @@
"libcutils",
"libhidlbase",
"libhidlmemory",
- "libhidltransport",
"liblog",
"libstagefright_foundation",
"libui",
@@ -39,7 +38,6 @@
"android.hidl.token@1.0-utils",
"libbase",
"libEGL",
- "libhwbinder",
"libnativewindow",
"libvndksupport",
],
diff --git a/media/libstagefright/bqhelper/GraphicBufferSource.cpp b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
index 59317e7..de9d12c 100644
--- a/media/libstagefright/bqhelper/GraphicBufferSource.cpp
+++ b/media/libstagefright/bqhelper/GraphicBufferSource.cpp
@@ -29,6 +29,7 @@
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/foundation/FileDescriptor.h>
+#include <android-base/properties.h>
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
#include <gui/BufferItem.h>
@@ -800,6 +801,9 @@
}
}
+#ifdef __clang__
+__attribute__((no_sanitize("integer")))
+#endif
bool GraphicBufferSource::calculateCodecTimestamp_l(
nsecs_t bufferTimeNs, int64_t *codecTimeUs) {
int64_t timeUs = bufferTimeNs / 1000;
@@ -816,14 +820,15 @@
mPrevFrameUs = mBaseFrameUs =
std::llround((timeUs * mCaptureFps) / mFps);
mFrameCount = 0;
- } else {
- // snap to nearest capture point
+ } else if (mSnapTimestamps) {
double nFrames = (timeUs - mPrevCaptureUs) * mCaptureFps / 1000000;
if (nFrames < 0.5 - kTimestampFluctuation) {
// skip this frame as it's too close to previous capture
- ALOGD("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
+ ALOGD("skipping frame, timeUs %lld",
+ static_cast<long long>(timeUs));
return false;
}
+ // snap to nearest capture point
if (nFrames <= 1.0) {
nFrames = 1.0;
}
@@ -832,6 +837,22 @@
mFrameCount * 1000000 / mCaptureFps);
mPrevFrameUs = mBaseFrameUs + std::llround(
mFrameCount * 1000000 / mFps);
+ } else {
+ if (timeUs <= mPrevCaptureUs) {
+ if (mFrameDropper != NULL && mFrameDropper->disabled()) {
+ // Warn only, client has disabled frame drop logic possibly for image
+ // encoding cases where camera's ZSL mode could send out of order frames.
+ ALOGW("Received frame that's going backward in time");
+ } else {
+ // Drop the frame if it's going backward in time. Bad timestamp
+ // could disrupt encoder's rate control completely.
+ ALOGW("Dropping frame that's going backward in time");
+ return false;
+ }
+ }
+ mPrevCaptureUs = timeUs;
+ mPrevFrameUs = mBaseFrameUs + std::llround(
+ (timeUs - mBaseCaptureUs) * (mCaptureFps / mFps));
}
ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
@@ -1359,6 +1380,12 @@
mFps = fps;
mCaptureFps = captureFps;
+ if (captureFps > fps) {
+ mSnapTimestamps = 1 == base::GetIntProperty(
+ "debug.stagefright.snap_timestamps", int64_t(0));
+ } else {
+ mSnapTimestamps = false;
+ }
return OK;
}
diff --git a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
index b4c4d4a..ed5d7cb 100644
--- a/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
+++ b/media/libstagefright/bqhelper/include/media/stagefright/bqhelper/GraphicBufferSource.h
@@ -461,12 +461,33 @@
// Slow motion mode is enabled if both encoding and capture frame rates are
// defined and the encoding frame rate is less than half the capture frame
// rate. In this mode, the source is expected to produce frames with an even
- // timestamp interval (after rounding) with the configured capture fps. The
- // first source timestamp is used as the source base time. Afterwards, the
- // timestamp of each source frame is snapped to the nearest expected capture
- // timestamp and scaled to match the configured encoding frame rate.
+ // timestamp interval (after rounding) with the configured capture fps.
+ //
+ // These modes must be configured by calling setTimeLapseConfig() before
+ // using this source.
+ //
+ // Timestamp snapping for slow motion recording
+ // ============================================
+ //
+ // When the slow motion mode is configured with setTimeLapseConfig(), the
+ // property "debug.stagefright.snap_timestamps" will be checked. If the
+ // value of the property is set to any value other than 1, mSnapTimestamps
+ // will be set to false. Otherwise, mSnapTimestamps will be set to true.
+ // (mSnapTimestamps will be false for time lapse recording regardless of the
+ // value of the property.)
+ //
+ // If mSnapTimestamps is true, i.e., timestamp snapping is enabled, the
+ // first source timestamp will be used as the source base time; afterwards,
+ // the timestamp of each source frame will be snapped to the nearest
+ // expected capture timestamp and scaled to match the configured encoding
+ // frame rate.
+ //
+ // If timestamp snapping is disabled, the timestamp of source frames will
+ // be scaled to match the ratio between the configured encoding frame rate
+ // and the configured capture frame rate.
- // These modes must be enabled before using this source.
+ // whether timestamps will be snapped
+ bool mSnapTimestamps{true};
// adjusted capture timestamp of the base frame
int64_t mBaseCaptureUs;
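A quick numeric illustration of the two timestamp paths documented above (my own numbers, assuming 120 fps capture encoded at 30 fps, i.e. 4x slow motion, with all times in microseconds):

// Hypothetical numbers for the snap vs. scale behavior described in the comment above.
const double captureFps = 120.0, encodeFps = 30.0;   // 4x slow motion
const int64_t baseCaptureUs = 0, baseFrameUs = 0;
const int64_t timeUs = 8533;                         // source frame: ~1/120 s plus 200 us jitter

// Snapping enabled: the frame is treated as the nearest capture point (frame #1),
// so the emitted timestamp is exactly one encode-frame interval, jitter removed.
int64_t frameCount = 1;
int64_t snappedUs = baseFrameUs + std::llround(frameCount * 1000000 / encodeFps);          // 33333

// Snapping disabled: the elapsed capture time is simply rescaled by captureFps/encodeFps,
// so the 200 us of jitter shows up (scaled by 4) in the output timestamp.
int64_t scaledUs = baseFrameUs + std::llround((timeUs - baseCaptureUs) * (captureFps / encodeFps));  // 34132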
diff --git a/media/libstagefright/codecs/on2/enc/Android.bp b/media/libstagefright/codecs/on2/enc/Android.bp
index cd69e0d..705e554 100644
--- a/media/libstagefright/codecs/on2/enc/Android.bp
+++ b/media/libstagefright/codecs/on2/enc/Android.bp
@@ -21,4 +21,5 @@
},
shared_libs: ["libvpx"],
+ header_libs: ["libbase_headers"],
}
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index cf91405..4711315 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -31,9 +31,14 @@
namespace android {
-static int ALIGN(int x, int y) {
- // y must be a power of 2.
- return (x + y - 1) & ~(y - 1);
+inline void initDstYUV(
+ const android_ycbcr &ycbcr, int32_t cropTop, int32_t cropLeft,
+ uint8_t **dst_y, uint8_t **dst_u, uint8_t **dst_v) {
+ *dst_y = (uint8_t *)ycbcr.y + cropTop * ycbcr.ystride + cropLeft;
+
+ int32_t c_offset = (cropTop / 2) * ycbcr.cstride + cropLeft / 2;
+ *dst_v = (uint8_t *)ycbcr.cr + c_offset;
+ *dst_u = (uint8_t *)ycbcr.cb + c_offset;
}
SoftwareRenderer::SoftwareRenderer(
@@ -300,20 +305,14 @@
const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
- uint8_t *dst_y = (uint8_t *)ycbcr.y;
- uint8_t *dst_v = (uint8_t *)ycbcr.cr;
- uint8_t *dst_u = (uint8_t *)ycbcr.cb;
- size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-
- dst_y += mCropTop * buf->stride + mCropLeft;
- dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
- dst_u += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+ uint8_t *dst_y, *dst_u, *dst_v;
+ initDstYUV(ycbcr, mCropTop, mCropLeft, &dst_y, &dst_u, &dst_v);
for (int y = 0; y < mCropHeight; ++y) {
memcpy(dst_y, src_y, mCropWidth);
src_y += mStride;
- dst_y += buf->stride;
+ dst_y += ycbcr.ystride;
}
for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
@@ -322,22 +321,16 @@
src_u += mStride / 2;
src_v += mStride / 2;
- dst_u += dst_c_stride;
- dst_v += dst_c_stride;
+ dst_u += ycbcr.cstride;
+ dst_v += ycbcr.cstride;
}
} else if (mColorFormat == OMX_COLOR_FormatYUV420Planar16) {
const uint8_t *src_y = (const uint8_t *)data + mCropTop * mStride + mCropLeft * 2;
const uint8_t *src_u = (const uint8_t *)data + mStride * mHeight + mCropTop * mStride / 4;
const uint8_t *src_v = (const uint8_t *)src_u + mStride * mHeight / 4;
- uint8_t *dst_y = (uint8_t *)ycbcr.y;
- uint8_t *dst_v = (uint8_t *)ycbcr.cr;
- uint8_t *dst_u = (uint8_t *)ycbcr.cb;
- size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-
- dst_y += mCropTop * buf->stride + mCropLeft;
- dst_v += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
- dst_u += (mCropTop / 2) * dst_c_stride + mCropLeft / 2;
+ uint8_t *dst_y, *dst_u, *dst_v;
+ initDstYUV(ycbcr, mCropTop, mCropLeft, &dst_y, &dst_u, &dst_v);
for (int y = 0; y < mCropHeight; ++y) {
for (int x = 0; x < mCropWidth; ++x) {
@@ -345,7 +338,7 @@
}
src_y += mStride;
- dst_y += buf->stride;
+ dst_y += ycbcr.ystride;
}
for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
@@ -356,8 +349,8 @@
src_u += mStride / 2;
src_v += mStride / 2;
- dst_u += dst_c_stride;
- dst_v += dst_c_stride;
+ dst_u += ycbcr.cstride;
+ dst_v += ycbcr.cstride;
}
} else if (mColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
|| mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
@@ -368,20 +361,14 @@
src_y += mCropLeft + mCropTop * mWidth;
src_uv += (mCropLeft + mCropTop * mWidth) / 2;
- uint8_t *dst_y = (uint8_t *)ycbcr.y;
- uint8_t *dst_v = (uint8_t *)ycbcr.cr;
- uint8_t *dst_u = (uint8_t *)ycbcr.cb;
- size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
-
- dst_y += mCropTop * buf->stride + mCropLeft;
- dst_v += (mCropTop/2) * dst_c_stride + mCropLeft/2;
- dst_u += (mCropTop/2) * dst_c_stride + mCropLeft/2;
+ uint8_t *dst_y, *dst_u, *dst_v;
+ initDstYUV(ycbcr, mCropTop, mCropLeft, &dst_y, &dst_u, &dst_v);
for (int y = 0; y < mCropHeight; ++y) {
memcpy(dst_y, src_y, mCropWidth);
src_y += mWidth;
- dst_y += buf->stride;
+ dst_y += ycbcr.ystride;
}
for (int y = 0; y < (mCropHeight + 1) / 2; ++y) {
@@ -392,8 +379,8 @@
}
src_uv += mWidth;
- dst_u += dst_c_stride;
- dst_v += dst_c_stride;
+ dst_u += ycbcr.cstride;
+ dst_v += ycbcr.cstride;
}
} else if (mColorFormat == OMX_COLOR_Format24bitRGB888) {
uint8_t* srcPtr = (uint8_t*)data + mWidth * mCropTop * 3 + mCropLeft * 3;
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index a07eb8c..04041eb 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -77,7 +77,7 @@
<Limit name="bitrate" range="1-40000000" />
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.android.gav1.decoder" type="video/av01">
+ <MediaCodec name="c2.android.av1.decoder" type="video/av01">
<Limit name="size" min="96x96" max="1920x1080" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 9532ba6..67d3f1a 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -182,7 +182,7 @@
</Variant>
<Feature name="adaptive-playback" />
</MediaCodec>
- <MediaCodec name="c2.android.gav1.decoder" type="video/av01" variant="!slow-cpu">
+ <MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
<Limit name="size" min="2x2" max="1920x1080" />
<Limit name="alignment" value="2x2" />
<Limit name="block-size" value="16x16" />
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
index b1f62c7..88f30c4 100644
--- a/media/libstagefright/filters/Android.bp
+++ b/media/libstagefright/filters/Android.bp
@@ -23,6 +23,10 @@
"-Wall",
],
+ header_libs: [
+ "libmediadrm_headers",
+ ],
+
shared_libs: [
"libgui",
"libmedia",
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index df66ac6..7752bda 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -22,7 +22,6 @@
#include "AMessage.h"
-#include <binder/Parcel.h>
#include <log/log.h>
#include "AAtomizer.h"
@@ -34,6 +33,10 @@
#include <media/stagefright/foundation/hexdump.h>
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
namespace android {
extern ALooperRoster gLooperRoster;
@@ -643,6 +646,7 @@
return s;
}
+#ifndef __ANDROID_VNDK__
// static
sp<AMessage> AMessage::FromParcel(const Parcel &parcel, size_t maxNestingLevel) {
int32_t what = parcel.readInt32();
@@ -809,6 +813,7 @@
}
}
}
+#endif // __ANDROID_VNDK__
sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
if (other == NULL) {
diff --git a/media/libstagefright/foundation/AString.cpp b/media/libstagefright/foundation/AString.cpp
index fb51cc5..4bd186c 100644
--- a/media/libstagefright/foundation/AString.cpp
+++ b/media/libstagefright/foundation/AString.cpp
@@ -23,11 +23,14 @@
#include <stdlib.h>
#include <string.h>
-#include <binder/Parcel.h>
#include <utils/String8.h>
#include "ADebug.h"
#include "AString.h"
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
namespace android {
// static
@@ -362,11 +365,10 @@
return !strcasecmp(mData + mSize - suffixLen, suffix);
}
+#ifndef __ANDROID_VNDK__
// static
AString AString::FromParcel(const Parcel &parcel) {
size_t size = static_cast<size_t>(parcel.readInt32());
- // The static analyzer incorrectly reports a false-positive here in c++17.
- // https://bugs.llvm.org/show_bug.cgi?id=38176 . NOLINTNEXTLINE
return AString(static_cast<const char *>(parcel.readInplace(size)), size);
}
@@ -378,6 +380,7 @@
}
return err;
}
+#endif
AString AStringPrintf(const char *format, ...) {
va_list ap;
diff --git a/media/libstagefright/foundation/Android.bp b/media/libstagefright/foundation/Android.bp
index 533cd72..5485f6d 100644
--- a/media/libstagefright/foundation/Android.bp
+++ b/media/libstagefright/foundation/Android.bp
@@ -65,6 +65,7 @@
"AudioPresentationInfo.cpp",
"ByteUtils.cpp",
"ColorUtils.cpp",
+ "FoundationUtils.cpp",
"MediaBuffer.cpp",
"MediaBufferBase.cpp",
"MediaBufferGroup.cpp",
@@ -78,6 +79,17 @@
"hexdump.cpp",
],
+ target: {
+ vendor: {
+ exclude_shared_libs: [
+ "libbinder",
+ ],
+ cflags: [
+ "-DNO_IMEMORY",
+ ],
+ },
+ },
+
clang: true,
sanitize: {
diff --git a/media/libstagefright/foundation/FoundationUtils.cpp b/media/libstagefright/foundation/FoundationUtils.cpp
new file mode 100644
index 0000000..8285e4c
--- /dev/null
+++ b/media/libstagefright/foundation/FoundationUtils.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FoundationUtils"
+#include <utils/Log.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <cutils/properties.h>
+#include <media/stagefright/foundation/AString.h>
+
+namespace android {
+
+AString uriDebugString(const AString &uri, bool incognito) {
+ if (incognito) {
+ return AString("<URI suppressed>");
+ }
+
+ if (property_get_bool("media.stagefright.log-uri", false)) {
+ return uri;
+ }
+
+ // find scheme
+ AString scheme;
+ const char *chars = uri.c_str();
+ for (size_t i = 0; i < uri.size(); i++) {
+ const char c = chars[i];
+ if (!isascii(c)) {
+ break;
+ } else if (isalpha(c)) {
+ continue;
+ } else if (i == 0) {
+ // first character must be a letter
+ break;
+ } else if (isdigit(c) || c == '+' || c == '.' || c == '-') {
+ continue;
+ } else if (c != ':') {
+ break;
+ }
+ scheme = AString(uri, 0, i);
+ scheme.append("://<suppressed>");
+ return scheme;
+ }
+ return AString("<no-scheme URI suppressed>");
+}
+
+AString MakeUserAgent() {
+ AString ua;
+ ua.append("stagefright/1.2 (Linux;Android ");
+
+#if (PROPERTY_VALUE_MAX < 8)
+#error "PROPERTY_VALUE_MAX must be at least 8"
+#endif
+
+ char value[PROPERTY_VALUE_MAX];
+ property_get("ro.build.version.release", value, "Unknown");
+ ua.append(value);
+ ua.append(")");
+
+ return ua;
+}
+
+AString nameForFd(int fd) {
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ AString result;
+ snprintf(buffer, SIZE, "/proc/%d/fd/%d", getpid(), fd);
+ struct stat s;
+ if (lstat(buffer, &s) == 0) {
+ if ((s.st_mode & S_IFMT) == S_IFLNK) {
+ char linkto[256];
+ int len = readlink(buffer, linkto, sizeof(linkto));
+ if (len > 0) {
+ if (len > 255) {
+ linkto[252] = '.';
+ linkto[253] = '.';
+ linkto[254] = '.';
+ linkto[255] = 0;
+ } else {
+ linkto[len] = 0;
+ }
+ result.append(linkto);
+ }
+ } else {
+ result.append("unexpected type for ");
+ result.append(buffer);
+ }
+ } else {
+ result.append("couldn't open ");
+ result.append(buffer);
+ }
+ return result;
+}
+
+} // namespace android
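
Note: the new FoundationUtils.cpp collects uriDebugString(), MakeUserAgent() and nameForFd() so that callers of libstagefright_foundation no longer need the heavier stagefright Utils. A minimal caller sketch, not part of this patch (logSourceInfo() and the log tag are hypothetical):

    #define LOG_TAG "FoundationUtilsExample"
    #include <utils/Log.h>
    #include <media/stagefright/FoundationUtils.h>
    #include <media/stagefright/foundation/AString.h>

    using namespace android;

    // Hypothetical helper, only to show the moved APIs in use.
    static void logSourceInfo(int fd, const AString &uri) {
        // uriDebugString() redacts the URI unless media.stagefright.log-uri is set.
        ALOGV("source %s (fd -> %s), user agent: %s",
                uriDebugString(uri).c_str(),
                nameForFd(fd).c_str(),
                MakeUserAgent().c_str());
    }
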
diff --git a/media/libstagefright/foundation/MediaBuffer.cpp b/media/libstagefright/foundation/MediaBuffer.cpp
index 9beac05..8e245dc 100644
--- a/media/libstagefright/foundation/MediaBuffer.cpp
+++ b/media/libstagefright/foundation/MediaBuffer.cpp
@@ -72,7 +72,7 @@
}
} else {
getSharedControl()->clear();
- mData = (uint8_t *)mMemory->pointer() + sizeof(SharedControl);
+ mData = (uint8_t *)mMemory->unsecurePointer() + sizeof(SharedControl);
ALOGV("Allocated shared mem buffer of size %zu @ %p", size, mData);
}
}
diff --git a/media/libstagefright/foundation/MediaBufferGroup.cpp b/media/libstagefright/foundation/MediaBufferGroup.cpp
index 84ff9a6..3c25047 100644
--- a/media/libstagefright/foundation/MediaBufferGroup.cpp
+++ b/media/libstagefright/foundation/MediaBufferGroup.cpp
@@ -75,7 +75,7 @@
for (size_t i = 0; i < buffers; ++i) {
sp<IMemory> mem = memoryDealer->allocate(augmented_size);
- if (mem.get() == nullptr || mem->pointer() == nullptr) {
+ if (mem.get() == nullptr || mem->unsecurePointer() == nullptr) {
ALOGW("Only allocated %zu shared buffers of size %zu", i, buffer_size);
break;
}
diff --git a/media/libstagefright/foundation/MetaData.cpp b/media/libstagefright/foundation/MetaData.cpp
index 1d0a607..8174597 100644
--- a/media/libstagefright/foundation/MetaData.cpp
+++ b/media/libstagefright/foundation/MetaData.cpp
@@ -17,7 +17,6 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MetaData"
#include <inttypes.h>
-#include <binder/Parcel.h>
#include <utils/KeyedVector.h>
#include <utils/Log.h>
@@ -29,6 +28,10 @@
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MetaData.h>
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
namespace android {
@@ -45,6 +48,7 @@
MetaData::~MetaData() {
}
+#ifndef __ANDROID_VNDK__
/* static */
sp<MetaData> MetaData::createFromParcel(const Parcel &parcel) {
@@ -52,6 +56,7 @@
meta->updateFromParcel(parcel);
return meta;
}
+#endif
} // namespace android
diff --git a/media/libstagefright/foundation/MetaDataBase.cpp b/media/libstagefright/foundation/MetaDataBase.cpp
index bfea6f1..4b439c6 100644
--- a/media/libstagefright/foundation/MetaDataBase.cpp
+++ b/media/libstagefright/foundation/MetaDataBase.cpp
@@ -17,7 +17,6 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MetaDataBase"
#include <inttypes.h>
-#include <binder/Parcel.h>
#include <utils/KeyedVector.h>
#include <utils/Log.h>
@@ -29,6 +28,10 @@
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MetaDataBase.h>
+#ifndef __ANDROID_VNDK__
+#include <binder/Parcel.h>
+#endif
+
namespace android {
struct MetaDataBase::typed_data {
@@ -449,6 +452,7 @@
}
}
+#ifndef __ANDROID_VNDK__
status_t MetaDataBase::writeToParcel(Parcel &parcel) {
status_t ret;
size_t numItems = mInternalData->mItems.size();
@@ -528,6 +532,7 @@
ALOGW("no metadata in parcel");
return UNKNOWN_ERROR;
}
+#endif
} // namespace android
diff --git a/media/libstagefright/foundation/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
index 513e41f..f5687e0 100644
--- a/media/libstagefright/foundation/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -292,6 +292,10 @@
*opusHeadSize = data_size;
return true;
} else if (memcmp(AOPUS_CSD_MARKER_PREFIX, data, AOPUS_CSD_MARKER_PREFIX_SIZE) == 0) {
+ if (data_size < AOPUS_UNIFIED_CSD_MINSIZE || data_size > AOPUS_UNIFIED_CSD_MAXSIZE) {
+ ALOGD("Unexpected size for unified opus csd %zu", data_size);
+ return false;
+ }
size_t i = 0;
bool found = false;
while (i <= data_size - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE) {
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
index 742651e..b5d6666 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AMessage.h
@@ -63,6 +63,7 @@
AMessage();
AMessage(uint32_t what, const sp<const AHandler> &handler);
+#ifndef __ANDROID_VNDK__
// Construct an AMessage from a parcel.
// nestingAllowed determines how many levels AMessage can be nested inside
// AMessage. The default value here is arbitrarily set to 255.
@@ -87,6 +88,7 @@
// All items in the AMessage must have types that are recognized by
// FromParcel(); otherwise, TRESPASS error will occur.
void writeToParcel(Parcel *parcel) const;
+#endif
void setWhat(uint32_t what);
uint32_t what() const;
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
index 0f6299c..deef0d4 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/AString.h
@@ -89,8 +89,10 @@
void tolower();
+#ifndef __ANDROID_VNDK__
static AString FromParcel(const Parcel &parcel);
status_t writeToParcel(Parcel *parcel) const;
+#endif
private:
constexpr static const char *kEmptyString = "";
diff --git a/media/libstagefright/httplive/Android.bp b/media/libstagefright/httplive/Android.bp
index c0ee14e..12e7ca6 100644
--- a/media/libstagefright/httplive/Android.bp
+++ b/media/libstagefright/httplive/Android.bp
@@ -31,6 +31,7 @@
"liblog",
"libcrypto",
"libcutils",
+ "libdatasource",
"libmedia",
"libmediandk",
"libstagefright",
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
index c7e92cd..68f1de9 100644
--- a/media/libstagefright/httplive/HTTPDownloader.cpp
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -21,13 +21,13 @@
#include "HTTPDownloader.h"
#include "M3UParser.h"
+#include <datasource/MediaHTTP.h>
+#include <datasource/FileSource.h>
#include <media/DataSource.h>
#include <media/MediaHTTPConnection.h>
#include <media/MediaHTTPService.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/ClearMediaHTTP.h>
-#include <media/stagefright/ClearFileSource.h>
#include <openssl/aes.h>
#include <openssl/md5.h>
#include <utils/Mutex.h>
@@ -38,7 +38,7 @@
HTTPDownloader::HTTPDownloader(
const sp<MediaHTTPService> &httpService,
const KeyedVector<String8, String8> &headers) :
- mHTTPDataSource(new ClearMediaHTTP(httpService->makeHTTPConnection())),
+ mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())),
mExtraHeaders(headers),
mDisconnecting(false) {
}
@@ -91,7 +91,7 @@
if (reconnect) {
if (!strncasecmp(url, "file://", 7)) {
- mDataSource = new ClearFileSource(url + 7);
+ mDataSource = new FileSource(url + 7);
} else if (strncasecmp(url, "http://", 7)
&& strncasecmp(url, "https://", 8)) {
return ERROR_UNSUPPORTED;
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 9cf97c7..3bad015 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -34,6 +34,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <utils/Mutex.h>
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index cb97a3c..e0324e3 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -27,6 +27,7 @@
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <media/mediaplayer.h>
namespace android {
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 0950db0..fdcde29 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -28,17 +28,18 @@
#include "mpeg2ts/AnotherPacketSource.h"
#include "mpeg2ts/HlsSampleDecryptor.h"
+#include <datasource/DataURISource.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/MediaKeys.h>
#include <media/stagefright/foundation/avc_utils.h>
-#include <media/stagefright/DataURISource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MetaDataUtils.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <ctype.h>
#include <inttypes.h>
diff --git a/media/libstagefright/id3/Android.bp b/media/libstagefright/id3/Android.bp
index 7151d07..c8173cf 100644
--- a/media/libstagefright/id3/Android.bp
+++ b/media/libstagefright/id3/Android.bp
@@ -4,6 +4,7 @@
srcs: ["ID3.cpp"],
header_libs: [
+ "libmedia_headers",
"media_ndk_headers",
],
@@ -33,6 +34,7 @@
],
shared_libs: [
+ "libdatasource",
"libstagefright",
"libutils",
"liblog",
diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp
index 86e6adf..9984d85 100644
--- a/media/libstagefright/id3/testid3.cpp
+++ b/media/libstagefright/id3/testid3.cpp
@@ -22,7 +22,7 @@
#include <dirent.h>
#include <binder/ProcessState.h>
-#include <media/stagefright/FileSource.h>
+#include <datasource/FileSource.h>
#include <media/stagefright/foundation/ADebug.h>
#define MAXPATHLEN 256
diff --git a/media/libstagefright/include/ACodecBufferChannel.h b/media/libstagefright/include/ACodecBufferChannel.h
index 7c01e45..3a087d1 100644
--- a/media/libstagefright/include/ACodecBufferChannel.h
+++ b/media/libstagefright/include/ACodecBufferChannel.h
@@ -25,7 +25,7 @@
#include <media/openmax/OMX_Types.h>
#include <media/stagefright/CodecBase.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/IOMX.h>
namespace android {
diff --git a/media/libstagefright/include/FrameCaptureLayer.h b/media/libstagefright/include/FrameCaptureLayer.h
new file mode 100644
index 0000000..23fd5e5
--- /dev/null
+++ b/media/libstagefright/include/FrameCaptureLayer.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_CAPTURE_LAYER_H_
+#define FRAME_CAPTURE_LAYER_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <gui/IConsumerListener.h>
+#include <ui/GraphicTypes.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+
+namespace android {
+
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class Rect;
+class Surface;
+
+/*
+ * This class is a simple BufferQueue consumer implementation to
+ * obtain a decoded buffer output from MediaCodec. The output
+ * buffer is then sent to FrameCaptureProcessor to be converted
+ * to sRGB properly.
+ */
+struct FrameCaptureLayer : public ConsumerListener {
+ FrameCaptureLayer();
+ ~FrameCaptureLayer() = default;
+
+ // ConsumerListener
+ void onFrameAvailable(const BufferItem& /*item*/) override;
+ void onBuffersReleased() override;
+ void onSidebandStreamChanged() override;
+
+ status_t init();
+
+ sp<Surface> getSurface() { return mSurface; }
+
+ status_t capture(const ui::PixelFormat reqPixelFormat,
+ const Rect &sourceCrop, sp<GraphicBuffer> *outBuffer);
+
+private:
+ struct BufferLayer;
+ // Note: do not hold any sp ref to GraphicBufferSource.
+ // GraphicBufferSource holds an sp ref to us; holding an sp ref
+ // back to GraphicBufferSource would create a circular dependency
+ // and neither object would ever be released.
+ sp<IGraphicBufferConsumer> mConsumer;
+ sp<Surface> mSurface;
+ std::map<int32_t, sp<GraphicBuffer> > mSlotToBufferMap;
+
+ Mutex mLock;
+ Condition mCondition;
+ bool mFrameAvailable GUARDED_BY(mLock);
+
+ status_t acquireBuffer(BufferItem *bi);
+ status_t releaseBuffer(const BufferItem &bi);
+
+ DISALLOW_EVIL_CONSTRUCTORS(FrameCaptureLayer);
+};
+
+} // namespace android
+
+#endif // FRAME_CAPTURE_LAYER_H_
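
For orientation, a rough caller-side sketch of the FrameCaptureLayer API declared above. This is an assumption for illustration only; the real wiring lives in FrameDecoder, and width/height and the pixel format are placeholders:

    // Sketch under stated assumptions; not part of the patch.
    static sp<GraphicBuffer> captureOneFrame(int32_t width, int32_t height) {
        sp<FrameCaptureLayer> layer = new FrameCaptureLayer();
        if (layer->init() != OK) {
            return nullptr;                          // consumer could not be set up
        }
        sp<Surface> surface = layer->getSurface();   // use as the codec's output surface
        // ... decode one frame into |surface| via MediaCodec ...
        sp<GraphicBuffer> captured;
        if (layer->capture(ui::PixelFormat::RGBA_8888,
                Rect(width, height), &captured) != OK) {
            return nullptr;
        }
        return captured;                             // sRGB-converted output buffer
    }
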
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
index 1af6276..353c957 100644
--- a/media/libstagefright/include/FrameDecoder.h
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -33,7 +33,6 @@
class IMediaSource;
class MediaCodecBuffer;
class Surface;
-class SurfaceControl;
class VideoFrame;
struct FrameRect {
@@ -101,6 +100,7 @@
DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
};
+struct FrameCaptureLayer;
struct VideoFrameDecoder : public FrameDecoder {
VideoFrameDecoder(
@@ -133,15 +133,16 @@
bool *done) override;
private:
- sp<SurfaceControl> mSurfaceControl;
- sp<SurfaceControl> mParent;
+ sp<FrameCaptureLayer> mCaptureLayer;
VideoFrame *mFrame;
bool mIsAvcOrHevc;
MediaSource::ReadOptions::SeekMode mSeekMode;
int64_t mTargetTimeUs;
+ List<int64_t> mSampleDurations;
+ int64_t mDefaultSampleDurationUs;
- sp<Surface> initSurfaceControl();
- status_t captureSurfaceControl();
+ sp<Surface> initSurface();
+ status_t captureSurface();
};
struct ImageDecoder : public FrameDecoder {
diff --git a/media/libstagefright/include/SecureBuffer.h b/media/libstagefright/include/SecureBuffer.h
index cf7933a..c45e0e5 100644
--- a/media/libstagefright/include/SecureBuffer.h
+++ b/media/libstagefright/include/SecureBuffer.h
@@ -18,7 +18,7 @@
#define SECURE_BUFFER_H_
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaCodecBuffer.h>
namespace android {
diff --git a/media/libstagefright/include/media/stagefright/ACodec.h b/media/libstagefright/include/media/stagefright/ACodec.h
index 784fd36..7754de4 100644
--- a/media/libstagefright/include/media/stagefright/ACodec.h
+++ b/media/libstagefright/include/media/stagefright/ACodec.h
@@ -37,6 +37,15 @@
#define TRACK_BUFFER_TIMING 0
namespace android {
+namespace hardware {
+namespace media {
+namespace omx {
+namespace V1_0 {
+struct IGraphicBufferSource;
+} // namespace V1_0
+} // namespace omx
+} // namespace media
+} // namespace hardware
struct ABuffer;
class ACodecBufferChannel;
@@ -279,7 +288,7 @@
size_t mNumUndequeuedBuffers;
sp<DataConverter> mConverter[2];
- sp<IGraphicBufferSource> mGraphicBufferSource;
+ sp<hardware::media::omx::V1_0::IGraphicBufferSource> mGraphicBufferSource;
int64_t mRepeatFrameDelayUs;
int64_t mMaxPtsGapUs;
float mMaxFps;
@@ -496,6 +505,7 @@
AudioEncoding encoding = kAudioEncodingPcm16bit);
status_t setPriority(int32_t priority);
+ status_t setLowLatency(int32_t lowLatency);
status_t setLatency(uint32_t latency);
status_t getLatency(uint32_t *latency);
status_t setAudioPresentation(int32_t presentationId, int32_t programId);
diff --git a/media/libstagefright/include/media/stagefright/BatteryChecker.h b/media/libstagefright/include/media/stagefright/BatteryChecker.h
new file mode 100644
index 0000000..2ec4ac0
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/BatteryChecker.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BATTERY_CHECKER_H_
+#define BATTERY_CHECKER_H_
+
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+struct BatteryChecker : public RefBase {
+ BatteryChecker(const sp<AMessage> &msg, int64_t timeout = 3000000ll);
+
+ void setExecuting(bool executing) { mIsExecuting = executing; }
+ void onCodecActivity(std::function<void()> batteryOnCb);
+ void onCheckBatteryTimer(const sp<AMessage>& msg, std::function<void()> batteryOffCb);
+ void onClientRemoved();
+
+private:
+ const int64_t mTimeoutUs;
+ int64_t mLastActivityTimeUs;
+ bool mBatteryStatNotified;
+ int32_t mBatteryCheckerGeneration;
+ bool mIsExecuting;
+ sp<AMessage> mBatteryCheckerMsg;
+
+ bool isExecuting() { return mIsExecuting; }
+
+ DISALLOW_EVIL_CONSTRUCTORS(BatteryChecker);
+};
+
+} // namespace android
+
+#endif // BATTERY_CHECKER_H_
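
A short usage sketch of the BatteryChecker API above, mirroring how the MediaCodec changes later in this patch drive it. kWhatCheckBatteryTimer and the lambdas are illustrative placeholders, and the fragment assumes it runs inside an AHandler-derived owner:

    // Illustrative only; not part of the patch.
    sp<BatteryChecker> checker =
            new BatteryChecker(new AMessage(kWhatCheckBatteryTimer, this));

    checker->setExecuting(true);                        // codec entered executing state
    checker->onCodecActivity([] { /* report battery ON */ });

    // In onMessageReceived(), when kWhatCheckBatteryTimer arrives:
    checker->onCheckBatteryTimer(msg, [] { /* report battery OFF */ });

    // When the client goes away:
    checker->onClientRemoved();
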
diff --git a/media/libstagefright/include/media/stagefright/NdkUtils.h b/media/libstagefright/include/media/stagefright/FoundationUtils.h
similarity index 66%
rename from media/libstagefright/include/media/stagefright/NdkUtils.h
rename to media/libstagefright/include/media/stagefright/FoundationUtils.h
index a68884a..1548981 100644
--- a/media/libstagefright/include/media/stagefright/NdkUtils.h
+++ b/media/libstagefright/include/media/stagefright/FoundationUtils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,18 +14,19 @@
* limitations under the License.
*/
-#ifndef NDK_UTILS_H_
+#ifndef FOUNDATION_UTILS_H_
-#define NDK_UTILS_H_
+#define FOUNDATION_UTILS_H_
-#include <media/stagefright/MetaData.h>
-#include <media/NdkWrapper.h>
+#include <media/stagefright/foundation/AString.h>
namespace android {
-sp<MetaData> convertMediaFormatWrapperToMetaData(
- const sp<AMediaFormatWrapper> &fmt);
+AString MakeUserAgent();
+AString uriDebugString(const AString &uri, bool incognito = false);
+
+AString nameForFd(int fd);
} // namespace android
-#endif // NDK_UTILS_H_
+#endif // FOUNDATION_UTILS_H_
diff --git a/media/libstagefright/include/media/stagefright/FrameCaptureProcessor.h b/media/libstagefright/include/media/stagefright/FrameCaptureProcessor.h
new file mode 100644
index 0000000..66e5daa
--- /dev/null
+++ b/media/libstagefright/include/media/stagefright/FrameCaptureProcessor.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_CAPTURE_PROCESSOR_H_
+#define FRAME_CAPTURE_PROCESSOR_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct AMessage;
+class GraphicBuffer;
+class Rect;
+
+namespace renderengine {
+class RenderEngine;
+struct LayerSettings;
+}
+
+/*
+ * Process a decoded graphic buffer through RenderEngine to
+ * convert it to sRGB.
+ *
+ * This class is a singleton that holds one instance of RenderEngine
+ * and its event queue (on which the GL context runs). The RenderEngine
+ * is created upon the first getInstance().
+ */
+class FrameCaptureProcessor : public AHandler {
+
+public:
+
+ struct Layer : public RefBase {
+ virtual void getLayerSettings(
+ const Rect &sourceCrop, uint32_t textureName,
+ renderengine::LayerSettings *layerSettings) = 0;
+ };
+
+ static sp<FrameCaptureProcessor> getInstance();
+
+ status_t capture(
+ const sp<Layer> &layer,
+ const Rect &sourceCrop, const sp<GraphicBuffer> &outBuffer);
+
+protected:
+ virtual ~FrameCaptureProcessor();
+ void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+ FrameCaptureProcessor();
+
+ enum {
+ kWhatCreate,
+ kWhatCapture,
+ };
+
+ static Mutex sLock;
+ static sp<FrameCaptureProcessor> sInstance GUARDED_BY(sLock);
+
+ constexpr static float sDefaultMaxLumiance = 500.0f;
+
+ status_t mInitStatus;
+ sp<ALooper> mLooper;
+ std::unique_ptr<renderengine::RenderEngine> mRE;
+ uint32_t mTextureName;
+
+ static status_t PostAndAwaitResponse(
+ const sp<AMessage> &msg, sp<AMessage> *response);
+ static void PostReplyWithError(
+ const sp<AReplyToken> &replyID, status_t err);
+
+ status_t initCheck() { return mInitStatus; }
+ void createRenderEngine();
+
+ // message handlers
+ status_t onCreate();
+ status_t onCapture(const sp<Layer> &layer,
+ const Rect &sourceCrop, const sp<GraphicBuffer> &outBuffer);
+
+ DISALLOW_EVIL_CONSTRUCTORS(FrameCaptureProcessor);
+};
+
+} // namespace android
+
+#endif // FRAME_CAPTURE_PROCESSOR_H_
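
A minimal sketch of plugging into FrameCaptureProcessor as declared above. This is an assumption for illustration; the concrete LayerSettings population is left as a comment because renderengine's fields are outside this patch, and sourceCrop/outBuffer are placeholders:

    // Illustrative Layer implementation; not part of the patch.
    struct ExampleLayer : public FrameCaptureProcessor::Layer {
        void getLayerSettings(
                const Rect &sourceCrop, uint32_t textureName,
                renderengine::LayerSettings *layerSettings) override {
            // Fill |layerSettings| with the source buffer, |sourceCrop| and
            // |textureName| so RenderEngine can draw it into the output.
            (void)sourceCrop; (void)textureName; (void)layerSettings;
        }
    };

    sp<FrameCaptureProcessor> processor = FrameCaptureProcessor::getInstance();
    status_t err = processor->capture(new ExampleLayer(), sourceCrop, outBuffer);
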
diff --git a/media/libstagefright/include/media/stagefright/MediaBuffer.h b/media/libstagefright/include/media/stagefright/MediaBuffer.h
index ace63ae..9145b63 100644
--- a/media/libstagefright/include/media/stagefright/MediaBuffer.h
+++ b/media/libstagefright/include/media/stagefright/MediaBuffer.h
@@ -48,7 +48,11 @@
explicit MediaBuffer(const sp<ABuffer> &buffer);
#ifndef NO_IMEMORY
MediaBuffer(const sp<IMemory> &mem) :
- MediaBuffer((uint8_t *)mem->pointer() + sizeof(SharedControl), mem->size()) {
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ MediaBuffer((uint8_t *)mem->unsecurePointer() + sizeof(SharedControl), mem->size()) {
// delegate and override mMemory
mMemory = mem;
}
@@ -94,9 +98,13 @@
virtual int remoteRefcount() const {
#ifndef NO_IMEMORY
- if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ if (mMemory.get() == nullptr || mMemory->unsecurePointer() == nullptr) return 0;
int32_t remoteRefcount =
- reinterpret_cast<SharedControl *>(mMemory->pointer())->getRemoteRefcount();
+ reinterpret_cast<SharedControl *>(mMemory->unsecurePointer())->getRemoteRefcount();
// Sanity check so that remoteRefCount() is non-negative.
return remoteRefcount >= 0 ? remoteRefcount : 0; // do not allow corrupted data.
#else
@@ -107,8 +115,12 @@
// returns old value
int addRemoteRefcount(int32_t value) {
#ifndef NO_IMEMORY
- if (mMemory.get() == nullptr || mMemory->pointer() == nullptr) return 0;
- return reinterpret_cast<SharedControl *>(mMemory->pointer())->addRemoteRefcount(value);
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ if (mMemory.get() == nullptr || mMemory->unsecurePointer() == nullptr) return 0;
+ return reinterpret_cast<SharedControl *>(mMemory->unsecurePointer())->addRemoteRefcount(value);
#else
(void) value;
return 0;
@@ -121,8 +133,12 @@
static bool isDeadObject(const sp<IMemory> &memory) {
#ifndef NO_IMEMORY
- if (memory.get() == nullptr || memory->pointer() == nullptr) return false;
- return reinterpret_cast<SharedControl *>(memory->pointer())->isDeadObject();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ if (memory.get() == nullptr || memory->unsecurePointer() == nullptr) return false;
+ return reinterpret_cast<SharedControl *>(memory->unsecurePointer())->isDeadObject();
#else
(void) memory;
return false;
@@ -220,7 +236,11 @@
inline SharedControl *getSharedControl() const {
#ifndef NO_IMEMORY
- return reinterpret_cast<SharedControl *>(mMemory->pointer());
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ return reinterpret_cast<SharedControl *>(mMemory->unsecurePointer());
#else
return nullptr;
#endif
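
The MediaBuffer hunks above follow the same IMemory migration as the rest of this patch: pointer() becomes unsecurePointer(), and a null result is treated as a dead or secure mapping. A condensed sketch of the pattern (the MemoryDealer allocation and names are assumptions for illustration):

    sp<IMemory> mem = memoryDealer->allocate(bufferSize);
    // unsecurePointer() may return nullptr (e.g. for secure or unmapped memory),
    // which is why every call site above null-checks it before casting.
    if (mem == nullptr || mem->unsecurePointer() == nullptr) {
        return NO_MEMORY;
    }
    uint8_t *data = static_cast<uint8_t *>(mem->unsecurePointer());
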
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index 462eb84e..78d00b1 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -24,8 +24,7 @@
#include <gui/IGraphicBufferProducer.h>
#include <media/hardware/CryptoAPI.h>
#include <media/MediaCodecInfo.h>
-#include <media/MediaResource.h>
-#include <media/MediaAnalyticsItem.h>
+#include <media/MediaMetrics.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/FrameRenderTracker.h>
#include <utils/Vector.h>
@@ -36,14 +35,13 @@
struct AMessage;
struct AReplyToken;
struct AString;
+struct BatteryChecker;
class BufferChannelBase;
struct CodecBase;
class IBatteryStats;
struct ICrypto;
class MediaCodecBuffer;
class IMemory;
-class IResourceManagerClient;
-class IResourceManagerService;
struct PersistentSurface;
class SoftwareRenderer;
class Surface;
@@ -53,7 +51,13 @@
namespace V1_0 {
struct IDescrambler;
}}}}
+namespace media {
+class IResourceManagerClient;
+class MediaResourceParcel;
+}
using hardware::cas::native::V1_0::IDescrambler;
+using media::IResourceManagerClient;
+using media::MediaResourceParcel;
struct MediaCodec : public AHandler {
enum ConfigureFlags {
@@ -188,7 +192,7 @@
status_t getCodecInfo(sp<MediaCodecInfo> *codecInfo) const;
- status_t getMetrics(MediaAnalyticsItem * &reply);
+ status_t getMetrics(mediametrics_handle_t &reply);
status_t setParameters(const sp<AMessage> ¶ms);
@@ -283,34 +287,7 @@
bool mOwnedByClient;
};
- struct ResourceManagerServiceProxy : public IBinder::DeathRecipient {
- ResourceManagerServiceProxy(pid_t pid, uid_t uid);
- ~ResourceManagerServiceProxy();
-
- void init();
-
- // implements DeathRecipient
- virtual void binderDied(const wp<IBinder>& /*who*/);
-
- void addResource(
- int64_t clientId,
- const sp<IResourceManagerClient> &client,
- const Vector<MediaResource> &resources);
-
- void removeResource(
- int64_t clientId,
- const Vector<MediaResource> &resources);
-
- void removeClient(int64_t clientId);
-
- bool reclaimResource(const Vector<MediaResource> &resources);
-
- private:
- Mutex mLock;
- sp<IResourceManagerService> mService;
- pid_t mPid;
- uid_t mUid;
- };
+ struct ResourceManagerServiceProxy;
State mState;
uid_t mUid;
@@ -327,11 +304,11 @@
sp<Surface> mSurface;
SoftwareRenderer *mSoftRenderer;
- MediaAnalyticsItem *mAnalyticsItem;
- void initAnalyticsItem();
- void updateAnalyticsItem();
- void flushAnalyticsItem();
- void updateEphemeralAnalytics(MediaAnalyticsItem *item);
+ mediametrics_handle_t mMetricsHandle;
+ void initMediametrics();
+ void updateMediametrics();
+ void flushMediametrics();
+ void updateEphemeralMediametrics(mediametrics_handle_t item);
sp<AMessage> mOutputFormat;
sp<AMessage> mInputFormat;
@@ -433,8 +410,8 @@
bool isExecuting() const;
uint64_t getGraphicBufferSize();
- void addResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
- void removeResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
+ void addResource(const MediaResourceParcel &resource);
+ void removeResource(const MediaResourceParcel &resource);
void requestCpuBoostIfNeeded();
bool hasPendingBuffer(int portIndex);
@@ -463,11 +440,7 @@
Mutex mLatencyLock;
int64_t mLatencyUnknown; // buffers for which we couldn't calculate latency
- int64_t mLastActivityTimeUs;
- bool mBatteryStatNotified;
- int32_t mBatteryCheckerGeneration;
- void onBatteryChecker(const sp<AMessage>& msg);
- void scheduleBatteryCheckerIfNeeded();
+ sp<BatteryChecker> mBatteryChecker;
void statsBufferSent(int64_t presentationUs);
void statsBufferReceived(int64_t presentationUs);
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 50d7724..16e207d 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -510,22 +510,24 @@
constexpr int32_t DolbyVisionProfileDvheStn = 0x20;
constexpr int32_t DolbyVisionProfileDvheDth = 0x40;
constexpr int32_t DolbyVisionProfileDvheDtb = 0x80;
-constexpr int32_t DolbyVisionProfileDvheSt = 0x100;
-constexpr int32_t DolbyVisionProfileDvavSe = 0x200;
+constexpr int32_t DolbyVisionProfileDvheSt = 0x100;
+constexpr int32_t DolbyVisionProfileDvavSe = 0x200;
+constexpr int32_t DolbyVisionProfileDvav110 = 0x400;
inline static const char *asString_DolbyVisionProfile(int32_t i, const char *def = "??") {
switch (i) {
- case DolbyVisionProfileDvavPer: return "DvavPer";
- case DolbyVisionProfileDvavPen: return "DvavPen";
- case DolbyVisionProfileDvheDer: return "DvheDer";
- case DolbyVisionProfileDvheDen: return "DvheDen";
- case DolbyVisionProfileDvheDtr: return "DvheDtr";
- case DolbyVisionProfileDvheStn: return "DvheStn";
- case DolbyVisionProfileDvheDth: return "DvheDth";
- case DolbyVisionProfileDvheDtb: return "DvheDtb";
- case DolbyVisionProfileDvheSt: return "DvheSt";
- case DolbyVisionProfileDvavSe: return "DvavSe";
- default: return def;
+ case DolbyVisionProfileDvavPer: return "DvavPer";
+ case DolbyVisionProfileDvavPen: return "DvavPen";
+ case DolbyVisionProfileDvheDer: return "DvheDer";
+ case DolbyVisionProfileDvheDen: return "DvheDen";
+ case DolbyVisionProfileDvheDtr: return "DvheDtr";
+ case DolbyVisionProfileDvheStn: return "DvheStn";
+ case DolbyVisionProfileDvheDth: return "DvheDth";
+ case DolbyVisionProfileDvheDtb: return "DvheDtb";
+ case DolbyVisionProfileDvheSt: return "DvheSt";
+ case DolbyVisionProfileDvavSe: return "DvavSe";
+ case DolbyVisionProfileDvav110: return "Dav110";
+ default: return def;
}
}
@@ -774,6 +776,7 @@
constexpr char KEY_LANGUAGE[] = "language";
constexpr char KEY_LATENCY[] = "latency";
constexpr char KEY_LEVEL[] = "level";
+constexpr char KEY_LOW_LATENCY[] = "low-latency";
constexpr char KEY_MAX_B_FRAMES[] = "max-bframes";
constexpr char KEY_MAX_BIT_RATE[] = "max-bitrate";
constexpr char KEY_MAX_FPS_TO_ENCODER[] = "max-fps-to-encoder";
diff --git a/media/libstagefright/include/media/stagefright/MediaErrors.h b/media/libstagefright/include/media/stagefright/MediaErrors.h
index 09639e2..6f48c5d 100644
--- a/media/libstagefright/include/media/stagefright/MediaErrors.h
+++ b/media/libstagefright/include/media/stagefright/MediaErrors.h
@@ -99,7 +99,13 @@
ERROR_CAS_DEVICE_REVOKED = CAS_ERROR_BASE - 9,
ERROR_CAS_RESOURCE_BUSY = CAS_ERROR_BASE - 10,
ERROR_CAS_INSUFFICIENT_OUTPUT_PROTECTION = CAS_ERROR_BASE - 11,
- ERROR_CAS_LAST_USED_ERRORCODE = CAS_ERROR_BASE - 11,
+ ERROR_CAS_NEED_ACTIVATION = CAS_ERROR_BASE - 12,
+ ERROR_CAS_NEED_PAIRING = CAS_ERROR_BASE - 13,
+ ERROR_CAS_NO_CARD = CAS_ERROR_BASE - 14,
+ ERROR_CAS_CARD_MUTE = CAS_ERROR_BASE - 15,
+ ERROR_CAS_CARD_INVALID = CAS_ERROR_BASE - 16,
+ ERROR_CAS_BLACKOUT = CAS_ERROR_BASE - 17,
+ ERROR_CAS_LAST_USED_ERRORCODE = CAS_ERROR_BASE - 17,
ERROR_CAS_VENDOR_MAX = CAS_ERROR_BASE - 500,
ERROR_CAS_VENDOR_MIN = CAS_ERROR_BASE - 999,
diff --git a/media/libstagefright/include/media/stagefright/MetaData.h b/media/libstagefright/include/media/stagefright/MetaData.h
index f625358..68adf346 100644
--- a/media/libstagefright/include/media/stagefright/MetaData.h
+++ b/media/libstagefright/include/media/stagefright/MetaData.h
@@ -41,7 +41,9 @@
friend class BnMediaSource;
friend class BpMediaSource;
friend class BpMediaExtractor;
+#ifndef __ANDROID_VNDK__
static sp<MetaData> createFromParcel(const Parcel &parcel);
+#endif
};
} // namespace android
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 8dc2dd5..e17093a 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -59,6 +59,7 @@
kKeyAACProfile = 'aacp', // int32_t
kKeyAVCC = 'avcc', // raw data
kKeyHVCC = 'hvcc', // raw data
+ kKeyDVCC = 'dvcc', // raw data
kKeyAV1C = 'av1c', // raw data
kKeyThumbnailHVCC = 'thvc', // raw data
kKeyD263 = 'd263', // raw data
@@ -245,6 +246,7 @@
kTypeAVCC = 'avcc',
kTypeHVCC = 'hvcc',
kTypeAV1C = 'av1c',
+ kTypeDVCC = 'dvcc',
kTypeD263 = 'd263',
};
@@ -319,8 +321,10 @@
struct Rect;
struct MetaDataInternal;
MetaDataInternal *mInternalData;
+#ifndef __ANDROID_VNDK__
status_t writeToParcel(Parcel &parcel);
status_t updateFromParcel(const Parcel &parcel);
+#endif
};
} // namespace android
diff --git a/media/libstagefright/include/media/stagefright/PersistentSurface.h b/media/libstagefright/include/media/stagefright/PersistentSurface.h
index 49b36c9..f4943c3 100644
--- a/media/libstagefright/include/media/stagefright/PersistentSurface.h
+++ b/media/libstagefright/include/media/stagefright/PersistentSurface.h
@@ -18,31 +18,21 @@
#define PERSISTENT_SURFACE_H_
-#include <android/IGraphicBufferSource.h>
#include <binder/Parcel.h>
#include <hidl/HidlSupport.h>
#include <hidl/HybridInterface.h>
#include <gui/IGraphicBufferProducer.h>
#include <media/stagefright/foundation/ABase.h>
-using android::hidl::base::V1_0::IBase;
-
namespace android {
struct PersistentSurface : public RefBase {
PersistentSurface() {}
- // create an OMX persistent surface
+ // create a persistent surface
PersistentSurface(
const sp<IGraphicBufferProducer>& bufferProducer,
- const sp<IGraphicBufferSource>& bufferSource) :
- mBufferProducer(bufferProducer),
- mBufferSource(bufferSource) { }
-
- // create a HIDL persistent surface
- PersistentSurface(
- const sp<IGraphicBufferProducer>& bufferProducer,
- const sp<IBase>& hidlTarget) :
+ const sp<hidl::base::V1_0::IBase>& hidlTarget) :
mBufferProducer(bufferProducer),
mHidlTarget(hidlTarget) { }
@@ -50,18 +40,12 @@
return mBufferProducer;
}
- sp<IGraphicBufferSource> getBufferSource() const {
- return mBufferSource;
- }
-
- sp<IBase> getHidlTarget() const {
+ sp<hidl::base::V1_0::IBase> getHidlTarget() const {
return mHidlTarget;
}
status_t writeToParcel(Parcel *parcel) const {
parcel->writeStrongBinder(IInterface::asBinder(mBufferProducer));
- // this can handle null
- parcel->writeStrongBinder(IInterface::asBinder(mBufferSource));
// write hidl target
if (mHidlTarget != nullptr) {
HalToken token;
@@ -79,8 +63,6 @@
status_t readFromParcel(const Parcel *parcel) {
mBufferProducer = interface_cast<IGraphicBufferProducer>(
parcel->readStrongBinder());
- mBufferSource = interface_cast<IGraphicBufferSource>(
- parcel->readStrongBinder());
// read hidl target
bool haveHidlTarget = parcel->readBool();
if (haveHidlTarget) {
@@ -97,8 +79,7 @@
private:
sp<IGraphicBufferProducer> mBufferProducer;
- sp<IGraphicBufferSource> mBufferSource;
- sp<IBase> mHidlTarget;
+ sp<hidl::base::V1_0::IBase> mHidlTarget;
DISALLOW_EVIL_CONSTRUCTORS(PersistentSurface);
};
diff --git a/media/libstagefright/include/media/stagefright/RemoteDataSource.h b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
index e191e6a..b634505 100644
--- a/media/libstagefright/include/media/stagefright/RemoteDataSource.h
+++ b/media/libstagefright/include/media/stagefright/RemoteDataSource.h
@@ -48,7 +48,7 @@
if (size > kBufferSize) {
size = kBufferSize;
}
- return mSource->readAt(offset, mMemory->pointer(), size);
+ return mSource->readAt(offset, mMemory->unsecurePointer(), size);
}
virtual status_t getSize(off64_t *size) {
return mSource->getSize(size);
diff --git a/media/libstagefright/include/media/stagefright/Utils.h b/media/libstagefright/include/media/stagefright/Utils.h
index e8e0a11..2b9b759 100644
--- a/media/libstagefright/include/media/stagefright/Utils.h
+++ b/media/libstagefright/include/media/stagefright/Utils.h
@@ -41,8 +41,6 @@
// TODO: combine this with avc_utils::getNextNALUnit
const uint8_t *findNextNalStartCode(const uint8_t *data, size_t length);
-AString MakeUserAgent();
-
// Convert a MIME type to a AudioSystem::audio_format
status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime);
@@ -60,8 +58,6 @@
bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo,
bool isStreaming, audio_stream_type_t streamType);
-AString uriDebugString(const AString &uri, bool incognito = false);
-
struct HLSTime {
int32_t mSeq;
int64_t mTimeUs;
@@ -85,7 +81,6 @@
void writeToAMessage(const sp<AMessage> &msg, const BufferingSettings &buffering);
void readFromAMessage(const sp<AMessage> &msg, BufferingSettings *buffering /* nonnull */);
-AString nameForFd(int fd);
} // namespace android
#endif // UTILS_H_
diff --git a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
index ed272bb..7d217eb 100644
--- a/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/1.0/WGraphicBufferSource.cpp
@@ -14,6 +14,10 @@
* limitations under the License.
*/
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
//#define LOG_NDEBUG 0
#define LOG_TAG "TWGraphicBufferSource"
@@ -21,6 +25,7 @@
#include <media/stagefright/omx/1.0/WOmxNode.h>
#include <media/stagefright/omx/1.0/Conversion.h>
#include <media/stagefright/omx/OMXUtils.h>
+#include <media/stagefright/omx/OmxGraphicBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmxBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <media/openmax/OMX_Component.h>
diff --git a/media/libstagefright/omx/Android.bp b/media/libstagefright/omx/Android.bp
index 7d03d98..7d612b4 100644
--- a/media/libstagefright/omx/Android.bp
+++ b/media/libstagefright/omx/Android.bp
@@ -45,7 +45,6 @@
"libdl",
"libhidlbase",
"libhidlmemory",
- "libhidltransport",
"libvndksupport",
"android.hardware.media.omx@1.0",
"android.hardware.graphics.bufferqueue@1.0",
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index ddb4ba0..e1c3916 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -128,7 +128,7 @@
}
OMX_U8 *getPointer() {
- return mMem.get() ? static_cast<OMX_U8*>(mMem->pointer()) :
+ return mMem.get() ? static_cast<OMX_U8*>(mMem->unsecurePointer()) :
mHidlMemory.get() ? static_cast<OMX_U8*>(
static_cast<void*>(mHidlMemory->getPointer())) : nullptr;
}
@@ -1173,7 +1173,11 @@
return BAD_VALUE;
}
if (params != NULL) {
- paramsPointer = params->pointer();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ paramsPointer = params->unsecurePointer();
paramsSize = params->size();
} else if (hParams != NULL) {
paramsPointer = hParams->getPointer();
diff --git a/media/libstagefright/omx/OmxGraphicBufferSource.cpp b/media/libstagefright/omx/OmxGraphicBufferSource.cpp
index 8de1f4f..7b187f9 100644
--- a/media/libstagefright/omx/OmxGraphicBufferSource.cpp
+++ b/media/libstagefright/omx/OmxGraphicBufferSource.cpp
@@ -14,12 +14,18 @@
* limitations under the License.
*/
+#ifdef __LP64__
+#define OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+#endif
+
#include <inttypes.h>
#define LOG_TAG "OmxGraphicBufferSource"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <media/openmax/OMX_Core.h>
+
#include <media/stagefright/bqhelper/ComponentWrapper.h>
#include <media/stagefright/bqhelper/GraphicBufferSource.h>
#include <media/stagefright/omx/OmxGraphicBufferSource.h>
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
index 9669677..264c01d 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/Conversion.h
@@ -48,7 +48,6 @@
#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
#include <android/hardware/media/omx/1.0/IOmxObserver.h>
-#include <android/IGraphicBufferSource.h>
#include <android/IOMXBufferSource.h>
namespace android {
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
index 4e56c98..02d4b7b 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/1.0/WGraphicBufferSource.h
@@ -26,18 +26,16 @@
#include <android/hardware/media/omx/1.0/IOmxNode.h>
#include <android/hardware/media/omx/1.0/IGraphicBufferSource.h>
-#include <android/BnGraphicBufferSource.h>
-
-#include <media/stagefright/omx/OmxGraphicBufferSource.h>
-
namespace android {
+
+class OmxGraphicBufferSource;
+
namespace hardware {
namespace media {
namespace omx {
namespace V1_0 {
namespace implementation {
-using ::android::OmxGraphicBufferSource;
using ::android::hardware::graphics::common::V1_0::Dataspace;
using ::android::hardware::media::omx::V1_0::ColorAspects;
using ::android::hardware::media::omx::V1_0::IGraphicBufferSource;
@@ -52,8 +50,6 @@
using ::android::hardware::Void;
using ::android::sp;
-using ::android::IOMXNode;
-
/**
* Wrapper classes for conversion
* ==============================
diff --git a/media/libstagefright/omx/include/media/stagefright/omx/OmxGraphicBufferSource.h b/media/libstagefright/omx/include/media/stagefright/omx/OmxGraphicBufferSource.h
index 518e0cb..e576d75 100644
--- a/media/libstagefright/omx/include/media/stagefright/omx/OmxGraphicBufferSource.h
+++ b/media/libstagefright/omx/include/media/stagefright/omx/OmxGraphicBufferSource.h
@@ -21,7 +21,6 @@
#include <media/stagefright/bqhelper/GraphicBufferSource.h>
#include <media/stagefright/foundation/ABase.h>
-#include <android/BnGraphicBufferSource.h>
#include <android/BnOMXBufferSource.h>
#include "IOmxNodeWrapper.h"
diff --git a/media/libstagefright/omx/tests/Android.bp b/media/libstagefright/omx/tests/Android.bp
index 569fa88..eb01543 100644
--- a/media/libstagefright/omx/tests/Android.bp
+++ b/media/libstagefright/omx/tests/Android.bp
@@ -7,6 +7,7 @@
shared_libs: [
"libstagefright",
"libbinder",
+ "libdatasource",
"libmedia",
"libmedia_omx",
"libutils",
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index cc8c234..6848a83 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -27,13 +27,13 @@
#include <binder/ProcessState.h>
#include <binder/IServiceManager.h>
#include <cutils/properties.h>
+#include <datasource/DataSourceFactory.h>
#include <media/DataSource.h>
#include <media/IMediaHTTPService.h>
#include <media/MediaSource.h>
#include <media/OMXBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
@@ -278,7 +278,7 @@
static sp<IMediaExtractor> CreateExtractorFromURI(const char *uri) {
sp<DataSource> source =
- DataSourceFactory::CreateFromURI(NULL /* httpService */, uri);
+ DataSourceFactory::getInstance()->CreateFromURI(NULL /* httpService */, uri);
if (source == NULL) {
return NULL;
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 789e62a..cac1af9 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -21,12 +21,14 @@
#include "ARTSPConnection.h"
#include "NetworkUtils.h"
+#include <datasource/HTTPBase.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/base64.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <arpa/inet.h>
#include <fcntl.h>
@@ -34,7 +36,6 @@
#include <openssl/md5.h>
#include <sys/socket.h>
-#include "include/HTTPBase.h"
namespace android {
diff --git a/media/libstagefright/rtsp/Android.bp b/media/libstagefright/rtsp/Android.bp
index 9bc9c89..a5a895e 100644
--- a/media/libstagefright/rtsp/Android.bp
+++ b/media/libstagefright/rtsp/Android.bp
@@ -21,6 +21,7 @@
shared_libs: [
"libcrypto",
+ "libdatasource",
"libmedia",
],
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 85ffba2..7f025a5 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -36,18 +36,19 @@
#include <ctype.h>
#include <cutils/properties.h>
+#include <datasource/HTTPBase.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netdb.h>
-#include "HTTPBase.h"
#if LOG_NDEBUG
#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index 665d51a..e236267 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -22,12 +22,13 @@
#include "ASessionDescription.h"
+#include <datasource/MediaHTTP.h>
#include <media/MediaHTTPConnection.h>
#include <media/MediaHTTPService.h>
-#include <media/stagefright/ClearMediaHTTP.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/Utils.h>
+#include <media/stagefright/FoundationUtils.h>
#define DEFAULT_SDP_SIZE 100000
@@ -41,7 +42,7 @@
mFlags(flags),
mNetLooper(new ALooper),
mCancelled(false),
- mHTTPDataSource(new ClearMediaHTTP(httpService->makeHTTPConnection())) {
+ mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())) {
mNetLooper->setName("sdp net");
mNetLooper->start(false /* runOnCallingThread */,
false /* canCallJava */,
diff --git a/media/libstagefright/tests/Android.bp b/media/libstagefright/tests/Android.bp
index be10fdc..a7f94c1 100644
--- a/media/libstagefright/tests/Android.bp
+++ b/media/libstagefright/tests/Android.bp
@@ -27,3 +27,21 @@
"-Wall",
],
}
+
+cc_test {
+ name: "BatteryChecker_test",
+ srcs: ["BatteryChecker_test.cpp"],
+ test_suites: ["device-tests"],
+
+ shared_libs: [
+ "libstagefright",
+ "libstagefright_foundation",
+ "libutils",
+ "liblog",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
\ No newline at end of file
diff --git a/media/libstagefright/tests/BatteryChecker_test.cpp b/media/libstagefright/tests/BatteryChecker_test.cpp
new file mode 100644
index 0000000..0c5ee9b
--- /dev/null
+++ b/media/libstagefright/tests/BatteryChecker_test.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "BatteryChecker_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include <media/stagefright/BatteryChecker.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+#include <vector>
+
+namespace android {
+
+static const int kBatteryTimeoutUs = 1000000ll; // 1 second
+static const int kTestMarginUs = 50000ll; // 50ms
+static const int kWaitStatusChangeUs = kBatteryTimeoutUs + kTestMarginUs;
+static const int kSparseFrameIntervalUs = kBatteryTimeoutUs - kTestMarginUs;
+
+class BatteryCheckerTestHandler : public AHandler {
+ enum EventType {
+ // Events simulating MediaCodec
+ kWhatStart = 0, // codec entering executing state
+ kWhatStop, // codec exiting executing state
+ kWhatActivity, // codec queue input or dequeue output
+ kWhatReleased, // codec released
+ kWhatCheckpoint, // test checkpoint with expected On/Off counter values
+
+ // Message for battery checker monitor (not for testing through runTest())
+ kWhatBatteryChecker,
+ };
+
+ struct Operation {
+ int32_t event;
+ int64_t delay = 0;
+ uint32_t repeatCount = 0;
+ int32_t expectedOnCounter = 0;
+ int32_t expectedOffCounter = 0;
+ };
+
+ std::vector<Operation> mOps;
+ sp<BatteryChecker> mBatteryChecker;
+ int32_t mOnCounter;
+ int32_t mOffCounter;
+ Condition mDone;
+ Mutex mLock;
+
+ BatteryCheckerTestHandler() : mOnCounter(0), mOffCounter(0) {}
+
+ void runTest(const std::vector<Operation> &ops, int64_t timeoutUs) {
+ mOps = ops;
+
+ mBatteryChecker = new BatteryChecker(
+ new AMessage(kWhatBatteryChecker, this), kBatteryTimeoutUs);
+
+ (new AMessage(ops[0].event, this))->post();
+
+ // wait for done
+ AutoMutex lock(mLock);
+ EXPECT_NE(TIMED_OUT, mDone.waitRelative(mLock, timeoutUs * 1000ll));
+ }
+
+ virtual void onMessageReceived(const sp<AMessage> &msg);
+
+ friend class BatteryCheckerTest;
+};
+
+class BatteryCheckerTest : public ::testing::Test {
+public:
+ BatteryCheckerTest()
+ : mLooper(new ALooper)
+ , mHandler(new BatteryCheckerTestHandler()) {
+ mLooper->setName("BatteryCheckerLooper");
+ mLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+ mLooper->registerHandler(mHandler);
+ }
+
+protected:
+ using EventType = BatteryCheckerTestHandler::EventType;
+ using Operation = BatteryCheckerTestHandler::Operation;
+
+ virtual ~BatteryCheckerTest() {
+ mLooper->stop();
+ mLooper->unregisterHandler(mHandler->id());
+ }
+
+ void runTest(const std::vector<Operation> &ops, int64_t timeoutUs) {
+ mHandler->runTest(ops, timeoutUs);
+ }
+
+ sp<ALooper> mLooper;
+ sp<BatteryCheckerTestHandler> mHandler;
+};
+
+void BatteryCheckerTestHandler::onMessageReceived(const sp<AMessage> &msg) {
+ switch(msg->what()) {
+ case kWhatStart:
+ mBatteryChecker->setExecuting(true);
+ break;
+ case kWhatStop:
+ mBatteryChecker->setExecuting(false);
+ break;
+ case kWhatActivity:
+ mBatteryChecker->onCodecActivity([this] () { mOnCounter++; });
+ break;
+ case kWhatReleased:
+ mBatteryChecker->onClientRemoved();
+ break;
+ case kWhatBatteryChecker:
+ mBatteryChecker->onCheckBatteryTimer(msg, [this] () { mOffCounter++; });
+ break;
+ case kWhatCheckpoint:
+ // verify ON/OFF state and total events
+ EXPECT_EQ(mOnCounter, mOps[0].expectedOnCounter);
+ EXPECT_EQ(mOffCounter, mOps[0].expectedOffCounter);
+ break;
+ default:
+ TRESPASS();
+ }
+ if (msg->what() != kWhatBatteryChecker) {
+ EXPECT_EQ(msg->what(), mOps[0].event);
+ // post next message
+ if (!mOps[0].repeatCount) {
+ mOps.erase(mOps.begin());
+ } else {
+ mOps[0].repeatCount--;
+ }
+ if (!mOps.empty()) {
+ int64_t duration = mOps[0].delay;
+ (new AMessage(mOps[0].event, this))->post(duration);
+ } else {
+ AutoMutex lock(mLock);
+ mDone.signal();
+ }
+ }
+}
+
+TEST_F(BatteryCheckerTest, testNormalOperations) {
+ runTest({
+ {EventType::kWhatStart, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 0, 0},
+ {EventType::kWhatActivity, 33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0}, // ON
+ {EventType::kWhatActivity, 33333ll, 2*kWaitStatusChangeUs/33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0},
+ {EventType::kWhatCheckpoint, kWaitStatusChangeUs, 0, 1, 1}, // OFF
+ }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testPauseResume) {
+ runTest({
+ {EventType::kWhatStart, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 0, 0},
+ {EventType::kWhatActivity, 33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0}, // ON
+ {EventType::kWhatCheckpoint, kWaitStatusChangeUs, 0, 1, 1}, // OFF
+ {EventType::kWhatActivity, 33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 2, 1}, // ON
+ {EventType::kWhatCheckpoint, kWaitStatusChangeUs, 0, 2, 2}, // OFF
+ }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testClientRemovedAndRestart) {
+ runTest({
+ {EventType::kWhatStart, 0ll},
+ {EventType::kWhatActivity, 33333ll, kWaitStatusChangeUs/33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0},
+
+ // stopping execution by itself shouldn't trigger any calls
+ {EventType::kWhatStop, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0},
+
+ // release shouldn't trigger any calls either,
+ // client resource will be removed entirely
+ {EventType::kWhatReleased, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0},
+ {EventType::kWhatCheckpoint, kWaitStatusChangeUs, 0, 1, 0},
+
+ // start pushing buffers again; ON should be received without any OFF
+ {EventType::kWhatStart, 0ll},
+ {EventType::kWhatActivity, 33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 2, 0},
+
+ // double-check that only the new checker msg triggers OFF;
+ // left-over checker msgs from the stale generation are discarded
+ {EventType::kWhatCheckpoint, kWaitStatusChangeUs, 0, 2, 1},
+ }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testActivityWhileNotExecuting) {
+ runTest({
+ // activity before start shouldn't trigger
+ {EventType::kWhatActivity, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 0, 0},
+
+ {EventType::kWhatStart, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 0, 0},
+
+ // activity after start before stop should trigger
+ {EventType::kWhatActivity, 33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0},
+
+ // stopping execution by itself shouldn't trigger any calls
+ {EventType::kWhatStop, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0},
+
+ // keep pushing for another 3 seconds after stop; expect OFF to trigger
+ {EventType::kWhatActivity, 33333ll, kWaitStatusChangeUs/33333ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 1},
+ }, 10000000ll);
+}
+
+TEST_F(BatteryCheckerTest, testSparseActivity) {
+ runTest({
+ {EventType::kWhatStart, 0ll},
+ {EventType::kWhatCheckpoint, 0ll, 0, 0, 0},
+
+ // activity arrives sparsely with an interval only slightly smaller than the timeout;
+ // should only trigger 1 ON
+ {EventType::kWhatActivity, kSparseFrameIntervalUs, 2},
+ {EventType::kWhatCheckpoint, 0ll, 0, 1, 0},
+ {EventType::kWhatCheckpoint, kSparseFrameIntervalUs, 0, 1, 0},
+ {EventType::kWhatCheckpoint, kTestMarginUs, 0, 1, 1}, // OFF
+ }, 10000000ll);
+}
+} // namespace android
diff --git a/media/libstagefright/timedtext/Android.bp b/media/libstagefright/timedtext/Android.bp
index 6935655..4f4ceb1 100644
--- a/media/libstagefright/timedtext/Android.bp
+++ b/media/libstagefright/timedtext/Android.bp
@@ -23,32 +23,4 @@
shared_libs: ["libmedia"],
}
-cc_library_static {
- name: "libstagefright_timedtext2",
- srcs: ["TextDescriptions2.cpp"],
-
- static_libs: [
- "libmediaplayer2-protos",
- "libprotobuf-cpp-lite",
- ],
-
- cflags: [
- "-Wno-multichar",
- "-Werror",
- "-Wall",
- ],
-
- sanitize: {
- misc_undefined: [
- "signed-integer-overflow",
- ],
- cfi: true,
- },
-
- include_dirs: [
- "frameworks/av/media/libstagefright",
- ],
-
- shared_libs: ["libmedia"],
-}
diff --git a/media/libstagefright/timedtext/TextDescriptions2.cpp b/media/libstagefright/timedtext/TextDescriptions2.cpp
deleted file mode 100644
index fd42d3a..0000000
--- a/media/libstagefright/timedtext/TextDescriptions2.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TextDescriptions2.h"
-#include <media/stagefright/foundation/ByteUtils.h>
-#include <media/stagefright/MediaErrors.h>
-
-namespace android {
-
-TextDescriptions2::TextDescriptions2() {
-}
-
-status_t TextDescriptions2::getPlayerMessageOfDescriptions(
- const uint8_t *data, ssize_t size,
- uint32_t flags, int timeMs, PlayerMessage *playerMsg) {
- if (flags & IN_BAND_TEXT_3GPP) {
- if (flags & GLOBAL_DESCRIPTIONS) {
- return extract3GPPGlobalDescriptions(data, size, playerMsg);
- } else if (flags & LOCAL_DESCRIPTIONS) {
- return extract3GPPLocalDescriptions(data, size, timeMs, playerMsg);
- }
- } else if (flags & OUT_OF_BAND_TEXT_SRT) {
- if (flags & LOCAL_DESCRIPTIONS) {
- return extractSRTLocalDescriptions(data, size, timeMs, playerMsg);
- }
- }
-
- return ERROR_UNSUPPORTED;
-}
-
-// Parse the SRT text sample, and store the timing and text sample in a PlayerMessage.
-// The PlayerMessage will be sent to MediaPlayer2.java through event, and will be
-// parsed in TimedText.java.
-status_t TextDescriptions2::extractSRTLocalDescriptions(
- const uint8_t *data, ssize_t size, int timeMs, PlayerMessage *playerMsg) {
- playerMsg->add_values()->set_int32_value(KEY_LOCAL_SETTING);
- playerMsg->add_values()->set_int32_value(KEY_START_TIME);
- playerMsg->add_values()->set_int32_value(timeMs);
-
- playerMsg->add_values()->set_int32_value(KEY_STRUCT_TEXT);
- playerMsg->add_values()->set_bytes_value(data, size);
-
- return OK;
-}
-
-// Extract the local 3GPP display descriptions. 3GPP local descriptions
-// are appended to the text sample if any.
-status_t TextDescriptions2::extract3GPPLocalDescriptions(
- const uint8_t *data, ssize_t size,
- int timeMs, PlayerMessage *playerMsg) {
-
- playerMsg->add_values()->set_int32_value(KEY_LOCAL_SETTING);
-
- // write start time to display this text sample
- playerMsg->add_values()->set_int32_value(KEY_START_TIME);
- playerMsg->add_values()->set_int32_value(timeMs);
-
- if (size < 2) {
- return OK;
- }
- ssize_t textLen = (*data) << 8 | (*(data + 1));
-
- if (size < textLen + 2) {
- return OK;
- }
-
- // write text sample length and text sample itself
- playerMsg->add_values()->set_int32_value(KEY_STRUCT_TEXT);
- playerMsg->add_values()->set_bytes_value(data + 2, textLen);
-
- if (size > textLen + 2) {
- data += (textLen + 2);
- size -= (textLen + 2);
- } else {
- return OK;
- }
-
- while (size >= 8) {
- const uint8_t *tmpData = data;
- ssize_t chunkSize = U32_AT(tmpData); // size includes size and type
- uint32_t chunkType = U32_AT(tmpData + 4);
-
- if (chunkSize <= 8 || chunkSize > size) {
- return OK;
- }
-
- size_t remaining = chunkSize - 8;
-
- tmpData += 8;
-
- switch(chunkType) {
- // 'tbox' box to indicate the position of the text with values
- // of top, left, bottom and right
- case FOURCC('t', 'b', 'o', 'x'):
- {
- if (remaining < 8) {
- return OK;
- }
- playerMsg->add_values()->set_int32_value(KEY_STRUCT_TEXT_POS);
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData));
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData + 2));
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData + 4));
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData + 6));
-
- tmpData += 8;
- remaining -= 8;
- break;
- }
- default:
- {
- break;
- }
- }
-
- data += chunkSize;
- size -= chunkSize;
- }
-
- return OK;
-}
-
-// To extract box 'tx3g' defined in 3GPP TS 26.245, and store it in a PlayerMessage
-status_t TextDescriptions2::extract3GPPGlobalDescriptions(
- const uint8_t *data, ssize_t size, PlayerMessage *playerMsg) {
-
- playerMsg->add_values()->set_int32_value(KEY_GLOBAL_SETTING);
-
- while (size >= 8) {
- ssize_t chunkSize = U32_AT(data);
- uint32_t chunkType = U32_AT(data + 4);
- const uint8_t *tmpData = data;
- tmpData += 8;
- size_t remaining = size - 8;
-
- if (chunkSize <= 8 || size < chunkSize) {
- return OK;
- }
- switch(chunkType) {
- case FOURCC('t', 'x', '3', 'g'):
- {
- if (remaining < 18) {
- return OK;
- }
- // Skip DISPLAY_FLAGS, STRUCT_JUSTIFICATION, and BACKGROUND_COLOR_RGBA
- tmpData += 18;
- remaining -= 18;
-
- if (remaining < 8) {
- return OK;
- }
- playerMsg->add_values()->set_int32_value(KEY_STRUCT_TEXT_POS);
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData));
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData + 2));
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData + 4));
- playerMsg->add_values()->set_int32_value(U16_AT(tmpData + 6));
-
- tmpData += 8;
- remaining -= 18;
- // Ignore remaining data.
- break;
- }
- default:
- {
- break;
- }
- }
-
- data += chunkSize;
- size -= chunkSize;
- }
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/timedtext/TextDescriptions2.h b/media/libstagefright/timedtext/TextDescriptions2.h
deleted file mode 100644
index 7c7d2d0..0000000
--- a/media/libstagefright/timedtext/TextDescriptions2.h
+++ /dev/null
@@ -1,88 +0,0 @@
- /*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef TEXT_DESCRIPTIONS2_H_
-
-#define TEXT_DESCRIPTIONS2_H_
-
-#include <binder/Parcel.h>
-#include <media/stagefright/foundation/ABase.h>
-
-#include "mediaplayer2.pb.h"
-
-using android::media::MediaPlayer2Proto::PlayerMessage;
-
-namespace android {
-
-class TextDescriptions2 {
-public:
- enum {
- IN_BAND_TEXT_3GPP = 0x01,
- OUT_OF_BAND_TEXT_SRT = 0x02,
-
- GLOBAL_DESCRIPTIONS = 0x100,
- LOCAL_DESCRIPTIONS = 0x200,
- };
-
- static status_t getPlayerMessageOfDescriptions(
- const uint8_t *data, ssize_t size,
- uint32_t flags, int timeMs, PlayerMessage *playerMsg);
-private:
- TextDescriptions2();
-
- enum {
- // These keys must be in sync with the keys in TimedText.java
- KEY_DISPLAY_FLAGS = 1, // int
- KEY_STYLE_FLAGS = 2, // int
- KEY_BACKGROUND_COLOR_RGBA = 3, // int
- KEY_HIGHLIGHT_COLOR_RGBA = 4, // int
- KEY_SCROLL_DELAY = 5, // int
- KEY_WRAP_TEXT = 6, // int
- KEY_START_TIME = 7, // int
- KEY_STRUCT_BLINKING_TEXT_LIST = 8, // List<CharPos>
- KEY_STRUCT_FONT_LIST = 9, // List<Font>
- KEY_STRUCT_HIGHLIGHT_LIST = 10, // List<CharPos>
- KEY_STRUCT_HYPER_TEXT_LIST = 11, // List<HyperText>
- KEY_STRUCT_KARAOKE_LIST = 12, // List<Karaoke>
- KEY_STRUCT_STYLE_LIST = 13, // List<Style>
- KEY_STRUCT_TEXT_POS = 14, // TextPos
- KEY_STRUCT_JUSTIFICATION = 15, // Justification
- KEY_STRUCT_TEXT = 16, // Text
-
- KEY_GLOBAL_SETTING = 101,
- KEY_LOCAL_SETTING = 102,
- KEY_START_CHAR = 103,
- KEY_END_CHAR = 104,
- KEY_FONT_ID = 105,
- KEY_FONT_SIZE = 106,
- KEY_TEXT_COLOR_RGBA = 107,
- };
-
- static status_t extractSRTLocalDescriptions(
- const uint8_t *data, ssize_t size,
- int timeMs, PlayerMessage *playerMsg);
- static status_t extract3GPPGlobalDescriptions(
- const uint8_t *data, ssize_t size,
- PlayerMessage *playerMsg);
- static status_t extract3GPPLocalDescriptions(
- const uint8_t *data, ssize_t size,
- int timeMs, PlayerMessage *playerMsg);
-
- DISALLOW_EVIL_CONSTRUCTORS(TextDescriptions2);
-};
-
-} // namespace android
-#endif // TEXT_DESCRIPTIONS2_H_
diff --git a/media/libstagefright/webm/Android.bp b/media/libstagefright/webm/Android.bp
index 64ecc2d..2cebe8f 100644
--- a/media/libstagefright/webm/Android.bp
+++ b/media/libstagefright/webm/Android.bp
@@ -27,12 +27,14 @@
include_dirs: ["frameworks/av/include"],
shared_libs: [
+ "libdatasource",
"libstagefright_foundation",
"libutils",
"liblog",
],
header_libs: [
+ "libmedia_headers",
"media_ndk_headers",
],
}
diff --git a/media/libstagefright/webm/WebmFrameThread.h b/media/libstagefright/webm/WebmFrameThread.h
index 1ddaf9a..2dde20a 100644
--- a/media/libstagefright/webm/WebmFrameThread.h
+++ b/media/libstagefright/webm/WebmFrameThread.h
@@ -20,8 +20,8 @@
#include "WebmFrame.h"
#include "LinkedBlockingQueue.h"
+#include <datasource/FileSource.h>
#include <media/MediaSource.h>
-#include <media/stagefright/FileSource.h>
#include <utils/List.h>
#include <utils/Errors.h>
diff --git a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
index 9783e9b..d905b8d 100644
--- a/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
+++ b/media/libstagefright/xmlparser/MediaCodecsXmlParser.cpp
@@ -1269,7 +1269,7 @@
void MediaCodecsXmlParser::Impl::State::addDetail(
const std::string &key, const std::string &value) {
CHECK(inType());
- ALOGI("limit: %s = %s", key.c_str(), value.c_str());
+ ALOGV("limit: %s = %s", key.c_str(), value.c_str());
const StringSet &variants = mVariantsStack.back();
if (variants.empty()) {
type()[key] = value;
diff --git a/media/libstagefright/xmlparser/api/current.txt b/media/libstagefright/xmlparser/api/current.txt
index 9d7c57d..16c8af8 100644
--- a/media/libstagefright/xmlparser/api/current.txt
+++ b/media/libstagefright/xmlparser/api/current.txt
@@ -138,7 +138,12 @@
public class Variant {
ctor public Variant();
+ method public java.util.List<media.codecs.Alias> getAlias_optional();
+ method public java.util.List<media.codecs.Quirk> getAttribute_optional();
+ method public java.util.List<media.codecs.Feature> getFeature_optional();
+ method public java.util.List<media.codecs.Limit> getLimit_optional();
method public String getName();
+ method public java.util.List<media.codecs.Quirk> getQuirk_optional();
method public void setName(String);
}
diff --git a/media/libstagefright/xmlparser/media_codecs.xsd b/media/libstagefright/xmlparser/media_codecs.xsd
index 63ec5d0..3b5681f 100644
--- a/media/libstagefright/xmlparser/media_codecs.xsd
+++ b/media/libstagefright/xmlparser/media_codecs.xsd
@@ -107,6 +107,13 @@
<xs:attribute name="value" type="xs:string"/>
</xs:complexType>
<xs:complexType name="Variant">
+ <xs:choice minOccurs="0" maxOccurs="unbounded">
+ <xs:element name="Quirk" type="Quirk"/>
+ <xs:element name="Attribute" type="Quirk"/>
+ <xs:element name="Alias" type="Alias"/>
+ <xs:element name="Limit" type="Limit"/>
+ <xs:element name="Feature" type="Feature"/>
+ </xs:choice>
<xs:attribute name="name" type="xs:string"/>
</xs:complexType>
<xs:complexType name="Setting">
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index 7b22b05..1f65372 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -40,7 +40,7 @@
ALOGI("ServiceManager: %p", sm.get());
AIcu_initializeIcuOrDie();
MediaPlayerService::instantiate();
- ResourceManagerService::instantiate();
+ media::ResourceManagerService::instantiate();
registerExtensions();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 6adf563..af69a10 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -42,6 +42,7 @@
#include "MtpServer.h"
#include "MtpStorage.h"
#include "MtpStringBuffer.h"
+#include "android-base/strings.h"
namespace android {
static const int SN_EVENT_LOG_ID = 0x534e4554;
@@ -956,6 +957,11 @@
if (!mData.getString(modified)) return MTP_RESPONSE_INVALID_PARAMETER; // date modified
// keywords follow
+ int type = storage->getType();
+ if (type == MTP_STORAGE_REMOVABLE_RAM) {
+ std::string str = android::base::Trim((const char*)name);
+ name.set(str.c_str());
+ }
ALOGV("name: %s format: 0x%04X (%s)\n", (const char*)name, format,
MtpDebug::getFormatCodeName(format));
time_t modifiedTime;
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index a3cabd8..032c5e2 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -54,7 +54,6 @@
],
include_dirs: [
- "bionic/libc/private",
"frameworks/base/core/jni",
"frameworks/native/include/media/openmax",
"system/media/camera/include",
@@ -70,27 +69,30 @@
"libgrallocusage",
],
+ header_libs: [
+ "libmediadrm_headers",
+ ],
+
shared_libs: [
"android.hardware.graphics.bufferqueue@1.0",
"android.hidl.token@1.0-utils",
"libandroid_runtime_lazy",
"libbase",
"libbinder",
+ "libdatasource",
"libmedia",
+ "libmediadrm",
"libmedia_omx",
"libmedia_jni_utils",
- "libmediadrm",
"libstagefright",
"libstagefright_foundation",
"liblog",
"libutils",
"libcutils",
"libnativewindow",
- "libbinder",
"libhidlbase",
"libgui",
"libui",
- "libmedia2_jni_core",
"libmediandk_utils",
],
@@ -146,6 +148,10 @@
"-Wall",
],
+ header_libs: [
+ "libmedia_headers",
+ ],
+
shared_libs: [
],
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index c23f19b..af21a99 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -221,7 +221,13 @@
break;
}
- AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&format);
+ // Here format is MediaCodec's internal copy of output format.
+ // Make a copy since the client might modify it.
+ sp<AMessage> copy;
+ if (format != nullptr) {
+ copy = format->dup();
+ }
+ AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&copy);
Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
if (mCodec->mAsyncCallbackUserData != NULL
@@ -244,8 +250,8 @@
ALOGE("CB_ERROR: err is expected.");
break;
}
- if (!msg->findInt32("action", &actionCode)) {
- ALOGE("CB_ERROR: action is expected.");
+ if (!msg->findInt32("actionCode", &actionCode)) {
+ ALOGE("CB_ERROR: actionCode is expected.");
break;
}
msg->findString("detail", &detail);
diff --git a/media/ndk/NdkMediaCrypto.cpp b/media/ndk/NdkMediaCrypto.cpp
index ce2c660..792fc00 100644
--- a/media/ndk/NdkMediaCrypto.cpp
+++ b/media/ndk/NdkMediaCrypto.cpp
@@ -27,8 +27,8 @@
#include <utils/Log.h>
#include <utils/StrongPointer.h>
#include <binder/IServiceManager.h>
-#include <media/ICrypto.h>
-#include <media/IMediaDrmService.h>
+#include <mediadrm/ICrypto.h>
+#include <mediadrm/IMediaDrmService.h>
#include <android_util_Binder.h>
#include <jni.h>
diff --git a/media/ndk/NdkMediaCryptoPriv.h b/media/ndk/NdkMediaCryptoPriv.h
index 14ea928..8664d95 100644
--- a/media/ndk/NdkMediaCryptoPriv.h
+++ b/media/ndk/NdkMediaCryptoPriv.h
@@ -30,7 +30,7 @@
#include <sys/types.h>
#include <utils/StrongPointer.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
using namespace android;
diff --git a/media/ndk/NdkMediaDataSource.cpp b/media/ndk/NdkMediaDataSource.cpp
index 7979c2f..c1d4686 100644
--- a/media/ndk/NdkMediaDataSource.cpp
+++ b/media/ndk/NdkMediaDataSource.cpp
@@ -26,18 +26,16 @@
#include <android_runtime/AndroidRuntime.h>
#include <android_util_Binder.h>
#include <cutils/properties.h>
-#include <utils/Log.h>
-#include <utils/StrongPointer.h>
+#include <datasource/DataSourceFactory.h>
+#include <datasource/HTTPBase.h>
+#include <datasource/NuCachedSource2.h>
#include <media/IMediaHTTPService.h>
#include <media/NdkMediaError.h>
#include <media/NdkMediaDataSource.h>
-#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/InterfaceUtils.h>
-#include <mediaplayer2/JavaVMHelper.h>
-#include <mediaplayer2/JMedia2HTTPService.h>
+#include <utils/Log.h>
+#include <utils/StrongPointer.h>
-#include "../../libstagefright/include/HTTPBase.h"
-#include "../../libstagefright/include/NuCachedSource2.h"
#include "NdkMediaDataSourceCallbacksPriv.h"
@@ -120,18 +118,11 @@
return size >= 0 ? OK : UNKNOWN_ERROR;
}
-static sp<MediaHTTPService> createMediaHttpServiceFromJavaObj(JNIEnv *env, jobject obj, int version) {
+static sp<MediaHTTPService> createMediaHttpServiceFromJavaObj(JNIEnv *env, jobject obj) {
if (obj == NULL) {
return NULL;
}
- switch (version) {
- case 1:
- return interface_cast<IMediaHTTPService>(ibinderForJavaObject(env, obj));
- case 2:
- return new JMedia2HTTPService(env, obj);
- default:
- return NULL;
- }
+ return interface_cast<IMediaHTTPService>(ibinderForJavaObject(env, obj));
}
static sp<MediaHTTPService> createMediaHttpServiceTemplate(
@@ -139,8 +130,7 @@
const char *uri,
const char *clazz,
const char *method,
- const char *signature,
- int version) {
+ const char *signature) {
jobject service = NULL;
if (env == NULL) {
ALOGE("http service must be created from Java thread");
@@ -167,34 +157,22 @@
env->DeleteLocalRef(juri);
env->ExceptionClear();
- sp<MediaHTTPService> httpService = createMediaHttpServiceFromJavaObj(env, service, version);
+ sp<MediaHTTPService> httpService = createMediaHttpServiceFromJavaObj(env, service);
return httpService;
}
-sp<MediaHTTPService> createMediaHttpService(const char *uri, int version) {
+sp<MediaHTTPService> createMediaHttpService(const char *uri) {
JNIEnv *env;
const char *clazz, *method, *signature;
- switch (version) {
- case 1:
- env = AndroidRuntime::getJNIEnv();
- clazz = "android/media/MediaHTTPService";
- method = "createHttpServiceBinderIfNecessary";
- signature = "(Ljava/lang/String;)Landroid/os/IBinder;";
- break;
- case 2:
- env = JavaVMHelper::getJNIEnv();
- clazz = "android/media/Media2HTTPService";
- method = "createHTTPService";
- signature = "(Ljava/lang/String;)Landroid/media/Media2HTTPService;";
- break;
- default:
- return NULL;
- }
+ env = AndroidRuntime::getJNIEnv();
+ clazz = "android/media/MediaHTTPService";
+ method = "createHttpServiceBinderIfNecessary";
+ signature = "(Ljava/lang/String;)Landroid/os/IBinder;";
- return createMediaHttpServiceTemplate(env, uri, clazz, method, signature, version);
+ return createMediaHttpServiceTemplate(env, uri, clazz, method, signature);
}
@@ -216,7 +194,7 @@
int numheaders,
const char * const *key_values) {
- sp<MediaHTTPService> service = createMediaHttpService(uri, /* version = */ 1);
+ sp<MediaHTTPService> service = createMediaHttpService(uri);
KeyedVector<String8, String8> headers;
for (int i = 0; i < numheaders; ++i) {
String8 key8(key_values[i * 2]);
@@ -224,7 +202,7 @@
headers.add(key8, value8);
}
- sp<DataSource> source = DataSourceFactory::CreateFromURI(service, uri, &headers);
+ sp<DataSource> source = DataSourceFactory::getInstance()->CreateFromURI(service, uri, &headers);
if (source == NULL) {
ALOGE("AMediaDataSource_newUri source is null");
return NULL;
diff --git a/media/ndk/NdkMediaDataSourcePriv.h b/media/ndk/NdkMediaDataSourcePriv.h
index 16ff974..ddcd7da 100644
--- a/media/ndk/NdkMediaDataSourcePriv.h
+++ b/media/ndk/NdkMediaDataSourcePriv.h
@@ -62,7 +62,7 @@
};
-sp<MediaHTTPService> createMediaHttpService(const char *uri, int version);
+sp<MediaHTTPService> createMediaHttpService(const char *uri);
#endif // _NDK_MEDIA_DATASOURCE_PRIV_H
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 85dbffe..842216c 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -29,12 +29,12 @@
#include <android-base/properties.h>
#include <binder/PermissionController.h>
-#include <media/IDrm.h>
-#include <media/IDrmClient.h>
+#include <mediadrm/IDrm.h>
+#include <mediadrm/IDrmClient.h>
#include <media/stagefright/MediaErrors.h>
#include <binder/IServiceManager.h>
-#include <media/IMediaDrmService.h>
#include <media/NdkMediaCrypto.h>
+#include <mediadrm/IMediaDrmService.h>
using namespace android;
@@ -89,7 +89,7 @@
};
void DrmListener::notify(DrmPlugin::EventType eventType, int extra, const Parcel *obj) {
- if (!mEventListener && !mExpirationUpdateListener && !mKeysChangeListener) {
+ if (!mEventListener || !mExpirationUpdateListener || !mKeysChangeListener) {
ALOGE("No listeners are specified");
return;
}
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index c83b255..0da0740 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -89,7 +89,7 @@
ALOGV("setDataSource(%s)", uri);
- sp<MediaHTTPService> httpService = createMediaHttpService(uri, /* version = */ 1);
+ sp<MediaHTTPService> httpService = createMediaHttpService(uri);
if (httpService == NULL) {
ALOGE("can't create http service");
return AMEDIA_ERROR_UNSUPPORTED;
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 3e60de0..62b8624 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -570,6 +570,8 @@
* return {@link AMEDIA_ERROR_INVALID_OBJECT}. Application still needs to call this method on those
* {@link AImage} objects to fully delete the {@link AImage} object from memory.</p>
*
+ * Available since API level 24.
+ *
* @param image The {@link AImage} to be deleted.
*/
void AImage_delete(AImage* image) __INTRODUCED_IN(24);
@@ -577,6 +579,8 @@
/**
* Query the width of the input {@link AImage}.
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param width the width of the image will be filled here if the method call succeeds.
*
@@ -591,6 +595,8 @@
/**
* Query the height of the input {@link AImage}.
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param height the height of the image will be filled here if the method call succeeds.
*
@@ -607,6 +613,8 @@
*
* <p>The format value will be one of AIMAGE_FORMAT_* enum value.</p>
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param format the format of the image will be filled here if the method call succeeds.
*
@@ -624,6 +632,8 @@
* <p>The crop rectangle specifies the region of valid pixels in the image, using coordinates in the
* largest-resolution plane.</p>
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param rect the cropped rectangle of the image will be filled here if the method call succeeds.
*
@@ -648,6 +658,8 @@
* {@link ACameraCaptureSession_captureCallbacks#onCaptureCompleted} callback.
* </p>
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param timestampNs the timestamp of the image will be filled here if the method call succeeds.
*
@@ -665,6 +677,8 @@
* <p>The number of plane of an {@link AImage} is determined by its format, which can be queried by
* {@link AImage_getFormat} method.</p>
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param numPlanes the number of planes of the image will be filled here if the method call
* succeeds.
@@ -687,6 +701,8 @@
* being returned.
* For formats where pixel stride is well defined, the pixel stride is always greater than 0.</p>
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param planeIdx the index of the plane. Must be less than the number of planes of input image.
* @param pixelStride the pixel stride of the image will be filled here if the method call succeeeds.
@@ -714,6 +730,8 @@
* being returned.
* For formats where row stride is well defined, the row stride is always greater than 0.</p>
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param planeIdx the index of the plane. Must be less than the number of planes of input image.
* @param rowStride the row stride of the image will be filled here if the method call succeeds.
@@ -739,6 +757,8 @@
* pointer from previous AImage_getPlaneData call becomes invalid. Do NOT use it after the
* {@link AImage} or the parent {@link AImageReader} is deleted.</p>
*
+ * Available since API level 24.
+ *
* @param image the {@link AImage} of interest.
* @param planeIdx the index of the plane. Must be less than the number of planes of input image.
* @param data the data pointer of the image will be filled here if the method call succeeds.
@@ -769,6 +789,8 @@
* signal the release of the hardware buffer back to the {@link AImageReader}'s queue using
* releaseFenceFd.</p>
*
+ * Available since API level 26.
+ *
* @param image The {@link AImage} to be deleted.
* @param releaseFenceFd A sync fence fd defined in {@link sync.h}, which signals the release of
* underlying {@link AHardwareBuffer}.
@@ -794,6 +816,8 @@
* {@link AImageReader_setBufferRemovedListener} to be notified when the buffer is no longer used
* by {@link AImageReader}.</p>
*
+ * Available since API level 26.
+ *
* @param image the {@link AImage} of interest.
* @param outBuffer The memory area pointed to by buffer will contain the acquired AHardwareBuffer
* handle.
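
For reference, the plane accessors documented above (format, plane count, pixel stride, row stride, plane data) are easiest to see used together. A minimal sketch, assuming an AImage that has already been acquired from an AImageReader; the helper name dumpImagePlanes is illustrative and error handling is omitted:

    #include <android/log.h>
    #include <media/NdkImage.h>

    // Dump the basic layout of an already-acquired AImage (e.g. YUV_420_888).
    static void dumpImagePlanes(AImage *image) {
        int32_t width = 0, height = 0, format = 0, numPlanes = 0;
        AImage_getWidth(image, &width);
        AImage_getHeight(image, &height);
        AImage_getFormat(image, &format);
        AImage_getNumberOfPlanes(image, &numPlanes);
        __android_log_print(ANDROID_LOG_INFO, "ImageDump",
                            "%dx%d format=0x%x planes=%d", width, height, format, numPlanes);

        for (int32_t i = 0; i < numPlanes; ++i) {
            int32_t pixelStride = 0, rowStride = 0;
            int dataLength = 0;
            uint8_t *data = nullptr;
            AImage_getPlanePixelStride(image, i, &pixelStride);
            AImage_getPlaneRowStride(image, i, &rowStride);
            AImage_getPlaneData(image, i, &data, &dataLength);
            __android_log_print(ANDROID_LOG_INFO, "ImageDump",
                                "plane %d: pixelStride=%d rowStride=%d bytes=%d",
                                i, pixelStride, rowStride, dataLength);
        }
        // The caller still owns 'image' and must release it with AImage_delete().
    }
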
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index e5d863c..600ffc9 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -67,6 +67,8 @@
* The valid sizes and formats depend on the source of the image data.
* </p>
*
+ * Available since API level 24.
+ *
* @param width The default width in pixels of the Images that this reader will produce.
* @param height The default height in pixels of the Images that this reader will produce.
* @param format The format of the Image that this reader will produce. This must be one of the
@@ -101,6 +103,8 @@
* making any of data pointers obtained from {@link AImage_getPlaneData} invalid. Do NOT access
* the reader object or any of those data pointers after this method returns.</p>
*
+ * Available since API level 24.
+ *
* @param reader The image reader to be deleted.
*/
void AImageReader_delete(AImageReader* reader) __INTRODUCED_IN(24);
@@ -108,6 +112,8 @@
/**
* Get a {@link ANativeWindow} that can be used to produce {@link AImage} for this image reader.
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param window The output {@link ANativeWindow} will be filled here if the method call succeeds.
* The {@link ANativeWindow} is managed by this image reader. Do NOT call
@@ -126,6 +132,8 @@
* {@link ANativeWindow}. If so, the actual width of the images can be found using
* {@link AImage_getWidth}.</p>
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param width the default width of the reader will be filled here if the method call succeeds.
*
@@ -142,6 +150,8 @@
* {@link ANativeWindow}. If so, the actual height of the images can be found using
* {@link AImage_getHeight}.</p>
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param height the default height of the reader will be filled here if the method call succeeds.
*
@@ -154,6 +164,8 @@
/**
* Query the format of the {@link AImage} generated by this reader.
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param format the format of the reader will be filled here if the method call succeeds. The
* value will be one of the AIMAGE_FORMAT_* enum values defined in {@link NdkImage.h}.
@@ -167,6 +179,8 @@
/**
* Query the maximum number of concurrently acquired {@link AImage}s of this reader.
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param maxImages the maximum number of concurrently acquired images of the reader will be filled
* here if the method call succeeds.
@@ -197,6 +211,8 @@
* {@link AImage_delete}.
* </p>
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param image the acquired {@link AImage} will be filled here if the method call succeeds.
*
@@ -214,7 +230,6 @@
media_status_t AImageReader_acquireNextImage(AImageReader* reader, /*out*/AImage** image) __INTRODUCED_IN(24);
/**
-
* Acquire the latest {@link AImage} from the image reader's queue, dropping older images.
*
* <p>
@@ -241,6 +256,8 @@
* {@link AImage_delete}.
* </p>
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param image the acquired {@link AImage} will be filled here if the method call succeeds.
*
@@ -290,6 +307,8 @@
*
* Calling this method will replace previously registered listeners.
*
+ * Available since API level 24.
+ *
* @param reader The image reader of interest.
* @param listener The {@link AImageReader_ImageListener} to be registered. Set this to NULL if
* the application no longer needs to listen to new images.
@@ -356,6 +375,9 @@
* {@link AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE}, or combined</td>
* </tr>
* </table>
+ *
+ * Available since API level 26.
+ *
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
* <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader is NULL, or one or more of width,
@@ -377,6 +399,8 @@
* additional parameter for the sync fence. All other parameters and the return values are
* identical to those passed to {@link AImageReader_acquireNextImage}.</p>
*
+ * Available since API level 26.
+ *
* @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
* buffer is ready to consume. When synchronization fence is not needed, fence will be set
* to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
@@ -397,6 +421,8 @@
* additional parameter for the sync fence. All other parameters and the return values are
* identical to those passed to {@link AImageReader_acquireLatestImage}.</p>
*
+ * Available since API level 26.
+ *
* @param acquireFenceFd A sync fence fd defined in {@link sync.h}, which is used to signal when the
* buffer is ready to consume. When synchronization fence is not needed, fence will be set
* to -1 and the {@link AImage} returned is ready for use immediately. Otherwise, user shall
@@ -408,6 +434,7 @@
*/
media_status_t AImageReader_acquireLatestImageAsync(
AImageReader* reader, /*out*/AImage** image, /*out*/int* acquireFenceFd) __INTRODUCED_IN(26);
+
/**
* Signature of the callback which is called when {@link AImageReader} is about to remove a buffer.
*
@@ -451,6 +478,8 @@
*
* <p>Note that calling this method will replace previously registered listeners.</p>
*
+ * Available since API level 26.
+ *
* @param reader The image reader of interest.
* @param listener the {@link AImageReader_BufferRemovedListener} to be registered. Set this to
* NULL if application no longer needs to listen to buffer removed events.
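
The reader lifecycle described above (create, register an image listener, acquire and release images) fits together as in the following minimal sketch; the 640x480 YUV_420_888 configuration and the names onImageAvailable/createPreviewReader are only examples:

    #include <media/NdkImage.h>
    #include <media/NdkImageReader.h>

    // Illustrative callback: acquire the newest image and release it immediately.
    static void onImageAvailable(void *context, AImageReader *reader) {
        (void)context;
        AImage *image = nullptr;
        if (AImageReader_acquireLatestImage(reader, &image) == AMEDIA_OK) {
            int64_t timestampNs = 0;
            AImage_getTimestamp(image, &timestampNs);
            // ... consume the image data here ...
            AImage_delete(image);  // always return the image to the reader's queue
        }
    }

    // Hypothetical setup function; format and size values are only an example.
    static AImageReader *createPreviewReader() {
        AImageReader *reader = nullptr;
        if (AImageReader_new(640, 480, AIMAGE_FORMAT_YUV_420_888,
                             /*maxImages=*/4, &reader) != AMEDIA_OK) {
            return nullptr;
        }
        AImageReader_ImageListener listener = { /*context=*/nullptr, onImageAvailable };
        AImageReader_setImageListener(reader, &listener);

        ANativeWindow *window = nullptr;
        AImageReader_getWindow(reader, &window);  // window is owned by the reader
        // Hand 'window' to the producer (e.g. the camera) ...
        return reader;  // delete later with AImageReader_delete()
    }
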
diff --git a/media/ndk/include/media/NdkMediaCodec.h b/media/ndk/include/media/NdkMediaCodec.h
index b3ee853..1823fbc 100644
--- a/media/ndk/include/media/NdkMediaCodec.h
+++ b/media/ndk/include/media/NdkMediaCodec.h
@@ -127,27 +127,37 @@
* Create codec by name. Use this if you know the exact codec you want to use.
* When configuring, you will need to specify whether to use the codec as an
* encoder or decoder.
+ *
+ * Available since API level 21.
*/
AMediaCodec* AMediaCodec_createCodecByName(const char *name) __INTRODUCED_IN(21);
/**
* Create codec by mime type. Most applications will use this, specifying a
* mime type obtained from media extractor.
+ *
+ * Available since API level 21.
*/
AMediaCodec* AMediaCodec_createDecoderByType(const char *mime_type) __INTRODUCED_IN(21);
/**
* Create encoder by name.
+ *
+ * Available since API level 21.
*/
AMediaCodec* AMediaCodec_createEncoderByType(const char *mime_type) __INTRODUCED_IN(21);
/**
- * delete the codec and free its resources
+ * Delete the codec and free its resources.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_delete(AMediaCodec*) __INTRODUCED_IN(21);
/**
* Configure the codec. For decoding you would typically get the format from an extractor.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_configure(
AMediaCodec*,
@@ -159,29 +169,39 @@
/**
* Start the codec. A codec must be configured before it can be started, and must be started
* before buffers can be sent to it.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_start(AMediaCodec*) __INTRODUCED_IN(21);
/**
* Stop the codec.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_stop(AMediaCodec*) __INTRODUCED_IN(21);
/*
* Flush the codec's input and output. All indices previously returned from calls to
* AMediaCodec_dequeueInputBuffer and AMediaCodec_dequeueOutputBuffer become invalid.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_flush(AMediaCodec*) __INTRODUCED_IN(21);
/**
* Get an input buffer. The specified buffer index must have been previously obtained from
* dequeueInputBuffer, and not yet queued.
+ *
+ * Available since API level 21.
*/
uint8_t* AMediaCodec_getInputBuffer(AMediaCodec*, size_t idx, size_t *out_size) __INTRODUCED_IN(21);
/**
* Get an output buffer. The specified buffer index must have been previously obtained from
* dequeueOutputBuffer, and not yet queued.
+ *
+ * Available since API level 21.
*/
uint8_t* AMediaCodec_getOutputBuffer(AMediaCodec*, size_t idx, size_t *out_size) __INTRODUCED_IN(21);
@@ -189,6 +209,8 @@
* Get the index of the next available input buffer. An app will typically use this with
* getInputBuffer() to get a pointer to the buffer, then copy the data to be encoded or decoded
* into the buffer before passing it to the codec.
+ *
+ * Available since API level 21.
*/
ssize_t AMediaCodec_dequeueInputBuffer(AMediaCodec*, int64_t timeoutUs) __INTRODUCED_IN(21);
@@ -218,6 +240,8 @@
/**
* Send the specified buffer to the codec for processing.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_queueInputBuffer(AMediaCodec*, size_t idx,
_off_t_compat offset, size_t size,
@@ -225,6 +249,8 @@
/**
* Send the specified buffer to the codec for processing.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_queueSecureInputBuffer(AMediaCodec*, size_t idx,
_off_t_compat offset,
@@ -235,15 +261,23 @@
/**
* Get the index of the next available buffer of processed data.
+ *
+ * Available since API level 21.
*/
ssize_t AMediaCodec_dequeueOutputBuffer(AMediaCodec*, AMediaCodecBufferInfo *info,
int64_t timeoutUs) __INTRODUCED_IN(21);
+
+/**
+ * Available since API level 21.
+ */
AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*) __INTRODUCED_IN(21);
/**
* If you are done with a buffer, use this call to return the buffer to
* the codec. If you previously specified a surface when configuring this
* video decoder you can optionally render the buffer.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_releaseOutputBuffer(AMediaCodec*, size_t idx, bool render) __INTRODUCED_IN(21);
@@ -256,6 +290,8 @@
* to ImageReader (software readable) output.
*
* For more details, see the Java documentation for MediaCodec.setOutputSurface.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_setOutputSurface(AMediaCodec*, ANativeWindow* surface) __INTRODUCED_IN(21);
@@ -266,6 +302,8 @@
* this call will simply return the buffer to the codec.
*
* For more details, see the Java documentation for MediaCodec.releaseOutputBuffer.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodec_releaseOutputBufferAtTime(
AMediaCodec *mData, size_t idx, int64_t timestampNs) __INTRODUCED_IN(21);
@@ -282,6 +320,8 @@
* ANativeWindow_release() when done.
*
* For more details, see the Java documentation for MediaCodec.createInputSurface.
+ *
+ * Available since API level 26.
*/
media_status_t AMediaCodec_createInputSurface(
AMediaCodec *mData, ANativeWindow **surface) __INTRODUCED_IN(26);
@@ -298,6 +338,8 @@
* ANativeWindow_release() when done.
*
* For more details, see the Java documentation for MediaCodec.createPersistentInputSurface.
+ *
+ * Available since API level 26.
*/
media_status_t AMediaCodec_createPersistentInputSurface(
ANativeWindow **surface) __INTRODUCED_IN(26);
@@ -311,6 +353,8 @@
* AMediaCodec_configure(..); and before AMediaCodec_start() has been called.
*
* For more details, see the Java documentation for MediaCodec.setInputSurface.
+ *
+ * Available since API level 26.
*/
media_status_t AMediaCodec_setInputSurface(
AMediaCodec *mData, ANativeWindow *surface) __INTRODUCED_IN(26);
@@ -322,6 +366,8 @@
* after AMediaCodec_start() has been called.
*
* NOTE: Some of these parameter changes may silently fail to apply.
+ *
+ * Available since API level 26.
*/
media_status_t AMediaCodec_setParameters(
AMediaCodec *mData, const AMediaFormat* params) __INTRODUCED_IN(26);
@@ -339,6 +385,8 @@
* Returns AMEDIA_OK when completed successfully.
*
* For more details, see the Java documentation for MediaCodec.signalEndOfInputStream.
+ *
+ * Available since API level 26.
*/
media_status_t AMediaCodec_signalEndOfInputStream(AMediaCodec *mData) __INTRODUCED_IN(26);
@@ -349,6 +397,8 @@
/**
* Get format of the buffer. The specified buffer index must have been previously obtained from
* dequeueOutputBuffer.
+ *
+ * Available since API level 28.
*/
AMediaFormat* AMediaCodec_getBufferFormat(AMediaCodec*, size_t index) __INTRODUCED_IN(28);
@@ -356,11 +406,15 @@
* Get the component name. If the codec was created by createDecoderByType
* or createEncoderByType, what component is chosen is not known beforehand.
* Caller shall call AMediaCodec_releaseName to free the returned pointer.
+ *
+ * Available since API level 28.
*/
media_status_t AMediaCodec_getName(AMediaCodec*, char** out_name) __INTRODUCED_IN(28);
/**
* Free the memory pointed by name which is returned by AMediaCodec_getName.
+ *
+ * Available since API level 28.
*/
void AMediaCodec_releaseName(AMediaCodec*, char* name) __INTRODUCED_IN(28);
@@ -382,6 +436,8 @@
* All callbacks are fired on one NDK internal thread.
* AMediaCodec_setAsyncNotifyCallback should not be called on the callback thread.
* No heavy duty task should be performed on callback thread.
+ *
+ * Available since API level 28.
*/
media_status_t AMediaCodec_setAsyncNotifyCallback(
AMediaCodec*,
@@ -390,6 +446,8 @@
/**
* Release the crypto if applicable.
+ *
+ * Available since API level 28.
*/
media_status_t AMediaCodec_releaseCrypto(AMediaCodec*) __INTRODUCED_IN(28);
@@ -397,12 +455,16 @@
* Call this after AMediaCodec_configure() returns successfully to get the input
* format accepted by the codec. Do this to determine what optional configuration
* parameters were supported by the codec.
+ *
+ * Available since API level 28.
*/
AMediaFormat* AMediaCodec_getInputFormat(AMediaCodec*) __INTRODUCED_IN(28);
/**
* Returns true if the codec cannot proceed further, but can be recovered by stopping,
* configuring, and starting again.
+ *
+ * Available since API level 28.
*/
bool AMediaCodecActionCode_isRecoverable(int32_t actionCode) __INTRODUCED_IN(28);
@@ -410,6 +472,8 @@
* Returns true if the codec error is a transient issue, perhaps due to
* resource constraints, and that the method (or encoding/decoding) may be
* retried at a later time.
+ *
+ * Available since API level 28.
*/
bool AMediaCodecActionCode_isTransient(int32_t actionCode) __INTRODUCED_IN(28);
@@ -440,6 +504,8 @@
* numBytesOfClearData can be null to indicate that all data is encrypted.
* This information encapsulates per-sample metadata as outlined in
* ISO/IEC FDIS 23001-7:2011 "Common encryption in ISO base media file format files".
+ *
+ * Available since API level 21.
*/
AMediaCodecCryptoInfo *AMediaCodecCryptoInfo_new(
int numsubsamples,
@@ -450,13 +516,17 @@
size_t *encryptedbytes) __INTRODUCED_IN(21);
/**
- * delete an AMediaCodecCryptoInfo created previously with AMediaCodecCryptoInfo_new, or
- * obtained from AMediaExtractor
+ * Delete an AMediaCodecCryptoInfo created previously with AMediaCodecCryptoInfo_new, or
+ * obtained from AMediaExtractor.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodecCryptoInfo_delete(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
/**
- * Set the crypto pattern on an AMediaCryptoInfo object
+ * Set the crypto pattern on an AMediaCryptoInfo object.
+ *
+ * Available since API level 21.
*/
void AMediaCodecCryptoInfo_setPattern(
AMediaCodecCryptoInfo *info,
@@ -464,32 +534,44 @@
/**
* The number of subsamples that make up the buffer's contents.
+ *
+ * Available since API level 21.
*/
size_t AMediaCodecCryptoInfo_getNumSubSamples(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
/**
- * A 16-byte opaque key
+ * A 16-byte opaque key.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodecCryptoInfo_getKey(AMediaCodecCryptoInfo*, uint8_t *dst) __INTRODUCED_IN(21);
/**
- * A 16-byte initialization vector
+ * A 16-byte initialization vector.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodecCryptoInfo_getIV(AMediaCodecCryptoInfo*, uint8_t *dst) __INTRODUCED_IN(21);
/**
* The type of encryption that has been applied,
* one of AMEDIACODECRYPTOINFO_MODE_CLEAR or AMEDIACODECRYPTOINFO_MODE_AES_CTR.
+ *
+ * Available since API level 21.
*/
cryptoinfo_mode_t AMediaCodecCryptoInfo_getMode(AMediaCodecCryptoInfo*) __INTRODUCED_IN(21);
/**
* The number of leading unencrypted bytes in each subsample.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodecCryptoInfo_getClearBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
/**
* The number of trailing encrypted bytes in each subsample.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaCodecCryptoInfo_getEncryptedBytes(AMediaCodecCryptoInfo*, size_t *dst) __INTRODUCED_IN(21);
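
The buffer-rotation calls documented above are normally used in the classic synchronous decode loop, fed by an AMediaExtractor. A condensed sketch; the function name decodeTrack is illustrative, and error handling and timeout tuning are omitted:

    #include <media/NdkMediaCodec.h>
    #include <media/NdkMediaExtractor.h>
    #include <media/NdkMediaFormat.h>

    // Condensed synchronous decode loop for one already-selected extractor track.
    static void decodeTrack(AMediaExtractor *ex, AMediaFormat *trackFormat, const char *mime) {
        AMediaCodec *codec = AMediaCodec_createDecoderByType(mime);
        if (codec == nullptr) return;
        AMediaCodec_configure(codec, trackFormat, /*surface=*/nullptr, /*crypto=*/nullptr, /*flags=*/0);
        AMediaCodec_start(codec);

        bool inputDone = false, outputDone = false;
        while (!outputDone) {
            if (!inputDone) {
                ssize_t inIdx = AMediaCodec_dequeueInputBuffer(codec, /*timeoutUs=*/2000);
                if (inIdx >= 0) {
                    size_t cap = 0;
                    uint8_t *buf = AMediaCodec_getInputBuffer(codec, inIdx, &cap);
                    ssize_t n = AMediaExtractor_readSampleData(ex, buf, cap);
                    if (n < 0) {
                        AMediaCodec_queueInputBuffer(codec, inIdx, 0, 0, 0,
                                                     AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM);
                        inputDone = true;
                    } else {
                        AMediaCodec_queueInputBuffer(codec, inIdx, 0, n,
                                                     AMediaExtractor_getSampleTime(ex), 0);
                        AMediaExtractor_advance(ex);
                    }
                }
            }
            AMediaCodecBufferInfo info;
            ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(codec, &info, /*timeoutUs=*/2000);
            if (outIdx >= 0) {
                // ... consume the decoded buffer via AMediaCodec_getOutputBuffer() ...
                AMediaCodec_releaseOutputBuffer(codec, outIdx, /*render=*/false);
                if (info.flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) outputDone = true;
            } else if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
                AMediaFormat *fmt = AMediaCodec_getOutputFormat(codec);
                AMediaFormat_delete(fmt);  // caller owns the returned format
            }
        }
        AMediaCodec_stop(codec);
        AMediaCodec_delete(codec);
    }
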
diff --git a/media/ndk/include/media/NdkMediaCrypto.h b/media/ndk/include/media/NdkMediaCrypto.h
index bcdf9a0..3fa07c7 100644
--- a/media/ndk/include/media/NdkMediaCrypto.h
+++ b/media/ndk/include/media/NdkMediaCrypto.h
@@ -49,12 +49,24 @@
#if __ANDROID_API__ >= 21
+/**
+ * Available since API level 21.
+ */
bool AMediaCrypto_isCryptoSchemeSupported(const AMediaUUID uuid) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
bool AMediaCrypto_requiresSecureDecoderComponent(const char *mime) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
AMediaCrypto* AMediaCrypto_new(const AMediaUUID uuid, const void *initData, size_t initDataSize) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
void AMediaCrypto_delete(AMediaCrypto* crypto) __INTRODUCED_IN(21);
#endif /* __ANDROID_API__ >= 21 */
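
A minimal sketch of the crypto-object lifecycle above. The Widevine UUID is only an example scheme, and passing a DRM session ID as the init data is one common pattern rather than something the header mandates:

    #include <media/NdkMediaCrypto.h>

    // Example UUID (Widevine); any 16-byte scheme UUID is handled the same way.
    static const AMediaUUID kExampleUuid = {
        0xed, 0xef, 0x8b, 0xa9, 0x79, 0xd6, 0x4a, 0xce,
        0xa3, 0xc8, 0x27, 0xdc, 0xd5, 0x1d, 0x21, 0xed
    };

    static AMediaCrypto *createCryptoIfSupported(const uint8_t *sessionId, size_t sessionIdLen) {
        if (!AMediaCrypto_isCryptoSchemeSupported(kExampleUuid)) {
            return nullptr;  // scheme not available on this device
        }
        // The returned object is typically passed to AMediaCodec_configure() and
        // freed with AMediaCrypto_delete() when no longer needed.
        return AMediaCrypto_new(kExampleUuid, sessionId, sessionIdLen);
    }
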
diff --git a/media/ndk/include/media/NdkMediaDataSource.h b/media/ndk/include/media/NdkMediaDataSource.h
index 16b1eb3..0577df2 100644
--- a/media/ndk/include/media/NdkMediaDataSource.h
+++ b/media/ndk/include/media/NdkMediaDataSource.h
@@ -88,6 +88,8 @@
/**
* Create new media data source. Returns NULL if memory allocation
* for the new data source object fails.
+ *
+ * Available since API level 28.
*/
AMediaDataSource* AMediaDataSource_new() __INTRODUCED_IN(28);
@@ -116,6 +118,7 @@
* ...
* key_values[(numheaders - 1) * 2]:key_values[(numheaders - 1) * 2 + 1]
*
+ * Available since API level 29.
*/
AMediaDataSource* AMediaDataSource_newUri(const char *uri,
int numheaders,
@@ -125,12 +128,16 @@
/**
* Delete a previously created media data source.
+ *
+ * Available since API level 28.
*/
void AMediaDataSource_delete(AMediaDataSource*) __INTRODUCED_IN(28);
/**
* Set a user-provided opaque handle. This opaque handle is passed as
* the first argument to the data source callbacks.
+ *
+ * Available since API level 28.
*/
void AMediaDataSource_setUserdata(
AMediaDataSource*, void *userdata) __INTRODUCED_IN(28);
@@ -145,6 +152,8 @@
*
* Please refer to the definition of AMediaDataSourceReadAt for
* additional details.
+ *
+ * Available since API level 28.
*/
void AMediaDataSource_setReadAt(
AMediaDataSource*,
@@ -156,6 +165,8 @@
*
* Please refer to the definition of AMediaDataSourceGetSize for
* additional details.
+ *
+ * Available since API level 28.
*/
void AMediaDataSource_setGetSize(
AMediaDataSource*,
@@ -167,6 +178,8 @@
*
* Please refer to the definition of AMediaDataSourceClose for
* additional details.
+ *
+ * Available since API level 28.
*/
void AMediaDataSource_setClose(
AMediaDataSource*,
@@ -181,6 +194,8 @@
*
* Please refer to the definition of AMediaDataSourceClose for
* additional details.
+ *
+ * Available since API level 29.
*/
void AMediaDataSource_close(AMediaDataSource*) __INTRODUCED_IN(29);
@@ -191,6 +206,8 @@
*
* Please refer to the definition of AMediaDataSourceGetAvailableSize
* for additional details.
+ *
+ * Available since API level 29.
*/
void AMediaDataSource_setGetAvailableSize(
AMediaDataSource*,
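
The callback setters above combine into a custom data source as in this minimal sketch, here backed by a plain FILE*; the function names and the simplified ownership handling are illustrative only:

    #include <cstdio>
    #include <media/NdkMediaDataSource.h>
    #include <media/NdkMediaExtractor.h>

    // Callbacks for a custom AMediaDataSource backed by a plain FILE*.
    static ssize_t fileReadAt(void *userdata, off64_t offset, void *buffer, size_t size) {
        FILE *f = static_cast<FILE *>(userdata);
        if (fseeko(f, static_cast<off_t>(offset), SEEK_SET) != 0) return -1;
        size_t n = fread(buffer, 1, size, f);
        return n > 0 ? static_cast<ssize_t>(n) : -1;  // -1 signals error/EOF
    }

    static ssize_t fileGetSize(void *userdata) {
        FILE *f = static_cast<FILE *>(userdata);
        fseeko(f, 0, SEEK_END);
        return static_cast<ssize_t>(ftello(f));
    }

    static void fileClose(void *userdata) {
        fclose(static_cast<FILE *>(userdata));
    }

    static media_status_t openExtractorOnFile(AMediaExtractor *ex, const char *path) {
        FILE *f = fopen(path, "rb");
        if (f == nullptr) return AMEDIA_ERROR_UNKNOWN;
        AMediaDataSource *src = AMediaDataSource_new();
        AMediaDataSource_setUserdata(src, f);
        AMediaDataSource_setReadAt(src, fileReadAt);
        AMediaDataSource_setGetSize(src, fileGetSize);
        AMediaDataSource_setClose(src, fileClose);
        // Lifetime note: 'src' must stay valid while the extractor uses it and is
        // released later with AMediaDataSource_delete(); omitted here for brevity.
        return AMediaExtractor_setDataSourceCustom(ex, src);
    }
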
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 2e438d9..31f5c7d 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -174,41 +174,53 @@
* uuid identifies the universal unique ID of the crypto scheme. uuid must be 16 bytes.
* mimeType is the MIME type of the media container, e.g. "video/mp4". If mimeType
* is not known or required, it can be provided as NULL.
+ *
+ * Available since API level 21.
*/
bool AMediaDrm_isCryptoSchemeSupported(const uint8_t *uuid,
const char *mimeType) __INTRODUCED_IN(21);
/**
- * Create a MediaDrm instance from a UUID
+ * Create a MediaDrm instance from a UUID.
* uuid identifies the universal unique ID of the crypto scheme. uuid must be 16 bytes.
+ *
+ * Available since API level 21.
*/
AMediaDrm* AMediaDrm_createByUUID(const uint8_t *uuid) __INTRODUCED_IN(21);
/**
- * Release a MediaDrm object
+ * Release a MediaDrm object.
+ *
+ * Available since API level 21.
*/
void AMediaDrm_release(AMediaDrm *) __INTRODUCED_IN(21);
/**
- * Register a callback to be invoked when an event occurs
+ * Register a callback to be invoked when an event occurs.
*
- * listener is the callback that will be invoked on event
+ * listener is the callback that will be invoked on event.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_setOnEventListener(AMediaDrm *,
AMediaDrmEventListener listener) __INTRODUCED_IN(21);
/**
- * Register a callback to be invoked when an expiration update event occurs
+ * Register a callback to be invoked when an expiration update event occurs.
*
- * listener is the callback that will be invoked on event
+ * listener is the callback that will be invoked on event.
+ *
+ * Available since API level 29.
*/
media_status_t AMediaDrm_setOnExpirationUpdateListener(AMediaDrm *,
AMediaDrmExpirationUpdateListener listener) __INTRODUCED_IN(29);
/**
- * Register a callback to be invoked when a key status change event occurs
+ * Register a callback to be invoked when a key status change event occurs.
*
- * listener is the callback that will be invoked on event
+ * listener is the callback that will be invoked on event.
+ *
+ * Available since API level 29.
*/
media_status_t AMediaDrm_setOnKeysChangeListener(AMediaDrm *,
AMediaDrmKeysChangeListener listener) __INTRODUCED_IN(29);
@@ -216,8 +228,10 @@
/**
* Open a new session with the MediaDrm object. A session ID is returned.
*
- * returns MEDIADRM_NOT_PROVISIONED_ERROR if provisioning is needed
- * returns MEDIADRM_RESOURCE_BUSY_ERROR if required resources are in use
+ * Returns MEDIADRM_NOT_PROVISIONED_ERROR if provisioning is needed.
+ * Returns MEDIADRM_RESOURCE_BUSY_ERROR if required resources are in use.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_openSession(AMediaDrm *,
AMediaDrmSessionId *sessionId) __INTRODUCED_IN(21);
@@ -225,6 +239,8 @@
/**
* Close a session on the MediaDrm object that was previously opened
* with AMediaDrm_openSession.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_closeSession(AMediaDrm *,
const AMediaDrmSessionId *sessionId) __INTRODUCED_IN(21);
@@ -272,9 +288,11 @@
* MediaDrm object is released.
* 2. keyRequestSize will be set to the size of the request
*
- * returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
+ * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
* problem with the device certificate.
-*/
+ *
+ * Available since API level 21.
+ */
media_status_t AMediaDrm_getKeyRequest(AMediaDrm *, const AMediaDrmScope *scope,
const uint8_t *init, size_t initSize, const char *mimeType, AMediaDrmKeyType keyType,
const AMediaDrmKeyValue *optionalParameters, size_t numOptionalParameters,
@@ -295,8 +313,9 @@
*
* response points to the opaque response from the server
* responseSize should be set to the size of the response in bytes
+ *
+ * Available since API level 21.
*/
-
media_status_t AMediaDrm_provideKeyResponse(AMediaDrm *, const AMediaDrmScope *scope,
const uint8_t *response, size_t responseSize,
AMediaDrmKeySetId *keySetId) __INTRODUCED_IN(21);
@@ -305,8 +324,10 @@
* Restore persisted offline keys into a new session. keySetId identifies the
* keys to load, obtained from a prior call to AMediaDrm_provideKeyResponse.
*
- * sessionId is the session ID for the DRM session
- * keySetId identifies the saved key set to restore
+ * sessionId is the session ID for the DRM session.
+ * keySetId identifies the saved key set to restore.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_restoreKeys(AMediaDrm *, const AMediaDrmSessionId *sessionId,
const AMediaDrmKeySetId *keySetId) __INTRODUCED_IN(21);
@@ -314,7 +335,9 @@
/**
* Remove the current keys from a session.
*
- * keySetId identifies keys to remove
+ * keySetId identifies keys to remove.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_removeKeys(AMediaDrm *,
const AMediaDrmSessionId *keySetId) __INTRODUCED_IN(21);
@@ -331,6 +354,8 @@
* to the number of entries written to the array. If the number of {key, value} pairs
* to be returned is greater than *numPairs, MEDIADRM_SHORT_BUFFER will be returned
* and numPairs will be set to the number of pairs available.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_queryKeyStatus(AMediaDrm *, const AMediaDrmSessionId *sessionId,
AMediaDrmKeyValue *keyValuePairs, size_t *numPairs) __INTRODUCED_IN(21);
@@ -350,6 +375,8 @@
* 3. serverUrl will reference a NULL terminated string containing the URL
* the provisioning request should be sent to. It will remain accessible until
* the next call to getProvisionRequest.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_getProvisionRequest(AMediaDrm *, const uint8_t **provisionRequest,
size_t *provisionRequestSize, const char **serverUrl) __INTRODUCED_IN(21);
@@ -363,8 +390,10 @@
* DRM engine plugin.
* responseSize is the length of the provisioning response in bytes.
*
- * returns MEDIADRM_DEVICE_REVOKED_ERROR if the response indicates that the
+ * Returns MEDIADRM_DEVICE_REVOKED_ERROR if the response indicates that the
* server rejected the request
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_provideProvisionResponse(AMediaDrm *,
const uint8_t *response, size_t responseSize) __INTRODUCED_IN(21);
@@ -390,6 +419,8 @@
* If *numSecureStops is too small for the number of secure stops available,
* MEDIADRM_SHORT_BUFFER will be returned and *numSecureStops will be set to the
* number required.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_getSecureStops(AMediaDrm *,
AMediaDrmSecureStop *secureStops, size_t *numSecureStops) __INTRODUCED_IN(21);
@@ -399,6 +430,8 @@
* the message, remove the SecureStops identified in the response.
*
* ssRelease is the server response indicating which secure stops to release
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_releaseSecureStops(AMediaDrm *,
const AMediaDrmSecureStop *ssRelease) __INTRODUCED_IN(21);
@@ -432,6 +465,8 @@
* On return, propertyValue will be set to point to the property value. The
* memory that the value resides in is owned by the NDK MediaDrm API and
* will remain valid until the next call to AMediaDrm_getPropertyString.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_getPropertyString(AMediaDrm *, const char *propertyName,
const char **propertyValue) __INTRODUCED_IN(21);
@@ -447,18 +482,24 @@
* On return, *propertyValue will be set to point to the property value. The
* memory that the value resides in is owned by the NDK MediaDrm API and
* will remain valid until the next call to AMediaDrm_getPropertyByteArray.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_getPropertyByteArray(AMediaDrm *, const char *propertyName,
AMediaDrmByteArray *propertyValue) __INTRODUCED_IN(21);
/**
* Set a DRM engine plugin String property value.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_setPropertyString(AMediaDrm *, const char *propertyName,
const char *value) __INTRODUCED_IN(21);
/**
* Set a DRM engine plugin byte array property value.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_setPropertyByteArray(AMediaDrm *, const char *propertyName,
const uint8_t *value, size_t valueSize) __INTRODUCED_IN(21);
@@ -487,6 +528,8 @@
* ensure that the output buffer is large enough to accept dataSize bytes. The key
* to use is identified by the 16 byte keyId. The key must have been loaded into
* the session using provideKeyResponse.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_encrypt(AMediaDrm *, const AMediaDrmSessionId *sessionId,
const char *cipherAlgorithm, uint8_t *keyId, uint8_t *iv,
@@ -498,6 +541,8 @@
* ensure that the output buffer is large enough to accept dataSize bytes. The key
* to use is identified by the 16 byte keyId. The key must have been loaded into
* the session using provideKeyResponse.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_decrypt(AMediaDrm *, const AMediaDrmSessionId *sessionId,
const char *cipherAlgorithm, uint8_t *keyId, uint8_t *iv,
@@ -511,6 +556,8 @@
* *signatureSize is set to the buffer size required. The key to use is identified
* by the 16 byte keyId. The key must have been loaded into the session using
* provideKeyResponse.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_sign(AMediaDrm *, const AMediaDrmSessionId *sessionId,
const char *macAlgorithm, uint8_t *keyId, uint8_t *message, size_t messageSize,
@@ -522,6 +569,8 @@
 * if the signature matches, otherwise MEDIADRM_VERIFY_FAILED is returned. The key to
* use is identified by the 16 byte keyId. The key must have been loaded into the
* session using provideKeyResponse.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaDrm_verify(AMediaDrm *, const AMediaDrmSessionId *sessionId,
const char *macAlgorithm, uint8_t *keyId, const uint8_t *message, size_t messageSize,
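
The sign/verify comments above describe a MAC computed with a key previously loaded through provideKeyResponse. A sketch of the round trip, in which the algorithm name, key id, and signature buffer size are placeholder assumptions rather than values defined by this change:

    #include <media/NdkMediaDrm.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: sign a message, then verify the signature with the same key. */
    static bool signAndVerify(AMediaDrm *drm, const AMediaDrmSessionId *sessionId,
                              uint8_t keyId[16], uint8_t *message, size_t messageSize) {
        uint8_t signature[64];
        size_t signatureSize = sizeof(signature);
        if (AMediaDrm_sign(drm, sessionId, "HmacSHA256", keyId, message, messageSize,
                           signature, &signatureSize) != AMEDIA_OK) {
            return false;
        }
        return AMediaDrm_verify(drm, sessionId, "HmacSHA256", keyId, message, messageSize,
                                signature, signatureSize) == AMEDIA_OK;
    }
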
diff --git a/media/ndk/include/media/NdkMediaExtractor.h b/media/ndk/include/media/NdkMediaExtractor.h
index e3d9fe6..14319c4 100644
--- a/media/ndk/include/media/NdkMediaExtractor.h
+++ b/media/ndk/include/media/NdkMediaExtractor.h
@@ -52,23 +52,31 @@
#if __ANDROID_API__ >= 21
/**
- * Create new media extractor
+ * Create new media extractor.
+ *
+ * Available since API level 21.
*/
AMediaExtractor* AMediaExtractor_new() __INTRODUCED_IN(21);
/**
- * Delete a previously created media extractor
+ * Delete a previously created media extractor.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaExtractor_delete(AMediaExtractor*) __INTRODUCED_IN(21);
/**
- * Set the file descriptor from which the extractor will read.
+ * Set the file descriptor from which the extractor will read.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor*, int fd, off64_t offset,
off64_t length) __INTRODUCED_IN(21);
/**
* Set the URI from which the extractor will read.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaExtractor_setDataSource(AMediaExtractor*,
const char *location) __INTRODUCED_IN(21);
@@ -77,6 +85,8 @@
/**
* Set the custom data source implementation from which the extractor will read.
+ *
+ * Available since API level 28.
*/
media_status_t AMediaExtractor_setDataSourceCustom(AMediaExtractor*,
AMediaDataSource *src) __INTRODUCED_IN(28);
@@ -85,11 +95,15 @@
/**
* Return the number of tracks in the previously specified media file
+ *
+ * Available since API level 21.
*/
size_t AMediaExtractor_getTrackCount(AMediaExtractor*) __INTRODUCED_IN(21);
/**
* Return the format of the specified track. The caller must free the returned format
+ *
+ * Available since API level 21.
*/
AMediaFormat* AMediaExtractor_getTrackFormat(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
@@ -98,41 +112,55 @@
* getSampleTime only retrieve information for the subset of tracks selected.
* Selecting the same track multiple times has no effect, the track is
* only selected once.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaExtractor_selectTrack(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
/**
* Unselect the specified track. Subsequent calls to readSampleData, getSampleTrackIndex and
- * getSampleTime only retrieve information for the subset of tracks selected..
+ * getSampleTime only retrieve information for the subset of tracks selected.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaExtractor_unselectTrack(AMediaExtractor*, size_t idx) __INTRODUCED_IN(21);
/**
* Read the current sample.
+ *
+ * Available since API level 21.
*/
ssize_t AMediaExtractor_readSampleData(AMediaExtractor*,
uint8_t *buffer, size_t capacity) __INTRODUCED_IN(21);
/**
* Read the current sample's flags.
+ *
+ * Available since API level 21.
*/
uint32_t AMediaExtractor_getSampleFlags(AMediaExtractor*) __INTRODUCED_IN(21);
/**
* Returns the track index the current sample originates from (or -1
* if no more samples are available)
+ *
+ * Available since API level 21.
*/
int AMediaExtractor_getSampleTrackIndex(AMediaExtractor*) __INTRODUCED_IN(21);
/**
 * Returns the current sample's presentation time in microseconds,
 * or -1 if no more samples are available.
+ *
+ * Available since API level 21.
*/
int64_t AMediaExtractor_getSampleTime(AMediaExtractor*) __INTRODUCED_IN(21);
/**
* Advance to the next sample. Returns false if no more sample data
* is available (end of stream).
+ *
+ * Available since API level 21.
*/
bool AMediaExtractor_advance(AMediaExtractor*) __INTRODUCED_IN(21);
@@ -143,7 +171,7 @@
} SeekMode;
/**
- *
+ * Available since API level 21.
*/
media_status_t AMediaExtractor_seekTo(AMediaExtractor*,
int64_t seekPosUs, SeekMode mode) __INTRODUCED_IN(21);
@@ -167,10 +195,14 @@
/**
* Get the PSSH info if present.
+ *
+ * Available since API level 21.
*/
PsshInfo* AMediaExtractor_getPsshInfo(AMediaExtractor*) __INTRODUCED_IN(21);
-
+/**
+ * Available since API level 21.
+ */
AMediaCodecCryptoInfo *AMediaExtractor_getSampleCryptoInfo(AMediaExtractor *) __INTRODUCED_IN(21);
enum {
@@ -186,6 +218,8 @@
*
* This function will always return a format; however, the format could be empty
* (no key-value pairs) if the media container does not provide format information.
+ *
+ * Available since API level 28.
*/
AMediaFormat* AMediaExtractor_getFileFormat(AMediaExtractor*) __INTRODUCED_IN(28);
@@ -198,6 +232,7 @@
* uint8_t *buf = new uint8_t[sampleSize];
* AMediaExtractor_readSampleData(ex, buf, sampleSize);
*
+ * Available since API level 28.
*/
ssize_t AMediaExtractor_getSampleSize(AMediaExtractor*) __INTRODUCED_IN(28);
@@ -211,6 +246,8 @@
* Returns -1 when the extractor is not reading from a network data source, or when the
* cached duration cannot be calculated (bitrate, duration, and file size information
* not available).
+ *
+ * Available since API level 28.
*/
int64_t AMediaExtractor_getCachedDuration(AMediaExtractor *) __INTRODUCED_IN(28);
@@ -222,6 +259,8 @@
* Returns AMEDIA_OK on success or AMEDIA_ERROR_* to indicate failure reason.
* Existing key-value pairs in |fmt| would be removed if this API returns AMEDIA_OK.
* The contents of |fmt| is undefined if this API returns AMEDIA_ERROR_*.
+ *
+ * Available since API level 28.
*/
media_status_t AMediaExtractor_getSampleFormat(AMediaExtractor *ex,
AMediaFormat *fmt) __INTRODUCED_IN(28);
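
Taken together, the comments in this header describe the usual demux loop: create an extractor, set a data source, select tracks, then read samples until advance() reports end of stream. A minimal sketch under those assumptions, with error handling omitted and the buffer capacity chosen arbitrarily:

    #include <media/NdkMediaExtractor.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/types.h>

    /* Illustrative only: read every sample of every track from an already opened fd. */
    static void drainAllSamples(int fd, off64_t offset, off64_t length) {
        AMediaExtractor *ex = AMediaExtractor_new();
        AMediaExtractor_setDataSourceFd(ex, fd, offset, length);
        size_t trackCount = AMediaExtractor_getTrackCount(ex);
        for (size_t i = 0; i < trackCount; i++) {
            AMediaExtractor_selectTrack(ex, i);
        }
        size_t capacity = 256 * 1024; /* assumed upper bound for this sketch */
        uint8_t *buffer = malloc(capacity);
        while (true) {
            ssize_t sampleSize = AMediaExtractor_readSampleData(ex, buffer, capacity);
            if (sampleSize < 0) {
                break; /* no more samples */
            }
            int64_t ptsUs = AMediaExtractor_getSampleTime(ex);
            int trackIndex = AMediaExtractor_getSampleTrackIndex(ex);
            (void)ptsUs; (void)trackIndex; /* a real caller would queue these to a decoder */
            if (!AMediaExtractor_advance(ex)) {
                break; /* end of stream */
            }
        }
        free(buffer);
        AMediaExtractor_delete(ex);
    }
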
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index fd43f36..41c2378 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -48,40 +48,78 @@
#if __ANDROID_API__ >= 21
+/**
+ * Available since API level 21.
+ */
AMediaFormat *AMediaFormat_new() __INTRODUCED_IN(21);
+
+/**
+ * Available since API level 21.
+ */
media_status_t AMediaFormat_delete(AMediaFormat*) __INTRODUCED_IN(21);
/**
* Human readable representation of the format. The returned string is owned by the format,
* and remains valid until the next call to toString, or until the format is deleted.
+ *
+ * Available since API level 21.
*/
const char* AMediaFormat_toString(AMediaFormat*) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
bool AMediaFormat_getInt32(AMediaFormat*, const char *name, int32_t *out) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
bool AMediaFormat_getInt64(AMediaFormat*, const char *name, int64_t *out) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
bool AMediaFormat_getFloat(AMediaFormat*, const char *name, float *out) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
bool AMediaFormat_getSize(AMediaFormat*, const char *name, size_t *out) __INTRODUCED_IN(21);
/**
* The returned data is owned by the format and remains valid as long as the named entry
* is part of the format.
+ *
+ * Available since API level 21.
*/
bool AMediaFormat_getBuffer(AMediaFormat*, const char *name, void** data, size_t *size) __INTRODUCED_IN(21);
/**
* The returned string is owned by the format, and remains valid until the next call to getString,
* or until the format is deleted.
+ *
+ * Available since API level 21.
*/
bool AMediaFormat_getString(AMediaFormat*, const char *name, const char **out) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
void AMediaFormat_setInt32(AMediaFormat*, const char* name, int32_t value) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
void AMediaFormat_setInt64(AMediaFormat*, const char* name, int64_t value) __INTRODUCED_IN(21);
+/**
+ * Available since API level 21.
+ */
void AMediaFormat_setFloat(AMediaFormat*, const char* name, float value) __INTRODUCED_IN(21);
/**
* The provided string is copied into the format.
+ *
+ * Available since API level 21.
*/
void AMediaFormat_setString(AMediaFormat*, const char* name, const char* value) __INTRODUCED_IN(21);
/**
* The provided data is copied into the format.
+ *
+ * Available since API level 21.
*/
void AMediaFormat_setBuffer(AMediaFormat*, const char* name, const void* data, size_t size) __INTRODUCED_IN(21);
@@ -155,24 +193,43 @@
#endif /* __ANDROID_API__ >= 21 */
#if __ANDROID_API__ >= 28
+/**
+ * Available since API level 28.
+ */
bool AMediaFormat_getDouble(AMediaFormat*, const char *name, double *out) __INTRODUCED_IN(28);
+/**
+ * Available since API level 28.
+ */
bool AMediaFormat_getRect(AMediaFormat*, const char *name,
int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) __INTRODUCED_IN(28);
+/**
+ * Available since API level 28.
+ */
void AMediaFormat_setDouble(AMediaFormat*, const char* name, double value) __INTRODUCED_IN(28);
+/**
+ * Available since API level 28.
+ */
void AMediaFormat_setSize(AMediaFormat*, const char* name, size_t value) __INTRODUCED_IN(28);
+/**
+ * Available since API level 28.
+ */
void AMediaFormat_setRect(AMediaFormat*, const char* name,
int32_t left, int32_t top, int32_t right, int32_t bottom) __INTRODUCED_IN(28);
#endif /* __ANDROID_API__ >= 28 */
#if __ANDROID_API__ >= 29
/**
- * remove all key/value pairs from the given AMediaFormat
+ * Remove all key/value pairs from the given AMediaFormat.
+ *
+ * Available since API level 29.
*/
void AMediaFormat_clear(AMediaFormat*) __INTRODUCED_IN(29);
/**
- * copy one AMediaFormat to another
+ * Copy one AMediaFormat to another.
+ *
+ * Available since API level 29.
*/
media_status_t AMediaFormat_copy(AMediaFormat *to, AMediaFormat *from) __INTRODUCED_IN(29);
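
The getter/setter pairs above share one convention: the getters return a bool that presumably indicates whether a value of that type is present, and string or buffer results remain owned by the format. A short illustrative sketch; the key strings used here are the conventional ones, not something this change defines:

    #include <media/NdkMediaFormat.h>
    #include <stdio.h>

    /* Illustrative only: build a format, read a value back, and print the whole thing. */
    static void formatRoundTrip(void) {
        AMediaFormat *fmt = AMediaFormat_new();
        AMediaFormat_setString(fmt, "mime", "audio/mp4a-latm");
        AMediaFormat_setInt32(fmt, "sample-rate", 48000);
        int32_t sampleRate = 0;
        if (AMediaFormat_getInt32(fmt, "sample-rate", &sampleRate)) {
            printf("sample-rate = %d\n", sampleRate);
        }
        printf("%s\n", AMediaFormat_toString(fmt)); /* string owned by fmt */
        AMediaFormat_delete(fmt);
    }
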
diff --git a/media/ndk/include/media/NdkMediaMuxer.h b/media/ndk/include/media/NdkMediaMuxer.h
index 7393867..3fdeea4 100644
--- a/media/ndk/include/media/NdkMediaMuxer.h
+++ b/media/ndk/include/media/NdkMediaMuxer.h
@@ -56,12 +56,16 @@
#if __ANDROID_API__ >= 21
/**
- * Create new media muxer
+ * Create new media muxer.
+ *
+ * Available since API level 21.
*/
AMediaMuxer* AMediaMuxer_new(int fd, OutputFormat format) __INTRODUCED_IN(21);
/**
- * Delete a previously created media muxer
+ * Delete a previously created media muxer.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaMuxer_delete(AMediaMuxer*) __INTRODUCED_IN(21);
@@ -75,6 +79,8 @@
* Both values are specified in degrees.
* Latitude must be in the range [-90, 90].
* Longitude must be in the range [-180, 180].
+ *
+ * Available since API level 21.
*/
media_status_t AMediaMuxer_setLocation(AMediaMuxer*,
float latitude, float longitude) __INTRODUCED_IN(21);
@@ -90,6 +96,8 @@
* during playback.
* The angle is specified in degrees, clockwise.
* The supported angles are 0, 90, 180, and 270 degrees.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaMuxer_setOrientationHint(AMediaMuxer*, int degrees) __INTRODUCED_IN(21);
@@ -97,18 +105,24 @@
* Adds a track with the specified format.
* Returns the index of the new track or a negative value in case of failure,
* which can be interpreted as a media_status_t.
+ *
+ * Available since API level 21.
*/
ssize_t AMediaMuxer_addTrack(AMediaMuxer*, const AMediaFormat* format) __INTRODUCED_IN(21);
/**
* Start the muxer. Should be called after AMediaMuxer_addTrack and
* before AMediaMuxer_writeSampleData.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaMuxer_start(AMediaMuxer*) __INTRODUCED_IN(21);
/**
* Stops the muxer.
* Once the muxer stops, it can not be restarted.
+ *
+ * Available since API level 21.
*/
media_status_t AMediaMuxer_stop(AMediaMuxer*) __INTRODUCED_IN(21);
@@ -118,6 +132,8 @@
* the right tracks. Also, it needs to make sure the samples for each track
* are written in chronological order (e.g. in the order they are provided
* by the encoder.)
+ *
+ * Available since API level 21.
*/
media_status_t AMediaMuxer_writeSampleData(AMediaMuxer *muxer,
size_t trackIdx, const uint8_t *data,
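
The ordering constraints spelled out above (add tracks, then start, then write samples, then stop) amount to a short lifecycle. A sketch under those assumptions; the MPEG-4 output format constant and the single pre-encoded sample are illustrative, and AMediaCodecBufferInfo comes from NdkMediaCodec.h:

    #include <media/NdkMediaCodec.h>
    #include <media/NdkMediaFormat.h>
    #include <media/NdkMediaMuxer.h>
    #include <stdint.h>
    #include <sys/types.h>

    /* Illustrative only: mux a single pre-encoded sample into an MPEG-4 container. */
    static media_status_t muxOneSample(int fd, AMediaFormat *trackFormat,
                                       const uint8_t *sample, AMediaCodecBufferInfo info) {
        AMediaMuxer *muxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
        ssize_t trackIdx = AMediaMuxer_addTrack(muxer, trackFormat);
        if (trackIdx < 0) {
            AMediaMuxer_delete(muxer);
            return (media_status_t)trackIdx; /* negative result doubles as a media_status_t */
        }
        AMediaMuxer_start(muxer);
        media_status_t status = AMediaMuxer_writeSampleData(muxer, trackIdx, sample, &info);
        AMediaMuxer_stop(muxer);
        AMediaMuxer_delete(muxer);
        return status;
    }
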
diff --git a/media/tests/benchmark/.clang-format b/media/tests/benchmark/.clang-format
new file mode 100644
index 0000000..bf1e355
--- /dev/null
+++ b/media/tests/benchmark/.clang-format
@@ -0,0 +1,13 @@
+BasedOnStyle: Google
+Standard: Cpp11
+AccessModifierOffset: -2
+AllowShortFunctionsOnASingleLine: Inline
+ColumnLimit: 100
+CommentPragmas: NOLINT:.*
+DerivePointerAlignment: false
+IncludeBlocks: Preserve
+IndentWidth: 4
+ContinuationIndentWidth: 8
+PointerAlignment: Right
+TabWidth: 4
+UseTab: Never
diff --git a/media/libstagefright/include/media/stagefright/NdkUtils.h b/media/tests/benchmark/Android.bp
similarity index 62%
copy from media/libstagefright/include/media/stagefright/NdkUtils.h
copy to media/tests/benchmark/Android.bp
index a68884a..de408dd 100644
--- a/media/libstagefright/include/media/stagefright/NdkUtils.h
+++ b/media/tests/benchmark/Android.bp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,18 +14,8 @@
* limitations under the License.
*/
-#ifndef NDK_UTILS_H_
-
-#define NDK_UTILS_H_
-
-#include <media/stagefright/MetaData.h>
-#include <media/NdkWrapper.h>
-
-namespace android {
-
-sp<MetaData> convertMediaFormatWrapperToMetaData(
- const sp<AMediaFormatWrapper> &fmt);
-
-} // namespace android
-
-#endif // NDK_UTILS_H_
+subdirs = [
+ "src",
+ "tests",
+ "MediaBenchmarkTest",
+]
diff --git a/media/tests/benchmark/MediaBenchmarkTest/Android.bp b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
new file mode 100644
index 0000000..91b03f1
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/Android.bp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+android_test {
+ name: "MediaBenchmarkTest",
+
+ // Include all the test code
+ srcs: ["src/androidTest/**/*.java"],
+
+ sdk_version: "system_current",
+
+ resource_dirs: ["res"],
+
+ libs: [
+ "android.test.runner",
+ "android.test.base",
+ ],
+
+ static_libs: [
+ "libMediaBenchmark",
+ "junit",
+ "androidx.test.runner",
+ ],
+}
+
+android_library {
+ name: "libMediaBenchmark",
+
+ // Include all the libraries
+ srcs: ["src/main/**/*.java"],
+
+ sdk_version: "system_current",
+
+ static_libs: [
+ "androidx.test.core",
+ ],
+}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/AndroidManifest.xml b/media/tests/benchmark/MediaBenchmarkTest/AndroidManifest.xml
new file mode 100644
index 0000000..eea9914
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/AndroidManifest.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+-->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ xmlns:tools="http://schemas.android.com/tools"
+ package="com.android.media.benchmark">
+ <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
+ <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
+ <uses-permission android:name="android.permission.READ_INTERNAL_STORAGE" />
+ <uses-permission android:name="android.permission.WRITE_INTERNAL_STORAGE" />
+
+ <application
+ tools:ignore="AllowBackup,GoogleAppIndexingWarning,MissingApplicationIcon"
+ tools:remove="android:appComponentFactory">
+ </application>
+
+ <instrumentation android:name="androidx.test.runner.AndroidJUnitRunner"
+ android:targetPackage="com.android.media.benchmark"
+ android:label="Benchmark Media Test"/>
+</manifest>
\ No newline at end of file
diff --git a/media/tests/benchmark/MediaBenchmarkTest/AndroidTest.xml b/media/tests/benchmark/MediaBenchmarkTest/AndroidTest.xml
new file mode 100644
index 0000000..89d6ce2
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/AndroidTest.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs Media Benchmark Tests">
+ <target_preparer class="com.android.tradefed.targetprep.TestAppInstallSetup">
+ <option name="test-file-name" value="MediaBenchmarkTest.apk" />
+ </target_preparer>
+
+ <option name="test-tag" value="MediaBenchmarkTest" />
+ <test class="com.android.tradefed.testtype.AndroidJUnitTest" >
+ <option name="package" value="com.android.media.benchmark" />
+ <option name="runner" value="androidx.test.runner.AndroidJUnitRunner" />
+ <option name="hidden-api-checks" value="false"/>
+ </test>
+</configuration>
diff --git a/media/tests/benchmark/MediaBenchmarkTest/build.gradle b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
new file mode 100644
index 0000000..b0ee692
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/build.gradle
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+buildscript {
+ repositories {
+ google()
+ jcenter()
+ }
+ dependencies {
+ classpath 'com.android.tools.build:gradle:3.5.0'
+ }
+}
+
+apply plugin: 'com.android.application'
+
+android {
+ compileSdkVersion 29
+ defaultConfig {
+ applicationId "com.android.media.benchmark"
+ minSdkVersion 21
+ targetSdkVersion 29
+ versionCode 1
+ versionName "1.0"
+ testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
+ }
+ sourceSets {
+ main {
+ java.srcDirs 'src/main/java'
+ res.srcDirs 'res'
+ manifest.srcFile 'AndroidManifest.xml'
+ }
+ androidTest {
+ java.srcDirs 'src/androidTest/java'
+ res.srcDirs 'res'
+ manifest.srcFile 'AndroidManifest.xml'
+ }
+ }
+}
+
+repositories {
+ google()
+ jcenter()
+}
+
+dependencies {
+ implementation fileTree(dir: 'libs', include: ['*.jar'])
+ implementation 'androidx.appcompat:appcompat:1.1.0'
+ testImplementation 'junit:junit:4.12'
+ androidTestImplementation 'androidx.test:runner:1.2.0'
+ androidTestImplementation 'androidx.test.ext:junit:1.1.1'
+}
\ No newline at end of file
diff --git a/media/tests/benchmark/MediaBenchmarkTest/res/values/strings.xml b/media/tests/benchmark/MediaBenchmarkTest/res/values/strings.xml
new file mode 100644
index 0000000..24dbccc
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/res/values/strings.xml
@@ -0,0 +1,4 @@
+<resources>
+ <string name="input_file_path">/data/local/tmp/MediaBenchmark/res/</string>
+ <string name="output_file_path">/data/local/tmp/MediaBenchmark/output/</string>
+</resources>
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/DecoderTest.java b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/DecoderTest.java
new file mode 100644
index 0000000..be2633d
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/DecoderTest.java
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.benchmark.tests;
+
+import android.content.Context;
+import android.media.MediaCodec;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import com.android.media.benchmark.R;
+import com.android.media.benchmark.library.CodecUtils;
+import com.android.media.benchmark.library.Decoder;
+import com.android.media.benchmark.library.Extractor;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+
+@RunWith(Parameterized.class)
+public class DecoderTest {
+ private static final Context mContext =
+ InstrumentationRegistry.getInstrumentation().getTargetContext();
+ private static final String mInputFilePath = mContext.getString(R.string.input_file_path);
+ private static final String mOutputFilePath = mContext.getString(R.string.output_file_path);
+ private static final String TAG = "DecoderTest";
+ private static final long PER_TEST_TIMEOUT_MS = 60000;
+ private static final boolean DEBUG = false;
+ private static final boolean WRITE_OUTPUT = false;
+ private String mInputFile;
+ private boolean mAsyncMode;
+
+ public DecoderTest(String inputFile, boolean asyncMode) {
+ this.mInputFile = inputFile;
+ this.mAsyncMode = asyncMode;
+ }
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> input() {
+ return Arrays.asList(new Object[][]{
+ //Audio Sync Test
+ {"bbb_44100hz_2ch_128kbps_aac_30sec.mp4", false},
+ {"bbb_44100hz_2ch_128kbps_mp3_30sec.mp3", false},
+ {"bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", false},
+ {"bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", false},
+ {"bbb_44100hz_2ch_80kbps_vorbis_30sec.mp4", false},
+ {"bbb_44100hz_2ch_600kbps_flac_30sec.mp4", false},
+ {"bbb_48000hz_2ch_100kbps_opus_30sec.webm", false},
+ // Audio Async Test
+ {"bbb_44100hz_2ch_128kbps_aac_30sec.mp4", true},
+ {"bbb_44100hz_2ch_128kbps_mp3_30sec.mp3", true},
+ {"bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", true},
+ {"bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", true},
+ {"bbb_44100hz_2ch_80kbps_vorbis_30sec.mp4", true},
+ {"bbb_44100hz_2ch_600kbps_flac_30sec.mp4", true},
+ {"bbb_48000hz_2ch_100kbps_opus_30sec.webm", true},
+ // Video Sync Test
+ {"crowd_1920x1080_25fps_4000kbps_vp9.webm", false},
+ {"crowd_1920x1080_25fps_4000kbps_vp8.webm", false},
+ {"crowd_1920x1080_25fps_4000kbps_av1.webm", false},
+ {"crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", false},
+ {"crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", false},
+ {"crowd_352x288_25fps_6000kbps_h263.3gp", false},
+ {"crowd_1920x1080_25fps_6700kbps_h264.ts", false},
+ {"crowd_1920x1080_25fps_4000kbps_h265.mkv", false},
+ // Video Async Test
+ {"crowd_1920x1080_25fps_4000kbps_vp9.webm", true},
+ {"crowd_1920x1080_25fps_4000kbps_vp8.webm", true},
+ {"crowd_1920x1080_25fps_4000kbps_av1.webm", true},
+ {"crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", true},
+ {"crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", true},
+ {"crowd_352x288_25fps_6000kbps_h263.3gp", true},
+ {"crowd_1920x1080_25fps_6700kbps_h264.ts", true},
+ {"crowd_1920x1080_25fps_4000kbps_h265.mkv", true}});
+ }
+
+ @Test(timeout = PER_TEST_TIMEOUT_MS)
+ public void testDecoder() throws IOException {
+ File inputFile = new File(mInputFilePath + mInputFile);
+ if (inputFile.exists()) {
+ FileInputStream fileInput = new FileInputStream(inputFile);
+ FileDescriptor fileDescriptor = fileInput.getFD();
+ Extractor extractor = new Extractor();
+ int trackCount = extractor.setUpExtractor(fileDescriptor);
+ ArrayList<ByteBuffer> inputBuffer = new ArrayList<>();
+ ArrayList<MediaCodec.BufferInfo> frameInfo = new ArrayList<>();
+ if (trackCount <= 0) {
+ Log.e(TAG, "Extraction failed. No tracks for file: " + mInputFile);
+ return;
+ }
+ for (int currentTrack = 0; currentTrack < trackCount; currentTrack++) {
+ extractor.selectExtractorTrack(currentTrack);
+ MediaFormat format = extractor.getFormat(currentTrack);
+ String mime = format.getString(MediaFormat.KEY_MIME);
+ ArrayList<String> mediaCodecs = CodecUtils.selectCodecs(mime, false);
+ if (mediaCodecs.size() <= 0) {
+ Log.e(TAG,
+ "No suitable codecs found for file: " + mInputFile
+ + " track : " + currentTrack + " mime: " + mime);
+ continue;
+ }
+ // Get samples from extractor
+ int sampleSize;
+ do {
+ sampleSize = extractor.getFrameSample();
+ MediaCodec.BufferInfo bufInfo = new MediaCodec.BufferInfo();
+ MediaCodec.BufferInfo info = extractor.getBufferInfo();
+ ByteBuffer dataBuffer = ByteBuffer.allocate(info.size);
+ dataBuffer.put(extractor.getFrameBuffer().array(), 0, info.size);
+ bufInfo.set(info.offset, info.size, info.presentationTimeUs, info.flags);
+ inputBuffer.add(dataBuffer);
+ frameInfo.add(bufInfo);
+ if (DEBUG) {
+ Log.d(TAG,
+ "Extracted bufInfo: flag = " + bufInfo.flags + " timestamp = "
+ + bufInfo.presentationTimeUs + " size = " + bufInfo.size);
+ }
+ } while (sampleSize > 0);
+ for (String codecName : mediaCodecs) {
+ FileOutputStream decodeOutputStream = null;
+ if (WRITE_OUTPUT) {
+ if (!Paths.get(mOutputFilePath).toFile().exists()) {
+ Files.createDirectories(Paths.get(mOutputFilePath));
+ }
+ File outFile = new File(mOutputFilePath + "decoder.out");
+ if (outFile.exists()) {
+ if (!outFile.delete()) {
+ Log.e(TAG, " Unable to delete existing file" + outFile.toString());
+ }
+ }
+ if (outFile.createNewFile()) {
+ decodeOutputStream = new FileOutputStream(outFile);
+ } else {
+ Log.e(TAG, "Unable to create file: " + outFile.toString());
+ }
+ }
+ Decoder decoder = new Decoder();
+ decoder.setupDecoder(decodeOutputStream);
+ int status =
+ decoder.decode(inputBuffer, frameInfo, mAsyncMode, format, codecName);
+ decoder.deInitCodec();
+ if (status == 0) {
+ decoder.dumpStatistics(
+ mInputFile + " " + codecName, extractor.getClipDuration());
+ Log.i(TAG,
+ "Decoding Successful for file: " + mInputFile
+ + " with codec: " + codecName);
+ } else {
+ Log.e(TAG,
+ "Decoder returned error " + status + " for file: " + mInputFile
+ + " with codec: " + codecName);
+ }
+ decoder.resetDecoder();
+ if (decodeOutputStream != null) {
+ decodeOutputStream.close();
+ }
+ }
+ extractor.unselectExtractorTrack(currentTrack);
+ inputBuffer.clear();
+ frameInfo.clear();
+ }
+ extractor.deinitExtractor();
+ fileInput.close();
+ } else {
+ Log.w(TAG,
+ "Warning: Test Skipped. Cannot find " + mInputFile + " in directory "
+ + mInputFilePath);
+ }
+ }
+}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/EncoderTest.java b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/EncoderTest.java
new file mode 100644
index 0000000..9db9c84
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/EncoderTest.java
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.benchmark.tests;
+
+import android.content.Context;
+import android.media.MediaCodec;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import com.android.media.benchmark.R;
+import com.android.media.benchmark.library.CodecUtils;
+import com.android.media.benchmark.library.Decoder;
+import com.android.media.benchmark.library.Encoder;
+import com.android.media.benchmark.library.Extractor;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.nio.ByteBuffer;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+
+@RunWith(Parameterized.class)
+public class EncoderTest {
+ private static final Context mContext =
+ InstrumentationRegistry.getInstrumentation().getTargetContext();
+ private static final String mInputFilePath = mContext.getString(R.string.input_file_path);
+ private static final String mOutputFilePath = mContext.getString(R.string.output_file_path);
+ private static final String TAG = "EncoderTest";
+ private static final long PER_TEST_TIMEOUT_MS = 120000;
+ private static final boolean DEBUG = false;
+ private static final boolean WRITE_OUTPUT = false;
+ private static final int ENCODE_DEFAULT_FRAME_RATE = 25;
+ private static final int ENCODE_DEFAULT_BIT_RATE = 8000000 /* 8 Mbps */;
+ private static final int ENCODE_MIN_BIT_RATE = 600000 /* 600 Kbps */;
+
+ private String mInputFile;
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> inputFiles() {
+ return Arrays.asList(new Object[][]{
+ // Audio Test
+ {"bbb_44100hz_2ch_128kbps_aac_30sec.mp4"},
+ {"bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp"},
+ {"bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp"},
+ {"bbb_44100hz_2ch_600kbps_flac_30sec.mp4"},
+ {"bbb_48000hz_2ch_100kbps_opus_30sec.webm"},
+ // Video Test
+ {"crowd_1920x1080_25fps_4000kbps_vp8.webm"},
+ {"crowd_1920x1080_25fps_6700kbps_h264.ts"},
+ {"crowd_1920x1080_25fps_4000kbps_h265.mkv"},
+ {"crowd_1920x1080_25fps_4000kbps_vp9.webm"},
+ {"crowd_176x144_25fps_6000kbps_mpeg4.mp4"},
+ {"crowd_176x144_25fps_6000kbps_h263.3gp"}});
+ }
+
+ public EncoderTest(String inputFileName) {
+ this.mInputFile = inputFileName;
+ }
+
+ @Test(timeout = PER_TEST_TIMEOUT_MS)
+ public void sampleEncoderTest() throws Exception {
+ int status;
+ int frameSize;
+
+ //Parameters for video
+ int width = 0;
+ int height = 0;
+ int profile = 0;
+ int level = 0;
+ int frameRate = 0;
+
+ //Parameters for audio
+ int bitRate = 0;
+ int sampleRate = 0;
+ int numChannels = 0;
+
+ File inputFile = new File(mInputFilePath + mInputFile);
+ if (inputFile.exists()) {
+ FileInputStream fileInput = new FileInputStream(inputFile);
+ FileDescriptor fileDescriptor = fileInput.getFD();
+ Extractor extractor = new Extractor();
+ int trackCount = extractor.setUpExtractor(fileDescriptor);
+ if (trackCount <= 0) {
+ Log.e(TAG, "Extraction failed. No tracks for file: " + mInputFile);
+ return;
+ }
+ ArrayList<ByteBuffer> inputBuffer = new ArrayList<>();
+ ArrayList<MediaCodec.BufferInfo> frameInfo = new ArrayList<>();
+ for (int currentTrack = 0; currentTrack < trackCount; currentTrack++) {
+ extractor.selectExtractorTrack(currentTrack);
+ MediaFormat format = extractor.getFormat(currentTrack);
+ // Get samples from extractor
+ int sampleSize;
+ do {
+ sampleSize = extractor.getFrameSample();
+ MediaCodec.BufferInfo bufInfo = new MediaCodec.BufferInfo();
+ MediaCodec.BufferInfo info = extractor.getBufferInfo();
+ ByteBuffer dataBuffer = ByteBuffer.allocate(info.size);
+ dataBuffer.put(extractor.getFrameBuffer().array(), 0, info.size);
+ bufInfo.set(info.offset, info.size, info.presentationTimeUs, info.flags);
+ inputBuffer.add(dataBuffer);
+ frameInfo.add(bufInfo);
+ if (DEBUG) {
+ Log.d(TAG, "Extracted bufInfo: flag = " + bufInfo.flags + " timestamp = " +
+ bufInfo.presentationTimeUs + " size = " + bufInfo.size);
+ }
+ } while (sampleSize > 0);
+
+ int tid = android.os.Process.myTid();
+ File decodedFile = new File(mContext.getFilesDir() + "/decoder_" + tid + ".out");
+ FileOutputStream decodeOutputStream = new FileOutputStream(decodedFile);
+ Decoder decoder = new Decoder();
+ decoder.setupDecoder(decodeOutputStream);
+ status = decoder.decode(inputBuffer, frameInfo, false, format, "");
+ if (status == 0) {
+ Log.i(TAG, "Decoding complete.");
+ } else {
+ Log.e(TAG, "Decode returned error. Encoding did not take place." + status);
+ return;
+ }
+ decoder.deInitCodec();
+ extractor.unselectExtractorTrack(currentTrack);
+ inputBuffer.clear();
+ frameInfo.clear();
+ if (decodeOutputStream != null) {
+ decodeOutputStream.close();
+ }
+ String mime = format.getString(MediaFormat.KEY_MIME);
+ ArrayList<String> mediaCodecs = CodecUtils.selectCodecs(mime, true);
+ if (mediaCodecs.size() <= 0) {
+ Log.e(TAG, "No suitable codecs found for file: " + mInputFile + " track : " +
+ currentTrack + " mime: " + mime);
+ return;
+ }
+ Boolean[] encodeMode = {true, false};
+ /* Encoding the decoder's output */
+ for (Boolean asyncMode : encodeMode) {
+ for (String codecName : mediaCodecs) {
+ FileOutputStream encodeOutputStream = null;
+ if (WRITE_OUTPUT) {
+ File outEncodeFile = new File(mOutputFilePath + "encoder.out");
+ if (outEncodeFile.exists()) {
+ if (!outEncodeFile.delete()) {
+ Log.e(TAG, "Unable to delete existing file" +
+ decodedFile.toString());
+ }
+ }
+ if (outEncodeFile.createNewFile()) {
+ encodeOutputStream = new FileOutputStream(outEncodeFile);
+ } else {
+ Log.e(TAG, "Unable to create file to write encoder output: " +
+ outEncodeFile.toString());
+ }
+ }
+ File rawFile =
+ new File(mContext.getFilesDir() + "/decoder_" + tid + ".out");
+ if (rawFile.exists()) {
+ if (DEBUG) {
+ Log.i(TAG, "Path of decoded input file: " + rawFile.toString());
+ }
+ FileInputStream eleStream = new FileInputStream(rawFile);
+ if (mime.startsWith("video/")) {
+ width = format.getInteger(MediaFormat.KEY_WIDTH);
+ height = format.getInteger(MediaFormat.KEY_HEIGHT);
+ if (format.containsKey(MediaFormat.KEY_FRAME_RATE)) {
+ frameRate = format.getInteger(MediaFormat.KEY_FRAME_RATE);
+ } else if (frameRate <= 0) {
+ frameRate = ENCODE_DEFAULT_FRAME_RATE;
+ }
+ if (format.containsKey(MediaFormat.KEY_BIT_RATE)) {
+ bitRate = format.getInteger(MediaFormat.KEY_BIT_RATE);
+ } else if (bitRate <= 0) {
+ if (mime.contains("video/3gpp") ||
+ mime.contains("video/mp4v-es")) {
+ bitRate = ENCODE_MIN_BIT_RATE;
+ } else {
+ bitRate = ENCODE_DEFAULT_BIT_RATE;
+ }
+ }
+ if (format.containsKey(MediaFormat.KEY_PROFILE)) {
+ profile = format.getInteger(MediaFormat.KEY_PROFILE);
+ }
+ if (format.containsKey(MediaFormat.KEY_LEVEL)) {
+ level = format.getInteger(MediaFormat.KEY_LEVEL);
+ }
+ } else {
+ sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
+ numChannels = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT);
+ bitRate = sampleRate * numChannels * 16;
+ }
+ /*Setup Encode Format*/
+ MediaFormat encodeFormat;
+ if (mime.startsWith("video/")) {
+ frameSize = width * height * 3 / 2;
+ encodeFormat = MediaFormat.createVideoFormat(mime, width, height);
+ encodeFormat.setInteger(MediaFormat.KEY_FRAME_RATE, frameRate);
+ encodeFormat.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
+ encodeFormat.setInteger(MediaFormat.KEY_PROFILE, profile);
+ encodeFormat.setInteger(MediaFormat.KEY_LEVEL, level);
+ encodeFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);
+ encodeFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, frameSize);
+ } else {
+ encodeFormat = MediaFormat
+ .createAudioFormat(mime, sampleRate, numChannels);
+ encodeFormat.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
+ frameSize = 4096;
+ }
+ Encoder encoder = new Encoder();
+ encoder.setupEncoder(encodeOutputStream, eleStream);
+ status = encoder.encode(codecName, encodeFormat, mime, frameRate,
+ sampleRate, frameSize, asyncMode);
+ encoder.deInitEncoder();
+ if (status == 0) {
+ encoder.dumpStatistics(mInputFile + " with " + codecName + " for " +
+ "aSyncMode = " + asyncMode, extractor.getClipDuration());
+ Log.i(TAG, "Encoding complete for file: " + mInputFile +
+ " with codec: " + codecName + " for aSyncMode = " +
+ asyncMode);
+ } else {
+ Log.e(TAG,
+ codecName + " encoder returned error " + status + " for " +
+ "file:" + " " + mInputFile);
+ }
+ encoder.resetEncoder();
+ eleStream.close();
+ if (encodeOutputStream != null) {
+ encodeOutputStream.close();
+ }
+ }
+ }
+ }
+ //Cleanup temporary input file
+ if (decodedFile.exists()) {
+ if (decodedFile.delete()) {
+ Log.i(TAG, "Successfully deleted decoded file");
+ } else {
+ Log.e(TAG, "Unable to delete decoded file");
+ }
+ }
+ }
+ extractor.deinitExtractor();
+ fileInput.close();
+ } else {
+ Log.w(TAG, "Warning: Test Skipped. Cannot find " + mInputFile + " in directory " +
+ mInputFilePath);
+ }
+ }
+}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/ExtractorTest.java b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/ExtractorTest.java
new file mode 100644
index 0000000..a02011c
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/ExtractorTest.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.benchmark.tests;
+
+import com.android.media.benchmark.R;
+import com.android.media.benchmark.library.Extractor;
+
+import android.content.Context;
+import android.util.Log;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+@RunWith(Parameterized.class)
+public class ExtractorTest {
+ private static Context mContext =
+ InstrumentationRegistry.getInstrumentation().getTargetContext();
+ private static final String mInputFilePath = mContext.getString(R.string.input_file_path);
+ private static final String TAG = "ExtractorTest";
+ private String mInputFileName;
+ private int mTrackId;
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> inputFiles() {
+ return Arrays.asList(new Object[][]{/* Parameters: filename, trackId*/
+ {"crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", 0},
+ {"crowd_1920x1080_25fps_6700kbps_h264.ts", 0},
+ {"crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", 0},
+ {"crowd_1920x1080_25fps_4000kbps_av1.webm", 0},
+ {"crowd_1920x1080_25fps_4000kbps_h265.mkv", 0},
+ {"crowd_1920x1080_25fps_4000kbps_vp8.webm", 0},
+ {"bbb_44100hz_2ch_128kbps_aac_5mins.mp4", 0},
+ {"bbb_44100hz_2ch_128kbps_mp3_5mins.mp3", 0},
+ {"bbb_44100hz_2ch_600kbps_flac_5mins.flac", 0},
+ {"bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", 0},
+ {"bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", 0},
+ {"bbb_44100hz_2ch_80kbps_vorbis_5mins.mp4", 0},
+ {"bbb_48000hz_2ch_100kbps_opus_5mins.webm", 0}});
+ }
+
+ public ExtractorTest(String filename, int track) {
+ this.mInputFileName = filename;
+ this.mTrackId = track;
+ }
+
+ @Test
+ public void sampleExtractTest() throws IOException {
+ int status = -1;
+ File inputFile = new File(mInputFilePath + mInputFileName);
+ if (inputFile.exists()) {
+ FileInputStream fileInput = new FileInputStream(inputFile);
+ FileDescriptor fileDescriptor = fileInput.getFD();
+ Extractor extractor = new Extractor();
+ extractor.setUpExtractor(fileDescriptor);
+ status = extractor.extractSample(mTrackId);
+ extractor.deinitExtractor();
+ extractor.dumpStatistics(mInputFileName);
+ fileInput.close();
+ } else {
+ Log.e(TAG, "Cannot find " + mInputFileName + " in directory " + mInputFilePath);
+ }
+ assertThat(status, is(equalTo(0)));
+ }
+}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/MuxerTest.java b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/MuxerTest.java
new file mode 100644
index 0000000..8c3080c
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/androidTest/java/com/android/media/benchmark/tests/MuxerTest.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.media.benchmark.tests;
+
+import com.android.media.benchmark.R;
+import com.android.media.benchmark.library.Extractor;
+import com.android.media.benchmark.library.Muxer;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+
+import android.content.Context;
+import android.media.MediaCodec;
+import android.media.MediaFormat;
+import android.media.MediaMuxer;
+import android.util.Log;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Hashtable;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+
+@RunWith(Parameterized.class)
+public class MuxerTest {
+ private static Context mContext =
+ InstrumentationRegistry.getInstrumentation().getTargetContext();
+ private static final String mInputFilePath = mContext.getString(R.string.input_file_path);
+ private static final String TAG = "MuxerTest";
+ private static final Map<String, Integer> mMapFormat = new Hashtable<String, Integer>() {
+ {
+ put("mp4", MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
+ put("webm", MediaMuxer.OutputFormat.MUXER_OUTPUT_WEBM);
+ put("3gpp", MediaMuxer.OutputFormat.MUXER_OUTPUT_3GPP);
+ put("ogg", MediaMuxer.OutputFormat.MUXER_OUTPUT_OGG);
+ }
+ };
+ private String mInputFileName;
+ private String mFormat;
+
+ @Parameterized.Parameters
+ public static Collection<Object[]> inputFiles() {
+ return Arrays.asList(new Object[][]{
+ /* Parameters: filename, format */
+ {"crowd_1920x1080_25fps_4000kbps_vp8.webm", "webm"},
+ {"crowd_1920x1080_25fps_4000kbps_vp9.webm", "webm"},
+ {"crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "mp4"},
+ {"crowd_352x288_25fps_6000kbps_h263.3gp", "mp4"},
+ {"crowd_1920x1080_25fps_6700kbps_h264.ts", "mp4"},
+ {"crowd_1920x1080_25fps_4000kbps_h265.mkv", "mp4"},
+ {"crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "3gpp"},
+ {"crowd_352x288_25fps_6000kbps_h263.3gp", "3gpp"},
+ {"crowd_1920x1080_25fps_6700kbps_h264.ts", "3gpp"},
+ {"crowd_1920x1080_25fps_4000kbps_h265.mkv", "3gpp"},
+ {"bbb_48000hz_2ch_100kbps_opus_5mins.webm", "ogg"},
+ {"bbb_44100hz_2ch_80kbps_vorbis_5mins.mp4", "webm"},
+ {"bbb_48000hz_2ch_100kbps_opus_5mins.webm", "webm"},
+ {"bbb_44100hz_2ch_128kbps_aac_5mins.mp4", "mp4"},
+ {"bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", "mp4"},
+ {"bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", "mp4"},
+ {"bbb_44100hz_2ch_128kbps_aac_5mins.mp4", "3gpp"},
+ {"bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", "3gpp"},
+ {"bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", "3gpp"}});
+ }
+
+ public MuxerTest(String filename, String outputFormat) {
+ this.mInputFileName = filename;
+ this.mFormat = outputFormat;
+ }
+
+ @Test
+ public void sampleMuxerTest() throws IOException {
+ int status = -1;
+ File inputFile = new File(mInputFilePath + mInputFileName);
+ if (inputFile.exists()) {
+ FileInputStream fileInput = new FileInputStream(inputFile);
+ FileDescriptor fileDescriptor = fileInput.getFD();
+ ArrayList<ByteBuffer> inputBuffer = new ArrayList<>();
+ ArrayList<MediaCodec.BufferInfo> inputBufferInfo = new ArrayList<>();
+ Extractor extractor = new Extractor();
+ int trackCount = extractor.setUpExtractor(fileDescriptor);
+ for (int currentTrack = 0; currentTrack < trackCount; currentTrack++) {
+ extractor.selectExtractorTrack(currentTrack);
+ while (true) {
+ int sampleSize = extractor.getFrameSample();
+ MediaCodec.BufferInfo bufferInfo = extractor.getBufferInfo();
+ MediaCodec.BufferInfo tempBufferInfo = new MediaCodec.BufferInfo();
+ tempBufferInfo
+ .set(bufferInfo.offset, bufferInfo.size, bufferInfo.presentationTimeUs,
+ bufferInfo.flags);
+ inputBufferInfo.add(tempBufferInfo);
+ ByteBuffer tempSampleBuffer = ByteBuffer.allocate(tempBufferInfo.size);
+ tempSampleBuffer.put(extractor.getFrameBuffer().array(), 0, bufferInfo.size);
+ inputBuffer.add(tempSampleBuffer);
+ if (sampleSize < 0) {
+ break;
+ }
+ }
+ MediaFormat format = extractor.getFormat(currentTrack);
+ int outputFormat = mMapFormat.getOrDefault(mFormat, -1);
+ if (outputFormat != -1) {
+ Muxer muxer = new Muxer();
+ int trackIndex = muxer.setUpMuxer(mContext, outputFormat, format);
+ status = muxer.mux(trackIndex, inputBuffer, inputBufferInfo);
+ if (status != 0) {
+ Log.e(TAG, "Cannot perform write operation for " + mInputFileName);
+ }
+ muxer.deInitMuxer();
+ muxer.dumpStatistics(mInputFileName, extractor.getClipDuration());
+ muxer.resetMuxer();
+ extractor.unselectExtractorTrack(currentTrack);
+ inputBufferInfo.clear();
+ inputBuffer.clear();
+ } else {
+ Log.e(TAG, "Test failed for " + mInputFileName + ". Returned invalid " +
+ "output format for given " + mFormat + " format.");
+ }
+ }
+ extractor.deinitExtractor();
+ fileInput.close();
+ } else {
+ Log.w(TAG, "Warning: Test Skipped. Cannot find " + mInputFileName + " in directory " +
+ mInputFilePath);
+ }
+ assertThat(status, is(equalTo(0)));
+ }
+}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/CodecUtils.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/CodecUtils.java
new file mode 100644
index 0000000..08035c9
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/CodecUtils.java
@@ -0,0 +1,39 @@
+package com.android.media.benchmark.library;
+
+import android.media.MediaCodecInfo;
+import android.media.MediaCodecList;
+import android.os.Build;
+
+import java.util.ArrayList;
+
+public class CodecUtils {
+ private CodecUtils() {}
+
+ /**
+ * Queries the MediaCodecList and returns codec names of supported codecs.
+ *
+ * @param mimeType Mime type of input
+ * @param isEncoder Specifies encoder or decoder
+ * @return ArrayList of codec names
+ */
+ public static ArrayList<String> selectCodecs(String mimeType, boolean isEncoder) {
+ MediaCodecList codecList = new MediaCodecList(MediaCodecList.REGULAR_CODECS);
+ MediaCodecInfo[] codecInfos = codecList.getCodecInfos();
+ ArrayList<String> supportedCodecs = new ArrayList<>();
+ for (MediaCodecInfo codecInfo : codecInfos) {
+ if (isEncoder != codecInfo.isEncoder()) {
+ continue;
+ }
+ if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q && codecInfo.isAlias()) {
+ continue;
+ }
+ String[] types = codecInfo.getSupportedTypes();
+ for (String type : types) {
+ if (type.equalsIgnoreCase(mimeType)) {
+ supportedCodecs.add(codecInfo.getName());
+ }
+ }
+ }
+ return supportedCodecs;
+ }
+}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
new file mode 100644
index 0000000..2cd27c2
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.benchmark.library;
+
+import android.media.MediaCodec;
+import android.media.MediaCodec.BufferInfo;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import androidx.annotation.NonNull;
+
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+
+public class Decoder {
+ private static final String TAG = "Decoder";
+ private static final boolean DEBUG = false;
+ private static final int kQueueDequeueTimeoutUs = 1000;
+
+ private final Object mLock = new Object();
+ private MediaCodec mCodec;
+ private ArrayList<BufferInfo> mInputBufferInfo;
+ private Stats mStats;
+
+ private boolean mSawInputEOS;
+ private boolean mSawOutputEOS;
+ private boolean mSignalledError;
+
+ private int mNumOutputFrame;
+ private int mIndex;
+
+ private ArrayList<ByteBuffer> mInputBuffer;
+ private FileOutputStream mOutputStream;
+
+ public Decoder() { mStats = new Stats(); }
+
+ /**
+ * Setup of decoder
+ *
+ * @param outputStream Will dump the output in this stream if not null.
+ */
+ public void setupDecoder(FileOutputStream outputStream) {
+ mSignalledError = false;
+ mOutputStream = outputStream;
+ }
+
+ private MediaCodec createCodec(String codecName, MediaFormat format) throws IOException {
+ String mime = format.getString(MediaFormat.KEY_MIME);
+ try {
+ MediaCodec codec;
+ if (codecName.isEmpty()) {
+ Log.i(TAG, "File mime type: " + mime);
+ if (mime != null) {
+ codec = MediaCodec.createDecoderByType(mime);
+ Log.i(TAG, "Decoder created for mime type " + mime);
+ return codec;
+ } else {
+ Log.e(TAG, "Mime type is null, please specify a mime type to create decoder");
+ return null;
+ }
+ } else {
+ codec = MediaCodec.createByCodecName(codecName);
+ Log.i(TAG, "Decoder created with codec name: " + codecName + " mime: " + mime);
+ return codec;
+ }
+ } catch (IllegalArgumentException ex) {
+ ex.printStackTrace();
+ Log.e(TAG, "Failed to create decoder for " + codecName + " mime:" + mime);
+ return null;
+ }
+ }
+
+ /**
+ * Decodes the given input buffer,
+ * provided valid list of buffer info and format are passed as inputs.
+ *
+ * @param inputBuffer Decode the provided list of ByteBuffers
+ * @param inputBufferInfo List of buffer info corresponding to provided input buffers
+ * @param asyncMode Will run on async implementation if true
+ * @param format For creating the decoder if codec name is empty and configuring it
+ * @param codecName Will create the decoder with codecName
+ * @return 0 if decode was successful, -1 for fail, -2 for decoder not created
+ * @throws IOException if the codec cannot be created.
+ */
+ public int decode(@NonNull ArrayList<ByteBuffer> inputBuffer,
+ @NonNull ArrayList<BufferInfo> inputBufferInfo, final boolean asyncMode,
+ @NonNull MediaFormat format, String codecName) throws IOException {
+ mInputBuffer = new ArrayList<>(inputBuffer.size());
+ mInputBuffer.addAll(inputBuffer);
+ mInputBufferInfo = new ArrayList<>(inputBufferInfo.size());
+ mInputBufferInfo.addAll(inputBufferInfo);
+ mSawInputEOS = false;
+ mSawOutputEOS = false;
+ mNumOutputFrame = 0;
+ mIndex = 0;
+ long sTime = mStats.getCurTime();
+ mCodec = createCodec(codecName, format);
+ if (mCodec == null) {
+ return -2;
+ }
+ if (asyncMode) {
+ mCodec.setCallback(new MediaCodec.Callback() {
+ @Override
+ public void onInputBufferAvailable(
+ @NonNull MediaCodec mediaCodec, int inputBufferId) {
+ try {
+ mStats.addInputTime();
+ onInputAvailable(inputBufferId, mediaCodec);
+ } catch (Exception e) {
+ e.printStackTrace();
+ Log.e(TAG, e.toString());
+ }
+ }
+
+ @Override
+ public void onOutputBufferAvailable(@NonNull MediaCodec mediaCodec,
+ int outputBufferId, @NonNull MediaCodec.BufferInfo bufferInfo) {
+ mStats.addOutputTime();
+ onOutputAvailable(mediaCodec, outputBufferId, bufferInfo);
+ if (mSawOutputEOS) {
+ Log.i(TAG, "Saw output EOS");
+ synchronized (mLock) { mLock.notify(); }
+ }
+ }
+
+ @Override
+ public void onOutputFormatChanged(
+ @NonNull MediaCodec mediaCodec, @NonNull MediaFormat format) {
+ Log.i(TAG, "Output format changed. Format: " + format.toString());
+ }
+
+ @Override
+ public void onError(
+ @NonNull MediaCodec mediaCodec, @NonNull MediaCodec.CodecException e) {
+ mSignalledError = true;
+ Log.e(TAG, "Codec Error: " + e.toString());
+ e.printStackTrace();
+ synchronized (mLock) { mLock.notify(); }
+ }
+ });
+ }
+ int isEncoder = 0;
+ if (DEBUG) {
+ Log.d(TAG, "Media Format : " + format.toString());
+ }
+ mCodec.configure(format, null, null, isEncoder);
+ mCodec.start();
+ Log.i(TAG, "Codec started ");
+ long eTime = mStats.getCurTime();
+ mStats.setInitTime(mStats.getTimeDiff(sTime, eTime));
+ mStats.setStartTime();
+ if (asyncMode) {
+ try {
+ synchronized (mLock) { mLock.wait(); }
+ if (mSignalledError) {
+ return -1;
+ }
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ } else {
+ while (!mSawOutputEOS && !mSignalledError) {
+ /* Queue input data */
+ if (!mSawInputEOS) {
+ int inputBufferId = mCodec.dequeueInputBuffer(kQueueDequeueTimeoutUs);
+ if (inputBufferId < 0 && inputBufferId != MediaCodec.INFO_TRY_AGAIN_LATER) {
+ Log.e(TAG,
+ "MediaCodec.dequeueInputBuffer "
+ + " returned invalid index : " + inputBufferId);
+ return -1;
+ }
+ mStats.addInputTime();
+ onInputAvailable(inputBufferId, mCodec);
+ }
+ /* Dequeue output data */
+ BufferInfo outputBufferInfo = new BufferInfo();
+ int outputBufferId =
+ mCodec.dequeueOutputBuffer(outputBufferInfo, kQueueDequeueTimeoutUs);
+ if (outputBufferId < 0) {
+ if (outputBufferId == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
+ MediaFormat outFormat = mCodec.getOutputFormat();
+ Log.i(TAG, "Output format changed. Format: " + outFormat.toString());
+ } else if (outputBufferId == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
+ Log.i(TAG, "Ignoring deprecated flag: INFO_OUTPUT_BUFFERS_CHANGED");
+ } else if (outputBufferId != MediaCodec.INFO_TRY_AGAIN_LATER) {
+ Log.e(TAG,
+ "MediaCodec.dequeueOutputBuffer"
+ + " returned invalid index " + outputBufferId);
+ return -1;
+ }
+ } else {
+ mStats.addOutputTime();
+ if (DEBUG) {
+ Log.d(TAG, "Dequeue O/P buffer with BufferID " + outputBufferId);
+ }
+ onOutputAvailable(mCodec, outputBufferId, outputBufferInfo);
+ }
+ if (outputBufferInfo.flags == MediaCodec.BUFFER_FLAG_END_OF_STREAM) {
+ Log.i(TAG, "Saw output EOS");
+ }
+ }
+ }
+ mInputBuffer.clear();
+ mInputBufferInfo.clear();
+ return 0;
+ }
+
+ /**
+ * Stops the codec and releases codec resources.
+ */
+ public void deInitCodec() {
+ long sTime = mStats.getCurTime();
+ if (mCodec != null) {
+ mCodec.stop();
+ mCodec.release();
+ mCodec = null;
+ }
+ long eTime = mStats.getCurTime();
+ mStats.setDeInitTime(mStats.getTimeDiff(sTime, eTime));
+ }
+
+ /**
+ * Prints out the statistics in the information log
+ *
+ * @param inputReference The operation being performed, in this case decode
+ * @param durationUs Duration of the clip in microseconds
+ */
+ public void dumpStatistics(String inputReference, long durationUs) {
+ String operation = "decode";
+ mStats.dumpStatistics(operation, inputReference, durationUs);
+ }
+
+ /**
+ * Resets the stats
+ */
+ public void resetDecoder() { mStats.reset(); }
+
+ private void onInputAvailable(int inputBufferId, MediaCodec mediaCodec) {
+ if ((inputBufferId >= 0) && !mSawInputEOS) {
+ ByteBuffer inputCodecBuffer = mediaCodec.getInputBuffer(inputBufferId);
+ BufferInfo bufInfo = mInputBufferInfo.get(mIndex);
+ inputCodecBuffer.put(mInputBuffer.get(mIndex).array());
+ mIndex++;
+ if (bufInfo.flags == MediaCodec.BUFFER_FLAG_END_OF_STREAM) {
+ mSawInputEOS = true;
+ Log.i(TAG, "Saw input EOS");
+ }
+ mStats.addFrameSize(bufInfo.size);
+ mediaCodec.queueInputBuffer(inputBufferId, bufInfo.offset, bufInfo.size,
+ bufInfo.presentationTimeUs, bufInfo.flags);
+ if (DEBUG) {
+ Log.d(TAG,
+ "Codec Input: "
+ + "flag = " + bufInfo.flags + " timestamp = "
+ + bufInfo.presentationTimeUs + " size = " + bufInfo.size);
+ }
+ }
+ }
+
+ private void onOutputAvailable(
+ MediaCodec mediaCodec, int outputBufferId, BufferInfo outputBufferInfo) {
+ if (mSawOutputEOS || outputBufferId < 0) {
+ return;
+ }
+ mNumOutputFrame++;
+ if (DEBUG) {
+ Log.d(TAG,
+ "In OutputBufferAvailable ,"
+ + " output frame number = " + mNumOutputFrame);
+ }
+ if (mOutputStream != null) {
+ try {
+ ByteBuffer outputBuffer = mediaCodec.getOutputBuffer(outputBufferId);
+ byte[] bytesOutput = new byte[outputBuffer.remaining()];
+ outputBuffer.get(bytesOutput);
+ mOutputStream.write(bytesOutput);
+ } catch (IOException e) {
+ e.printStackTrace();
+ Log.d(TAG, "Error Dumping File: Exception " + e.toString());
+ }
+ }
+ mediaCodec.releaseOutputBuffer(outputBufferId, false);
+ mSawOutputEOS = (outputBufferInfo.flags == MediaCodec.BUFFER_FLAG_END_OF_STREAM);
+ }
+}
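For illustration only (not part of this change): a minimal sketch of how the Decoder above is expected to be driven together with the Extractor added later in this patch, in synchronous mode. Class and file names are placeholders.

```java
// Illustrative sketch: extract every sample of each track, then decode it.
import com.android.media.benchmark.library.Decoder;
import com.android.media.benchmark.library.Extractor;

import android.media.MediaCodec.BufferInfo;
import android.media.MediaFormat;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;

public class DecoderUsageSketch {
    public static void run(File inputFile) throws IOException {
        Extractor extractor = new Extractor();
        try (FileInputStream fileInput = new FileInputStream(inputFile)) {
            int trackCount = extractor.setUpExtractor(fileInput.getFD());
            for (int track = 0; track < trackCount; track++) {
                extractor.selectExtractorTrack(track);
                MediaFormat format = extractor.getFormat(track);
                ArrayList<ByteBuffer> inputBuffer = new ArrayList<>();
                ArrayList<BufferInfo> inputBufferInfo = new ArrayList<>();
                while (true) {
                    // getFrameSample() returns a negative size at EOS and records an
                    // EOS-flagged, zero-sized entry in its BufferInfo.
                    int sampleSize = extractor.getFrameSample();
                    int size = Math.max(sampleSize, 0);
                    ByteBuffer sample = ByteBuffer.allocate(size);
                    sample.put(extractor.getFrameBuffer().array(), 0, size);
                    BufferInfo info = new BufferInfo();
                    info.set(0, extractor.getBufferInfo().size,
                            extractor.getBufferInfo().presentationTimeUs,
                            extractor.getBufferInfo().flags);
                    inputBuffer.add(sample);
                    inputBufferInfo.add(info);
                    if (sampleSize < 0) break;
                }
                Decoder decoder = new Decoder();
                int status = decoder.decode(inputBuffer, inputBufferInfo, false /* sync */,
                        format, "" /* pick decoder from the track's mime type */);
                if (status == 0) {
                    decoder.dumpStatistics(inputFile.getName(), extractor.getClipDuration());
                }
                decoder.deInitCodec();
                extractor.unselectExtractorTrack(track);
            }
            extractor.deinitExtractor();
        }
    }
}
```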
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Encoder.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Encoder.java
new file mode 100644
index 0000000..03db294
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Encoder.java
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.benchmark.library;
+
+import android.media.MediaCodec;
+import android.media.MediaCodec.CodecException;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import androidx.annotation.NonNull;
+
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class Encoder {
+ private static final int ENCODE_DEFAULT_MAX_INPUT_SIZE = 3840;
+ private static final String TAG = "Encoder";
+ private static final boolean DEBUG = false;
+ private static final int kQueueDequeueTimeoutUs = 1000;
+
+ private final Object mLock = new Object();
+ private MediaCodec mCodec;
+ private String mMime;
+ private Stats mStats;
+
+ private int mOffset;
+ private int mFrameSize;
+ private int mNumInputFrame;
+ private int mNumFrames;
+ private int mFrameRate;
+ private int mSampleRate;
+ private long mInputBufferSize;
+
+ private boolean mSawInputEOS;
+ private boolean mSawOutputEOS;
+ private boolean mSignalledError;
+
+ private FileInputStream mInputStream;
+ private FileOutputStream mOutputStream;
+
+ public Encoder() {
+ mStats = new Stats();
+ mNumInputFrame = 0;
+ mSawInputEOS = false;
+ mSawOutputEOS = false;
+ mSignalledError = false;
+ }
+
+ /**
+ * Sets up the streams used by the encoder
+ *
+ * @param encoderOutputStream Stream to which the encoder output is dumped, if not null
+ * @param fileInputStream Stream from which the raw (decoded) input is read
+ */
+ public void setupEncoder(FileOutputStream encoderOutputStream,
+ FileInputStream fileInputStream) {
+ this.mInputStream = fileInputStream;
+ this.mOutputStream = encoderOutputStream;
+ }
+
+ private MediaCodec createCodec(String codecName, String mime) throws IOException {
+ try {
+ MediaCodec codec;
+ if (codecName.isEmpty()) {
+ Log.i(TAG, "Mime type: " + mime);
+ if (mime != null) {
+ codec = MediaCodec.createEncoderByType(mime);
+ Log.i(TAG, "Encoder created for mime type " + mime);
+ return codec;
+ } else {
+ Log.e(TAG, "Mime type is null, please specify a mime type to create encoder");
+ return null;
+ }
+ } else {
+ codec = MediaCodec.createByCodecName(codecName);
+ Log.i(TAG, "Encoder created with codec name: " + codecName + " and mime: " + mime);
+ return codec;
+ }
+ } catch (IllegalArgumentException ex) {
+ ex.printStackTrace();
+ Log.e(TAG, "Failed to create encoder for " + codecName + " mime: " + mime);
+ return null;
+ }
+ }
+
+ /**
+ * Encodes the given raw input file and measures the performance of the encode operation,
+ * provided a valid list of parameters is passed as input.
+ *
+ * @param codecName Name of the encoder to create; if empty, the encoder is created from mime
+ * @param encodeFormat Format of the output data
+ * @param mime Mime type used to create the encoder when codecName is empty
+ * @param frameRate Frame rate of the input, used to compute video timestamps
+ * @param sampleRate Sample rate of the input, used to compute audio timestamps
+ * @param frameSize Size of one input frame in bytes
+ * @param asyncMode Runs on the asynchronous implementation if true
+ * @return 0 if encode was successful, -1 on failure, -2 if the encoder could not be created
+ * @throws IOException If the codec cannot be created.
+ */
+ public int encode(String codecName, MediaFormat encodeFormat, String mime, int frameRate,
+ int sampleRate, int frameSize, boolean asyncMode) throws IOException {
+ mInputBufferSize = mInputStream.getChannel().size();
+ mMime = mime;
+ mOffset = 0;
+ mFrameRate = frameRate;
+ mSampleRate = sampleRate;
+ long sTime = mStats.getCurTime();
+ mCodec = createCodec(codecName, mime);
+ if (mCodec == null) {
+ return -2;
+ }
+ /*Configure Codec*/
+ try {
+ mCodec.configure(encodeFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
+ } catch (IllegalArgumentException | IllegalStateException | MediaCodec.CryptoException e) {
+ Log.e(TAG, "Failed to configure " + mCodec.getName() + " encoder.");
+ e.printStackTrace();
+ return -2;
+ }
+ if (mMime.startsWith("video/")) {
+ mFrameSize = frameSize;
+ } else {
+ int maxInputSize = ENCODE_DEFAULT_MAX_INPUT_SIZE;
+ MediaFormat format = mCodec.getInputFormat();
+ if (format.containsKey(MediaFormat.KEY_MAX_INPUT_SIZE)) {
+ maxInputSize = format.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE);
+ }
+ mFrameSize = frameSize;
+ if (mFrameSize > maxInputSize && maxInputSize > 0) {
+ mFrameSize = maxInputSize;
+ }
+ }
+ mNumFrames = (int) ((mInputBufferSize + mFrameSize - 1) / mFrameSize);
+ if (asyncMode) {
+ mCodec.setCallback(new MediaCodec.Callback() {
+ @Override
+ public void onInputBufferAvailable(@NonNull MediaCodec mediaCodec,
+ int inputBufferId) {
+ try {
+ mStats.addInputTime();
+ onInputAvailable(mediaCodec, inputBufferId);
+ } catch (Exception e) {
+ e.printStackTrace();
+ Log.e(TAG, e.toString());
+ }
+ }
+
+ @Override
+ public void onOutputBufferAvailable(@NonNull MediaCodec mediaCodec,
+ int outputBufferId,
+ @NonNull MediaCodec.BufferInfo bufferInfo) {
+ mStats.addOutputTime();
+ onOutputAvailable(mediaCodec, outputBufferId, bufferInfo);
+ if (mSawOutputEOS) {
+ Log.i(TAG, "Saw output EOS");
+ synchronized (mLock) { mLock.notify(); }
+ }
+ }
+
+ @Override
+ public void onError(@NonNull MediaCodec mediaCodec, @NonNull CodecException e) {
+ mediaCodec.stop();
+ mediaCodec.release();
+ Log.e(TAG, "CodecError: " + e.toString());
+ e.printStackTrace();
+ }
+
+ @Override
+ public void onOutputFormatChanged(@NonNull MediaCodec mediaCodec,
+ @NonNull MediaFormat format) {
+ Log.i(TAG, "Output format changed. Format: " + format.toString());
+ }
+ });
+ }
+ mCodec.start();
+ long eTime = mStats.getCurTime();
+ mStats.setInitTime(mStats.getTimeDiff(sTime, eTime));
+ mStats.setStartTime();
+ if (asyncMode) {
+ try {
+ synchronized (mLock) { mLock.wait(); }
+ if (mSignalledError) {
+ return -1;
+ }
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ } else {
+ while (!mSawOutputEOS && !mSignalledError) {
+ /* Queue input data */
+ if (!mSawInputEOS) {
+ int inputBufferId = mCodec.dequeueInputBuffer(kQueueDequeueTimeoutUs);
+ if (inputBufferId < 0 && inputBufferId != MediaCodec.INFO_TRY_AGAIN_LATER) {
+ Log.e(TAG, "MediaCodec.dequeueInputBuffer " + "returned invalid index : " +
+ inputBufferId);
+ return -1;
+ }
+ mStats.addInputTime();
+ onInputAvailable(mCodec, inputBufferId);
+ }
+ /* Dequeue output data */
+ MediaCodec.BufferInfo outputBufferInfo = new MediaCodec.BufferInfo();
+ int outputBufferId =
+ mCodec.dequeueOutputBuffer(outputBufferInfo, kQueueDequeueTimeoutUs);
+ if (outputBufferId < 0) {
+ if (outputBufferId == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
+ MediaFormat outFormat = mCodec.getOutputFormat();
+ Log.i(TAG, "Output format changed. Format: " + outFormat.toString());
+ } else if (outputBufferId != MediaCodec.INFO_TRY_AGAIN_LATER) {
+ Log.e(TAG, "MediaCodec.dequeueOutputBuffer" + " returned invalid index " +
+ outputBufferId);
+ return -1;
+ }
+ } else {
+ mStats.addOutputTime();
+ if (DEBUG) {
+ Log.d(TAG, "Dequeue O/P buffer with BufferID " + outputBufferId);
+ }
+ onOutputAvailable(mCodec, outputBufferId, outputBufferInfo);
+ }
+ }
+ }
+ return 0;
+ }
+
+ private void onOutputAvailable(MediaCodec mediaCodec, int outputBufferId,
+ MediaCodec.BufferInfo outputBufferInfo) {
+ if (mSawOutputEOS || outputBufferId < 0) {
+ if (mSawOutputEOS) {
+ Log.i(TAG, "Saw output EOS");
+ }
+ return;
+ }
+ ByteBuffer outputBuffer = mediaCodec.getOutputBuffer(outputBufferId);
+ if (mOutputStream != null) {
+ try {
+ byte[] bytesOutput = new byte[outputBuffer.remaining()];
+ outputBuffer.get(bytesOutput);
+ mOutputStream.write(bytesOutput);
+ } catch (IOException e) {
+ e.printStackTrace();
+ Log.d(TAG, "Error Dumping File: Exception " + e.toString());
+ return;
+ }
+ }
+ mStats.addFrameSize(outputBuffer.remaining());
+ mediaCodec.releaseOutputBuffer(outputBufferId, false);
+ mSawOutputEOS = (outputBufferInfo.flags == MediaCodec.BUFFER_FLAG_END_OF_STREAM);
+ }
+
+ private void onInputAvailable(MediaCodec mediaCodec, int inputBufferId) throws IOException {
+ if (mSawOutputEOS || inputBufferId < 0) {
+ if (mSawOutputEOS) {
+ Log.i(TAG, "Saw input EOS");
+ }
+ return;
+ }
+ if (mInputBufferSize < mOffset) {
+ Log.e(TAG, "Out of bound access of input buffer");
+ mSignalledError = true;
+ return;
+ }
+ ByteBuffer inputBuffer = mCodec.getInputBuffer(inputBufferId);
+ if (inputBuffer == null) {
+ mSignalledError = true;
+ return;
+ }
+ int bufSize = inputBuffer.capacity();
+ int bytesRead = mFrameSize;
+ if (mInputBufferSize - mOffset < mFrameSize) {
+ bytesRead = (int) (mInputBufferSize - mOffset);
+ }
+ if (bufSize < bytesRead) {
+ mSignalledError = true;
+ return;
+ }
+ byte[] inputArray = new byte[bytesRead];
+ mInputStream.read(inputArray, 0, bytesRead);
+ inputBuffer.put(inputArray);
+ int flag = 0;
+ if (mNumInputFrame >= mNumFrames - 1 || bytesRead == 0) {
+ Log.i(TAG, "Sending EOS on input last frame");
+ mSawInputEOS = true;
+ flag = MediaCodec.BUFFER_FLAG_END_OF_STREAM;
+ }
+ int presentationTimeUs;
+ if (mMime.startsWith("video/")) {
+ presentationTimeUs = mNumInputFrame * (1000000 / mFrameRate);
+ } else {
+ presentationTimeUs = mNumInputFrame * mFrameSize * 1000000 / mSampleRate;
+ }
+ mediaCodec.queueInputBuffer(inputBufferId, 0, bytesRead, presentationTimeUs, flag);
+ mNumInputFrame++;
+ mOffset += bytesRead;
+ }
+
+ /**
+ * Stops the codec and releases codec resources.
+ */
+ public void deInitEncoder() {
+ long sTime = mStats.getCurTime();
+ if (mCodec != null) {
+ mCodec.stop();
+ mCodec.release();
+ mCodec = null;
+ }
+ long eTime = mStats.getCurTime();
+ mStats.setDeInitTime(mStats.getTimeDiff(sTime, eTime));
+ }
+
+ /**
+ * Prints out the statistics in the information log
+ *
+ * @param inputReference The operation being performed, in this case encode
+ * @param durationUs Duration of the clip in microseconds
+ */
+ public void dumpStatistics(String inputReference, long durationUs) {
+ String operation = "encode";
+ mStats.dumpStatistics(operation, inputReference, durationUs);
+ }
+
+ /**
+ * Resets the stats
+ */
+ public void resetEncoder() {
+ mOffset = 0;
+ mInputBufferSize = 0;
+ mNumInputFrame = 0;
+ mSawInputEOS = false;
+ mSawOutputEOS = false;
+ mSignalledError = false;
+ mStats.reset();
+ }
+}
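For illustration only (not part of this change): a hedged sketch of driving this Encoder for a raw video input. The resolution, bitrate, color format and file names are placeholder assumptions, not values mandated by the benchmark.

```java
// Illustrative sketch: encode a raw YUV420 file with the Encoder class above.
import com.android.media.benchmark.library.Encoder;

import android.media.MediaCodecInfo;
import android.media.MediaFormat;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

public class EncoderUsageSketch {
    public static void run(File rawYuvFile, long clipDurationUs) throws IOException {
        String mime = MediaFormat.MIMETYPE_VIDEO_AVC;
        int width = 1280, height = 720, frameRate = 30;
        MediaFormat format = MediaFormat.createVideoFormat(mime, width, height);
        format.setInteger(MediaFormat.KEY_FRAME_RATE, frameRate);
        format.setInteger(MediaFormat.KEY_BIT_RATE, 2000000);
        format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);
        format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
                MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Flexible);

        Encoder encoder = new Encoder();
        try (FileInputStream rawInput = new FileInputStream(rawYuvFile)) {
            encoder.setupEncoder(null /* no output dump */, rawInput);
            int frameSize = width * height * 3 / 2;  // one YUV420 frame in bytes
            int status = encoder.encode("" /* pick encoder from mime */, format, mime,
                    frameRate, 0 /* sampleRate, unused for video */, frameSize, false /* sync */);
            if (status == 0) {
                encoder.dumpStatistics(rawYuvFile.getName(), clipDurationUs);
            }
            encoder.deInitEncoder();
            encoder.resetEncoder();
        }
    }
}
```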
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Extractor.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Extractor.java
new file mode 100644
index 0000000..459e2a9
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Extractor.java
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.benchmark.library;
+
+import android.media.MediaCodec;
+import android.media.MediaExtractor;
+import android.media.MediaFormat;
+import android.util.Log;
+
+import java.io.FileDescriptor;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+public class Extractor {
+ private static final String TAG = "Extractor";
+ private static final int kMaxBufSize = 1024 * 1024 * 16;
+ private MediaExtractor mExtractor;
+ private ByteBuffer mFrameBuffer;
+ private MediaCodec.BufferInfo mBufferInfo;
+ private Stats mStats;
+ private long mDurationUs;
+
+ public Extractor() {
+ mFrameBuffer = ByteBuffer.allocate(kMaxBufSize);
+ mBufferInfo = new MediaCodec.BufferInfo();
+ mStats = new Stats();
+ }
+
+ /**
+ * Creates a MediaExtractor and sets the data source (FileDescriptor) to use
+ *
+ * @param fileDescriptor FileDescriptor for the file which is to be extracted
+ * @return Track count of the sample
+ * @throws IOException If the data source cannot be set
+ */
+ public int setUpExtractor(FileDescriptor fileDescriptor) throws IOException {
+ long sTime = mStats.getCurTime();
+ mExtractor = new MediaExtractor();
+ mExtractor.setDataSource(fileDescriptor);
+ long eTime = mStats.getCurTime();
+ long timeTaken = mStats.getTimeDiff(sTime, eTime);
+ mStats.setInitTime(timeTaken);
+ return mExtractor.getTrackCount();
+ }
+
+ /**
+ * Returns the track format of the specified index
+ *
+ * @param trackID Index of the track
+ * @return Format of the track
+ */
+ public MediaFormat getFormat(int trackID) { return mExtractor.getTrackFormat(trackID); }
+
+ /**
+ * Returns the extracted buffer for the input clip
+ */
+ public ByteBuffer getFrameBuffer() { return this.mFrameBuffer; }
+
+ /**
+ * Returns the information of buffer related to sample
+ */
+ public MediaCodec.BufferInfo getBufferInfo() { return this.mBufferInfo; }
+
+ /**
+ * Returns the duration of the sample
+ */
+ public long getClipDuration() { return this.mDurationUs; }
+
+ /**
+ * Retrieves the current sample and stores it in the byte buffer
+ * Also sets the information related to the extracted sample and stores it in buffer info
+ *
+ * @return Sample size of the extracted sample
+ */
+ public int getFrameSample() {
+ int sampleSize = mExtractor.readSampleData(mFrameBuffer, 0);
+ if (sampleSize < 0) {
+ mBufferInfo.flags = MediaCodec.BUFFER_FLAG_END_OF_STREAM;
+ mBufferInfo.size = 0;
+ } else {
+ mBufferInfo.size = sampleSize;
+ mBufferInfo.offset = 0;
+ mBufferInfo.flags = mExtractor.getSampleFlags();
+ mBufferInfo.presentationTimeUs = mExtractor.getSampleTime();
+ mExtractor.advance();
+ }
+ return sampleSize;
+ }
+
+ /**
+ * Sets up the track format and gets the duration of the sample
+ * The track is selected here for extraction
+ *
+ * @param trackId Track index to be selected
+ * @return 0 for valid track, otherwise -1
+ */
+ public int selectExtractorTrack(int trackId) {
+ MediaFormat trackFormat = mExtractor.getTrackFormat(trackId);
+ mDurationUs = trackFormat.getLong(MediaFormat.KEY_DURATION);
+ if (mDurationUs < 0) {
+ Log.e(TAG, "Invalid Clip");
+ return -1;
+ }
+ mExtractor.selectTrack(trackId);
+ return 0;
+ }
+
+ /**
+ * Unselect the track
+ *
+ * @param trackId Track Index to be unselected
+ */
+ public void unselectExtractorTrack(int trackId) { mExtractor.unselectTrack(trackId); }
+
+ /**
+ * Free up the resources
+ */
+ public void deinitExtractor() {
+ long sTime = mStats.getCurTime();
+ mExtractor.release();
+ long eTime = mStats.getCurTime();
+ long timeTaken = mStats.getTimeDiff(sTime, eTime);
+ mStats.setDeInitTime(timeTaken);
+ }
+
+ /**
+ * Performs extract operation
+ *
+ * @param currentTrack Track index to be extracted
+ * @return Status as 0 if extraction is successful, -1 otherwise
+ */
+ public int extractSample(int currentTrack) {
+ int status;
+ status = selectExtractorTrack(currentTrack);
+ if (status == -1) {
+ Log.e(TAG, "Failed to select track");
+ return -1;
+ }
+ mStats.setStartTime();
+ while (true) {
+ int readSampleSize = getFrameSample();
+ if (readSampleSize <= 0) {
+ break;
+ }
+ mStats.addOutputTime();
+ mStats.addFrameSize(readSampleSize);
+ }
+ unselectExtractorTrack(currentTrack);
+ return 0;
+ }
+
+ /**
+ * Write the benchmark logs for the given input file
+ *
+ * @param inputReference Name of the input file
+ */
+ public void dumpStatistics(String inputReference) {
+ String operation = "extract";
+ mStats.dumpStatistics(operation, inputReference, mDurationUs);
+ }
+}
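For illustration only (not part of this change): a minimal sketch of running the extract benchmark on its own, with placeholder names.

```java
// Illustrative sketch: run the extract benchmark on every track of a file.
import com.android.media.benchmark.library.Extractor;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

public class ExtractorUsageSketch {
    public static void run(File inputFile) throws IOException {
        Extractor extractor = new Extractor();
        try (FileInputStream fileInput = new FileInputStream(inputFile)) {
            int trackCount = extractor.setUpExtractor(fileInput.getFD());
            for (int track = 0; track < trackCount; track++) {
                // extractSample() selects the track, reads every sample and records timings.
                if (extractor.extractSample(track) == 0) {
                    extractor.dumpStatistics(inputFile.getName());
                }
            }
            extractor.deinitExtractor();
        }
    }
}
```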
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Muxer.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Muxer.java
new file mode 100644
index 0000000..49eaa1c
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Muxer.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.media.benchmark.library;
+
+import android.content.Context;
+import android.media.MediaCodec;
+import android.media.MediaFormat;
+import android.media.MediaMuxer;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+
+public class Muxer {
+ private Stats mStats;
+ private MediaMuxer mMuxer;
+
+ /**
+ * Creates a Media Muxer for the specified path
+ *
+ * @param context App context to specify the output file path
+ * @param outputFormat Format of the output media file
+ * @param trackFormat Format of the current track
+ * @return Track index of the newly added track, or -1 if the muxer could not be set up
+ */
+ public int setUpMuxer(Context context, int outputFormat, MediaFormat trackFormat) {
+ try {
+ mStats = new Stats();
+ long sTime = mStats.getCurTime();
+ mMuxer = new MediaMuxer(context.getFilesDir().getPath() + "/mux.out.", outputFormat);
+ int trackIndex = mMuxer.addTrack(trackFormat);
+ mMuxer.start();
+ long eTime = mStats.getCurTime();
+ long timeTaken = mStats.getTimeDiff(sTime, eTime);
+ mStats.setInitTime(timeTaken);
+ return trackIndex;
+ } catch (IllegalArgumentException | IOException e) {
+ e.printStackTrace();
+ return -1;
+ }
+ }
+
+ /**
+ * Performs the Mux operation
+ *
+ * @param trackIndex Track index of the sample
+ * @param inputExtractedBuffer Buffer containing encoded samples
+ * @param inputBufferInfo Buffer information related to these samples
+ * @return 0 if the write operation is successful, -1 otherwise
+ */
+ public int mux(int trackIndex, ArrayList<ByteBuffer> inputExtractedBuffer,
+ ArrayList<MediaCodec.BufferInfo> inputBufferInfo) {
+ mStats.setStartTime();
+ for (int sampleCount = 0; sampleCount < inputExtractedBuffer.size(); sampleCount++) {
+ try {
+ mMuxer.writeSampleData(trackIndex, inputExtractedBuffer.get(sampleCount),
+ inputBufferInfo.get(sampleCount));
+ mStats.addOutputTime();
+ mStats.addFrameSize(inputBufferInfo.get(sampleCount).size);
+ } catch (IllegalArgumentException | IllegalStateException e) {
+ e.printStackTrace();
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ /**
+ * Stops the muxer and frees up the resources
+ */
+ public void deInitMuxer() {
+ long sTime = mStats.getCurTime();
+ mMuxer.stop();
+ mMuxer.release();
+ long eTime = mStats.getCurTime();
+ long timeTaken = mStats.getTimeDiff(sTime, eTime);
+ mStats.setDeInitTime(timeTaken);
+ }
+
+ /**
+ * Resets the stats
+ */
+ public void resetMuxer() {
+ mStats.reset();
+ }
+
+ /**
+ * Write the benchmark logs for the given input file
+ *
+ * @param inputReference Name of the input file
+ * @param clipDuration Duration of the given inputReference file
+ */
+ public void dumpStatistics(String inputReference, long clipDuration) {
+ String operation = "mux";
+ mStats.dumpStatistics(operation, inputReference, clipDuration);
+ }
+}
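For illustration only (not part of this change): a sketch of muxing samples previously collected with the Extractor (as in the decoder sketch earlier). The MP4 output container is a placeholder choice.

```java
// Illustrative sketch: remux pre-collected samples with the Muxer class above.
import com.android.media.benchmark.library.Muxer;

import android.content.Context;
import android.media.MediaCodec;
import android.media.MediaFormat;
import android.media.MediaMuxer;

import java.nio.ByteBuffer;
import java.util.ArrayList;

public class MuxerUsageSketch {
    public static void run(Context context, MediaFormat trackFormat,
            ArrayList<ByteBuffer> samples, ArrayList<MediaCodec.BufferInfo> sampleInfos,
            String inputReference, long clipDurationUs) {
        Muxer muxer = new Muxer();
        // MP4 is assumed here; any MediaMuxer.OutputFormat that supports the
        // track's mime type can be used instead.
        int trackIndex = muxer.setUpMuxer(context,
                MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4, trackFormat);
        if (trackIndex >= 0 && muxer.mux(trackIndex, samples, sampleInfos) == 0) {
            muxer.dumpStatistics(inputReference, clipDurationUs);
        }
        muxer.deInitMuxer();
        muxer.resetMuxer();
    }
}
```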
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Stats.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Stats.java
new file mode 100644
index 0000000..18ab5be
--- /dev/null
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Stats.java
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.media.benchmark.library;
+
+import android.util.Log;
+
+import java.util.ArrayList;
+
+/**
+ * Measures Performance.
+ */
+public class Stats {
+ private static final String TAG = "Stats";
+ private long mInitTimeNs;
+ private long mDeInitTimeNs;
+ private long mStartTimeNs;
+ private ArrayList<Integer> mFrameSizes;
+ private ArrayList<Long> mInputTimer;
+ private ArrayList<Long> mOutputTimer;
+
+ public Stats() {
+ mFrameSizes = new ArrayList<>();
+ mInputTimer = new ArrayList<>();
+ mOutputTimer = new ArrayList<>();
+ mInitTimeNs = 0;
+ mDeInitTimeNs = 0;
+ }
+
+ public long getCurTime() { return System.nanoTime(); }
+
+ public void setInitTime(long initTime) { mInitTimeNs = initTime; }
+
+ public void setDeInitTime(long deInitTime) { mDeInitTimeNs = deInitTime; }
+
+ public void setStartTime() { mStartTimeNs = System.nanoTime(); }
+
+ public void addFrameSize(int size) { mFrameSizes.add(size); }
+
+ public void addInputTime() { mInputTimer.add(System.nanoTime()); }
+
+ public void addOutputTime() { mOutputTimer.add(System.nanoTime()); }
+
+ public void reset() {
+ if (mFrameSizes.size() != 0) {
+ mFrameSizes.clear();
+ }
+
+ if (mInputTimer.size() != 0) {
+ mInputTimer.clear();
+ }
+
+ if (mOutputTimer.size() != 0) {
+ mOutputTimer.clear();
+ }
+ }
+
+ public long getInitTime() { return mInitTimeNs; }
+
+ public long getDeInitTime() { return mDeInitTimeNs; }
+
+ public long getTimeDiff(long sTime, long eTime) { return (eTime - sTime); }
+
+ private long getTotalTime() {
+ if (mOutputTimer.size() == 0) {
+ return -1;
+ }
+ long lastTime = mOutputTimer.get(mOutputTimer.size() - 1);
+ return lastTime - mStartTimeNs;
+ }
+
+ private long getTotalSize() {
+ long totalSize = 0;
+ for (long size : mFrameSizes) {
+ totalSize += size;
+ }
+ return totalSize;
+ }
+
+ /**
+ * Dumps the stats of the operation for a given input media.
+ *
+ * @param operation Describes the operation performed on the input media
+ *                  (i.e. extract/mux/decode/encode)
+ * @param inputReference Input media
+ * @param durationUs Duration of the input media in microseconds
+ */
+ public void dumpStatistics(String operation, String inputReference, long durationUs) {
+ if (mOutputTimer.size() == 0) {
+ Log.e(TAG, "No output produced");
+ return;
+ }
+ long totalTimeTakenNs = getTotalTime();
+ long timeTakenPerSec = (totalTimeTakenNs * 1000000) / durationUs;
+ long timeToFirstFrameNs = mOutputTimer.get(0) - mStartTimeNs;
+ long size = getTotalSize();
+ // get min and max output intervals.
+ long intervalNs;
+ long minTimeTakenNs = Long.MAX_VALUE;
+ long maxTimeTakenNs = 0;
+ long prevIntervalNs = mStartTimeNs;
+ for (int idx = 0; idx < mOutputTimer.size() - 1; idx++) {
+ intervalNs = mOutputTimer.get(idx) - prevIntervalNs;
+ prevIntervalNs = mOutputTimer.get(idx);
+ if (minTimeTakenNs > intervalNs) {
+ minTimeTakenNs = intervalNs;
+ } else if (maxTimeTakenNs < intervalNs) {
+ maxTimeTakenNs = intervalNs;
+ }
+ }
+ // Print the Stats
+ Log.i(TAG, "Input Reference : " + inputReference);
+ Log.i(TAG, "Setup Time in nano sec : " + mInitTimeNs);
+ Log.i(TAG, "Average Time in nano sec : " + totalTimeTakenNs / mOutputTimer.size());
+ Log.i(TAG, "Time to first frame in nano sec : " + timeToFirstFrameNs);
+ Log.i(TAG, "Time taken (in nano sec) to " + operation + " 1 sec of content : " +
+ timeTakenPerSec);
+ Log.i(TAG, "Total bytes " + operation + "ed : " + size);
+ Log.i(TAG, "Number of bytes " + operation + "ed per second : " +
+ (size * 1000000000) / totalTimeTakenNs);
+ Log.i(TAG, "Minimum Time in nano sec : " + minTimeTakenNs);
+ Log.i(TAG, "Maximum Time in nano sec : " + maxTimeTakenNs);
+ Log.i(TAG, "Destroy Time in nano sec : " + mDeInitTimeNs);
+ }
+}
\ No newline at end of file
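For illustration only (not part of this change): the timing pattern that the library classes follow with this Stats helper, sketched with placeholder values.

```java
// Illustrative sketch of the measurement pattern used by Decoder/Encoder/Extractor/Muxer.
import com.android.media.benchmark.library.Stats;

public class StatsUsageSketch {
    public static void run() {
        Stats stats = new Stats();

        long setupStart = stats.getCurTime();
        // ... create and configure a codec / extractor / muxer here ...
        stats.setInitTime(stats.getTimeDiff(setupStart, stats.getCurTime()));

        stats.setStartTime();
        for (int frame = 0; frame < 3; frame++) {
            stats.addOutputTime();    // one entry per produced frame
            stats.addFrameSize(1024); // size of that frame in bytes
        }

        // Operation name, input name and clip duration (in microseconds) are placeholders.
        stats.dumpStatistics("decode", "sample.mp4", 10000000L);
    }
}
```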
diff --git a/media/tests/benchmark/README.md b/media/tests/benchmark/README.md
new file mode 100644
index 0000000..487ddb8
--- /dev/null
+++ b/media/tests/benchmark/README.md
@@ -0,0 +1,122 @@
+# Benchmark tests
+
+The benchmark app analyses the time taken by MediaCodec, MediaExtractor and MediaMuxer for a given set of inputs. It is used to benchmark these modules on Android devices.
+Benchmark results are emitted to logcat.
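+
+For the SDK tests, one way to pull the results of a run from logcat (the tag names below assume the log tags used by the library classes, e.g. Stats):
+```
+adb logcat | grep -E "Extractor|Decoder|Encoder|Muxer|Stats"
+```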
+
+This page describes the steps to run the NDK and SDK layer tests.
+
+Run the following steps to build the test suite:
+```
+mmm frameworks/av/media/tests/benchmark/
+```
+
+# NDK
+
+To run the test suite for measuring the performance of the native layer, follow these steps:
+
+The binaries will be created in the following path: ${OUT}/data/nativetest64/
+
+```
+adb push ${OUT}/data/nativetest64/* /data/local/tmp/
+```
+
+For example:
+```
+adb push ${OUT}/data/nativetest64/extractorTest/extractorTest /data/local/tmp/
+```
+
+To run the binary, follow the commands mentioned below under each module.
+
+The resource file for the tests is taken from [here](https://drive.google.com/open?id=1ghMr17BBJ7n0pqbm7oREiTN_MNemJUqy)
+
+Download the MediaBenchmark.zip file, unzip and push it to /data/local/tmp/ on the device.
+
+```
+unzip MediaBenchmark.zip
+adb push MediaBenchmark /data/local/tmp
+```
+
+## Extractor
+
+The test extracts elementary stream and benchmarks the extractors available in NDK.
+
+The resource files are assumed to be at /data/local/tmp/MediaBenchmark/res/. You can use a different location, but you have to modify the rest of the instructions to replace /data/local/tmp/MediaBenchmark/res/ with wherever you chose to put the files.
+
+The path to these files on the device is required to be given for the test.
+
+```
+adb shell /data/local/tmp/extractorTest -P /data/local/tmp/MediaBenchmark/res/
+```
+
+## Decoder
+
+The test decodes input stream and benchmarks the decoders available in NDK.
+
+Setup steps are same as extractor.
+
+```
+adb shell /data/local/tmp/decoderTest -P /data/local/tmp/MediaBenchmark/res/
+```
+
+## Muxer
+
+The test muxes elementary stream and benchmarks the muxers available in NDK.
+
+Setup steps are same as extractor.
+
+```
+adb shell /data/local/tmp/muxerTest -P /data/local/tmp/MediaBenchmark/res/
+```
+
+## Encoder
+
+The test encodes input stream and benchmarks the encoders available in NDK.
+
+Setup steps are same as extractor.
+
+```
+adb shell /data/local/tmp/encoderTest -P /data/local/tmp/MediaBenchmark/res/
+```
+
+# SDK
+
+To run the test suite for measuring the performance of the SDK APIs, follow these steps:
+
+The apk will be created at the following path:
+${OUT}/testcases/MediaBenchmarkApp/arm64/
+
+To get the resource files for the test, follow the instructions given in the [NDK](#NDK) section.
+
+To install the apk, run the command:
+```
+adb install -f -r ${OUT}/testcases/MediaBenchmarkApp/arm64/MediaBenchmarkApp.apk
+```
+
+To run all the tests, run the command:
+```
+adb shell am instrument -w -r -e package com.android.media.benchmark.tests com.android.media.benchmark/androidx.test.runner.AndroidJUnitRunner
+```
+
+## Extractor
+
+The test extracts elementary stream and benchmarks the extractors available in SDK.
+```
+adb shell am instrument -w -r -e class 'com.android.media.benchmark.tests.ExtractorTest' com.android.media.benchmark/androidx.test.runner.AndroidJUnitRunner
+```
+
+## Decoder
+
+The test decodes input stream and benchmarks the decoders available in SDK.
+```
+adb shell am instrument -w -r -e class 'com.android.media.benchmark.tests.DecoderTest' com.android.media.benchmark/androidx.test.runner.AndroidJUnitRunner
+```
+
+## Muxer
+
+The test muxes elementary stream and benchmarks different writers available in SDK.
+```
+adb shell am instrument -w -r -e class 'com.android.media.benchmark.tests.MuxerTest' com.android.media.benchmark/androidx.test.runner.AndroidJUnitRunner
+```
+
+## Encoder
+
+The test encodes input stream and benchmarks the encoders available in SDK.
+```
+adb shell am instrument -w -r -e class 'com.android.media.benchmark.tests.EncoderTest' com.android.media.benchmark/androidx.test.runner.AndroidJUnitRunner
+```
diff --git a/media/tests/benchmark/src/native/common/Android.bp b/media/tests/benchmark/src/native/common/Android.bp
new file mode 100644
index 0000000..1da0102
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/Android.bp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+ name: "libmediabenchmark_common",
+ defaults: [
+ "libmediabenchmark-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: [
+ "BenchmarkCommon.cpp",
+ "Stats.cpp",
+ ],
+
+ export_include_dirs: ["."],
+
+ ldflags: ["-Wl,-Bsymbolic"]
+}
+
+cc_defaults {
+ name: "libmediabenchmark_common-defaults",
+
+ defaults: [
+ "libmediabenchmark-defaults",
+ ],
+
+ static_libs: [
+ "libmediabenchmark_common",
+ ],
+}
+
+cc_defaults {
+ name: "libmediabenchmark-defaults",
+
+ shared_libs: [
+ "libmediandk",
+ "liblog",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ]
+}
+
+// public dependency for native implementation
+// to be used by code under media/benchmark/* only
+cc_defaults {
+ name: "libmediabenchmark_soft_sanitize_all-defaults",
+
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ cfi: true,
+ }
+}
diff --git a/media/tests/benchmark/src/native/common/BenchmarkCommon.cpp b/media/tests/benchmark/src/native/common/BenchmarkCommon.cpp
new file mode 100644
index 0000000..5bdb48a
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/BenchmarkCommon.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BenchmarkCommon"
+
+#include "BenchmarkCommon.h"
+#include <iostream>
+
+void CallBackHandle::ioThread() {
+ ALOGV("In %s mIsDone : %d, mSawError : %d ", __func__, mIsDone, mSawError);
+ while (!mIsDone && !mSawError) {
+ auto task = mIOQueue.pop();
+ task();
+ }
+}
+
+void OnInputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index) {
+ ALOGV("OnInputAvailableCB: index(%d)", index);
+ CallBackHandle *self = (CallBackHandle *)userdata;
+ self->getStats()->addInputTime();
+ self->mIOQueue.push([self, codec, index]() { self->onInputAvailable(codec, index); });
+}
+
+void OnOutputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index,
+ AMediaCodecBufferInfo *bufferInfo) {
+ ALOGV("OnOutputAvailableCB: index(%d), (%d, %d, %lld, 0x%x)", index, bufferInfo->offset,
+ bufferInfo->size, (long long)bufferInfo->presentationTimeUs, bufferInfo->flags);
+ CallBackHandle *self = (CallBackHandle *)userdata;
+ self->getStats()->addOutputTime();
+ AMediaCodecBufferInfo bufferInfoCopy = *bufferInfo;
+ self->mIOQueue.push([self, codec, index, bufferInfoCopy]() {
+ AMediaCodecBufferInfo bc = bufferInfoCopy;
+ self->onOutputAvailable(codec, index, &bc);
+ });
+}
+
+void OnFormatChangedCB(AMediaCodec *codec, void *userdata, AMediaFormat *format) {
+ ALOGV("OnFormatChangedCB: format(%s)", AMediaFormat_toString(format));
+ CallBackHandle *self = (CallBackHandle *)userdata;
+ self->mIOQueue.push([self, codec, format]() { self->onFormatChanged(codec, format); });
+}
+
+void OnErrorCB(AMediaCodec *codec, void *userdata, media_status_t err, int32_t actionCode,
+ const char *detail) {
+ (void)codec;
+ ALOGV("OnErrorCB: err(%d), actionCode(%d), detail(%s)", err, actionCode, detail);
+ CallBackHandle *self = (CallBackHandle *)userdata;
+ self->mSawError = true;
+}
+
+AMediaCodec *createMediaCodec(AMediaFormat *format, const char *mime, string codecName,
+ bool isEncoder) {
+ ALOGV("In %s", __func__);
+ if (!mime) {
+ ALOGE("Please specify a mime type to create codec");
+ return nullptr;
+ }
+
+ AMediaCodec *codec;
+ if (!codecName.empty()) {
+ codec = AMediaCodec_createCodecByName(codecName.c_str());
+ if (!codec) {
+ ALOGE("Unable to create codec by name: %s", codecName.c_str());
+ return nullptr;
+ }
+ } else {
+ if (isEncoder) {
+ codec = AMediaCodec_createEncoderByType(mime);
+ } else {
+ codec = AMediaCodec_createDecoderByType(mime);
+ }
+ if (!codec) {
+ ALOGE("Unable to create codec by mime: %s", mime);
+ return nullptr;
+ }
+ }
+
+ /* Configure codec with the given format*/
+ const char *s = AMediaFormat_toString(format);
+ ALOGV("Input format: %s\n", s);
+
+ media_status_t status = AMediaCodec_configure(codec, format, nullptr, nullptr, isEncoder);
+ if (status != AMEDIA_OK) {
+ ALOGE("AMediaCodec_configure failed %d", status);
+ return nullptr;
+ }
+ return codec;
+}
\ No newline at end of file
diff --git a/media/tests/benchmark/src/native/common/BenchmarkCommon.h b/media/tests/benchmark/src/native/common/BenchmarkCommon.h
new file mode 100644
index 0000000..df16baf
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/BenchmarkCommon.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BENCHMARK_COMMON_H__
+#define __BENCHMARK_COMMON_H__
+
+#include <utils/Log.h>
+
+#include <inttypes.h>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include <media/NdkMediaCodec.h>
+#include <media/NdkMediaError.h>
+
+#include "Stats.h"
+
+using namespace std;
+
+constexpr uint32_t kQueueDequeueTimeoutUs = 1000;
+constexpr uint32_t kMaxCSDStrlen = 16;
+constexpr uint32_t kMaxBufferSize = 1024 * 1024 * 16;
+
+template <typename T>
+class CallBackQueue {
+ public:
+ CallBackQueue() {}
+ ~CallBackQueue() {}
+
+ void push(T elem) {
+ bool needsNotify = false;
+ {
+ lock_guard<mutex> lock(mMutex);
+ needsNotify = mQueue.empty();
+ mQueue.push(move(elem));
+ }
+ if (needsNotify) mQueueNotEmptyCondition.notify_one();
+ }
+
+ T pop() {
+ unique_lock<mutex> lock(mMutex);
+ if (mQueue.empty()) {
+ mQueueNotEmptyCondition.wait(lock, [this]() { return !mQueue.empty(); });
+ }
+ auto result = mQueue.front();
+ mQueue.pop();
+ return result;
+ }
+
+ private:
+ mutex mMutex;
+ queue<T> mQueue;
+ condition_variable mQueueNotEmptyCondition;
+};
+
+class CallBackHandle {
+ public:
+ CallBackHandle() : mSawError(false), mIsDone(false), mStats(nullptr) {
+ mStats = new Stats();
+ }
+
+ virtual ~CallBackHandle() {
+ if (mIOThread.joinable()) mIOThread.join();
+ if (mStats) delete mStats;
+ }
+
+ void ioThread();
+
+ // Implementation in child class (Decoder/Encoder)
+ virtual void onInputAvailable(AMediaCodec *codec, int32_t index) {
+ (void)codec;
+ (void)index;
+ }
+ virtual void onFormatChanged(AMediaCodec *codec, AMediaFormat *format) {
+ (void)codec;
+ (void)format;
+ }
+ virtual void onOutputAvailable(AMediaCodec *codec, int32_t index,
+ AMediaCodecBufferInfo *bufferInfo) {
+ (void)codec;
+ (void)index;
+ (void)bufferInfo;
+ }
+
+ Stats *getStats() { return mStats; }
+
+ // Keep a queue of all function callbacks.
+ typedef function<void()> IOTask;
+ CallBackQueue<IOTask> mIOQueue;
+ thread mIOThread;
+ bool mSawError;
+ bool mIsDone;
+
+ protected:
+ Stats *mStats;
+};
+
+// Async API's callback
+void OnInputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index);
+
+void OnOutputAvailableCB(AMediaCodec *codec, void *userdata, int32_t index,
+ AMediaCodecBufferInfo *bufferInfo);
+
+void OnFormatChangedCB(AMediaCodec *codec, void *userdata, AMediaFormat *format);
+
+void OnErrorCB(AMediaCodec *codec, void * /* userdata */, media_status_t err, int32_t actionCode,
+ const char *detail);
+
+// Utility to create and configure AMediaCodec
+AMediaCodec *createMediaCodec(AMediaFormat *format, const char *mime, string codecName,
+ bool isEncoder);
+
+#endif // __BENCHMARK_COMMON_H__
diff --git a/media/tests/benchmark/src/native/common/Stats.cpp b/media/tests/benchmark/src/native/common/Stats.cpp
new file mode 100644
index 0000000..6bcd3ce
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/Stats.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "Stats"
+
+#include <iostream>
+#include <stdint.h>
+#include <utils/Log.h>
+
+#include "Stats.h"
+
+/**
+ * Dumps the stats of the operation for a given input media.
+ *
+ * \param operation describes the operation performed on the input media
+ * (i.e. extract/mux/decode/encode)
+ * \param inputReference input media
+ * \param durationUs is the duration of the input media in microseconds.
+ */
+void Stats::dumpStatistics(std::string operation, std::string inputReference, int64_t durationUs) {
+ ALOGV("In %s", __func__);
+ if (!mOutputTimer.size()) {
+ ALOGE("No output produced");
+ return;
+ }
+ nsecs_t totalTimeTakenNs = getTotalTime();
+ nsecs_t timeTakenPerSec = (totalTimeTakenNs * 1000000) / durationUs;
+ nsecs_t timeToFirstFrameNs = *mOutputTimer.begin() - mStartTimeNs;
+ int32_t size = std::accumulate(mFrameSizes.begin(), mFrameSizes.end(), 0);
+ // get min and max output intervals.
+ nsecs_t intervalNs;
+ nsecs_t minTimeTakenNs = INT64_MAX;
+ nsecs_t maxTimeTakenNs = 0;
+ nsecs_t prevIntervalNs = mStartTimeNs;
+ for (int32_t idx = 0; idx < mOutputTimer.size() - 1; idx++) {
+ intervalNs = mOutputTimer.at(idx) - prevIntervalNs;
+ prevIntervalNs = mOutputTimer.at(idx);
+ if (minTimeTakenNs > intervalNs) minTimeTakenNs = intervalNs;
+ else if (maxTimeTakenNs < intervalNs) maxTimeTakenNs = intervalNs;
+ }
+
+ // Print the Stats
+ std::cout << "Input Reference : " << inputReference << endl;
+ std::cout << "Setup Time in nano sec : " << mInitTimeNs << endl;
+ std::cout << "Average Time in nano sec : " << totalTimeTakenNs / mOutputTimer.size() << endl;
+ std::cout << "Time to first frame in nano sec : " << timeToFirstFrameNs << endl;
+ std::cout << "Time taken (in nano sec) to " << operation
+ << " 1 sec of content : " << timeTakenPerSec << endl;
+ std::cout << "Total bytes " << operation << "ed : " << size << endl;
+ std::cout << "Minimum Time in nano sec : " << minTimeTakenNs << endl;
+ std::cout << "Maximum Time in nano sec : " << maxTimeTakenNs << endl;
+ std::cout << "Destroy Time in nano sec : " << mDeInitTimeNs << endl;
+}
diff --git a/media/tests/benchmark/src/native/common/Stats.h b/media/tests/benchmark/src/native/common/Stats.h
new file mode 100644
index 0000000..024319a
--- /dev/null
+++ b/media/tests/benchmark/src/native/common/Stats.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __STATS_H__
+#define __STATS_H__
+
+#include <sys/time.h>
+#include <algorithm>
+#include <numeric>
+#include <vector>
+#include <utils/Timers.h>
+
+using namespace std;
+
+class Stats {
+ public:
+ Stats() {
+ mInitTimeNs = 0;
+ mDeInitTimeNs = 0;
+ }
+
+ ~Stats() {
+ reset();
+ }
+
+ private:
+ nsecs_t mInitTimeNs;
+ nsecs_t mDeInitTimeNs;
+ nsecs_t mStartTimeNs;
+ std::vector<int32_t> mFrameSizes;
+ std::vector<nsecs_t> mInputTimer;
+ std::vector<nsecs_t> mOutputTimer;
+
+ public:
+ nsecs_t getCurTime() { return systemTime(CLOCK_MONOTONIC); }
+
+ void setInitTime(nsecs_t initTime) { mInitTimeNs = initTime; }
+
+ void setDeInitTime(nsecs_t deInitTime) { mDeInitTimeNs = deInitTime; }
+
+ void setStartTime() { mStartTimeNs = systemTime(CLOCK_MONOTONIC); }
+
+ void addFrameSize(int32_t size) { mFrameSizes.push_back(size); }
+
+ void addInputTime() { mInputTimer.push_back(systemTime(CLOCK_MONOTONIC)); }
+
+ void addOutputTime() { mOutputTimer.push_back(systemTime(CLOCK_MONOTONIC)); }
+
+ void reset() {
+ if (!mFrameSizes.empty()) mFrameSizes.clear();
+ if (!mInputTimer.empty()) mInputTimer.clear();
+ if (!mOutputTimer.empty()) mOutputTimer.clear();
+ }
+
+ std::vector<nsecs_t> getOutputTimer() { return mOutputTimer; }
+
+ nsecs_t getInitTime() { return mInitTimeNs; }
+
+ nsecs_t getDeInitTime() { return mDeInitTimeNs; }
+
+ nsecs_t getTimeDiff(nsecs_t sTime, nsecs_t eTime) { return (eTime - sTime); }
+
+ nsecs_t getTotalTime() {
+ if (mOutputTimer.empty()) return -1;
+ return (*(mOutputTimer.end() - 1) - mStartTimeNs);
+ }
+
+ void dumpStatistics(std::string operation, std::string inputReference, int64_t durationUs);
+};
+
+#endif // __STATS_H__
diff --git a/media/tests/benchmark/src/native/decoder/Android.bp b/media/tests/benchmark/src/native/decoder/Android.bp
new file mode 100644
index 0000000..b6286d4
--- /dev/null
+++ b/media/tests/benchmark/src/native/decoder/Android.bp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+ name: "libmediabenchmark_decoder",
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["Decoder.cpp"],
+
+ static_libs: ["libmediabenchmark_extractor"],
+
+ export_include_dirs: ["."],
+
+ ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/decoder/Decoder.cpp b/media/tests/benchmark/src/native/decoder/Decoder.cpp
new file mode 100644
index 0000000..ef84537
--- /dev/null
+++ b/media/tests/benchmark/src/native/decoder/Decoder.cpp
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "decoder"
+
+#include <iostream>
+
+#include "Decoder.h"
+
+tuple<ssize_t, uint32_t, int64_t> readSampleData(uint8_t *inputBuffer, int32_t &offset,
+ vector<AMediaCodecBufferInfo> &frameInfo,
+ uint8_t *buf, int32_t frameID, size_t bufSize) {
+ ALOGV("In %s", __func__);
+ if (frameID == (int32_t)frameInfo.size()) {
+ return make_tuple(0, AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM, 0);
+ }
+ uint32_t flags = frameInfo[frameID].flags;
+ int64_t timestamp = frameInfo[frameID].presentationTimeUs;
+ ssize_t bytesCount = frameInfo[frameID].size;
+ if (bufSize < bytesCount) {
+ ALOGE("Error : insufficient resource");
+ return make_tuple(0, AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE, 0);
+ }
+
+ memcpy(buf, inputBuffer + offset, bytesCount);
+ offset += bytesCount;
+ return make_tuple(bytesCount, flags, timestamp);
+}
+
+void Decoder::onInputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx) {
+ ALOGV("In %s", __func__);
+ if (mediaCodec == mCodec && mediaCodec) {
+ if (mSawInputEOS || bufIdx < 0) return;
+ if (mSignalledError) {
+ CallBackHandle::mSawError = true;
+ mDecoderDoneCondition.notify_one();
+ return;
+ }
+
+ size_t bufSize;
+ uint8_t *buf = AMediaCodec_getInputBuffer(mCodec, bufIdx, &bufSize);
+ if (!buf) {
+ mSignalledError = true;
+ mDecoderDoneCondition.notify_one();
+ return;
+ }
+
+ ssize_t bytesRead = 0;
+ uint32_t flag = 0;
+ int64_t presentationTimeUs = 0;
+ tie(bytesRead, flag, presentationTimeUs) = readSampleData(
+ mInputBuffer, mOffset, mFrameMetaData, buf, mNumInputFrame, bufSize);
+ if (flag == AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE) {
+ mSignalledError = true;
+ mDecoderDoneCondition.notify_one();
+ return;
+ }
+
+ if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
+ ALOGV("%s bytesRead : %zd presentationTimeUs : %" PRId64 " mSawInputEOS : %s", __FUNCTION__,
+ bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
+
+ int status = AMediaCodec_queueInputBuffer(mCodec, bufIdx, 0 /* offset */, bytesRead,
+ presentationTimeUs, flag);
+ if (AMEDIA_OK != status) {
+ mSignalledError = true;
+ mDecoderDoneCondition.notify_one();
+ return;
+ }
+ mStats->addFrameSize(bytesRead);
+ mNumInputFrame++;
+ }
+}
+
+void Decoder::onOutputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx,
+ AMediaCodecBufferInfo *bufferInfo) {
+ ALOGV("In %s", __func__);
+ if (mediaCodec == mCodec && mediaCodec) {
+ if (mSawOutputEOS || bufIdx < 0) return;
+ if (mSignalledError) {
+ CallBackHandle::mSawError = true;
+ mDecoderDoneCondition.notify_one();
+ return;
+ }
+
+ if (mOutFp != nullptr) {
+ size_t bufSize;
+ uint8_t *buf = AMediaCodec_getOutputBuffer(mCodec, bufIdx, &bufSize);
+ if (buf) {
+ fwrite(buf, sizeof(char), bufferInfo->size, mOutFp);
+ ALOGV("bytes written into file %d\n", bufferInfo->size);
+ }
+ }
+
+ AMediaCodec_releaseOutputBuffer(mCodec, bufIdx, false);
+ mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
+ mNumOutputFrame++;
+ ALOGV("%s index : %d mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
+ mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputFrame);
+
+ if (mSawOutputEOS) {
+ CallBackHandle::mIsDone = true;
+ mDecoderDoneCondition.notify_one();
+ }
+ }
+}
+
+void Decoder::onFormatChanged(AMediaCodec *mediaCodec, AMediaFormat *format) {
+ ALOGV("In %s", __func__);
+ if (mediaCodec == mCodec && mediaCodec) {
+ ALOGV("%s { %s }", __FUNCTION__, AMediaFormat_toString(format));
+ mFormat = format;
+ }
+}
+
+void Decoder::setupDecoder() {
+ if (!mFormat) mFormat = mExtractor->getFormat();
+}
+
+int32_t Decoder::decode(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameInfo,
+ string &codecName, bool asyncMode, FILE *outFp) {
+ ALOGV("In %s", __func__);
+ mInputBuffer = inputBuffer;
+ mFrameMetaData = frameInfo;
+ mOffset = 0;
+ mOutFp = outFp;
+
+ const char *mime = nullptr;
+ AMediaFormat_getString(mFormat, AMEDIAFORMAT_KEY_MIME, &mime);
+ if (!mime) return AMEDIA_ERROR_INVALID_OBJECT;
+
+ int64_t sTime = mStats->getCurTime();
+ mCodec = createMediaCodec(mFormat, mime, codecName, false /*isEncoder*/);
+ if (!mCodec) return AMEDIA_ERROR_INVALID_OBJECT;
+
+ if (asyncMode) {
+ AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
+ OnFormatChangedCB, OnErrorCB};
+ AMediaCodec_setAsyncNotifyCallback(mCodec, aCB, this);
+
+ mIOThread = thread(&CallBackHandle::ioThread, this);
+ }
+
+ AMediaCodec_start(mCodec);
+ int64_t eTime = mStats->getCurTime();
+ int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+ mStats->setInitTime(timeTaken);
+
+ mStats->setStartTime();
+ if (!asyncMode) {
+ while (!mSawOutputEOS && !mSignalledError) {
+ /* Queue input data */
+ if (!mSawInputEOS) {
+ ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mCodec, kQueueDequeueTimeoutUs);
+ if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
+ ALOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n", inIdx);
+ return AMEDIA_ERROR_IO;
+ } else if (inIdx >= 0) {
+ mStats->addInputTime();
+ onInputAvailable(mCodec, inIdx);
+ }
+ }
+
+ /* Dequeue output data */
+ AMediaCodecBufferInfo info;
+ ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mCodec, &info, kQueueDequeueTimeoutUs);
+ if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
+ mFormat = AMediaCodec_getOutputFormat(mCodec);
+ const char *s = AMediaFormat_toString(mFormat);
+ ALOGI("Output format: %s\n", s);
+ } else if (outIdx >= 0) {
+ mStats->addOutputTime();
+ onOutputAvailable(mCodec, outIdx, &info);
+ } else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
+ outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
+ ALOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n", outIdx);
+ return AMEDIA_ERROR_IO;
+ }
+ }
+ } else {
+ unique_lock<mutex> lock(mMutex);
+ mDecoderDoneCondition.wait(lock, [this]() { return (mSawOutputEOS || mSignalledError); });
+ }
+
+ if (codecName.empty()) {
+ char *decName;
+ AMediaCodec_getName(mCodec, &decName);
+ codecName.assign(decName);
+ AMediaCodec_releaseName(mCodec, decName);
+ }
+ return AMEDIA_OK;
+}
+
+void Decoder::deInitCodec() {
+ int64_t sTime = mStats->getCurTime();
+ if (mFormat) {
+ AMediaFormat_delete(mFormat);
+ mFormat = nullptr;
+ }
+ if (!mCodec) return;
+ AMediaCodec_stop(mCodec);
+ AMediaCodec_delete(mCodec);
+ int64_t eTime = mStats->getCurTime();
+ int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+ mStats->setDeInitTime(timeTaken);
+}
+
+void Decoder::dumpStatistics(string inputReference) {
+ int64_t durationUs = mExtractor->getClipDuration();
+ string operation = "decode";
+ mStats->dumpStatistics(operation, inputReference, durationUs);
+}
+
+void Decoder::resetDecoder() {
+ if (mStats) mStats->reset();
+ if (mInputBuffer) mInputBuffer = nullptr;
+ if (!mFrameMetaData.empty()) mFrameMetaData.clear();
+}
diff --git a/media/tests/benchmark/src/native/decoder/Decoder.h b/media/tests/benchmark/src/native/decoder/Decoder.h
new file mode 100644
index 0000000..7630e7b
--- /dev/null
+++ b/media/tests/benchmark/src/native/decoder/Decoder.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DECODER_H__
+#define __DECODER_H__
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "BenchmarkCommon.h"
+#include "Extractor.h"
+#include "Stats.h"
+
+class Decoder : public CallBackHandle {
+ public:
+ Decoder()
+ : mCodec(nullptr),
+ mFormat(nullptr),
+ mExtractor(nullptr),
+ mNumInputFrame(0),
+ mNumOutputFrame(0),
+ mSawInputEOS(false),
+ mSawOutputEOS(false),
+ mSignalledError(false),
+ mInputBuffer(nullptr),
+ mOutFp(nullptr) {
+ mExtractor = new Extractor();
+ }
+
+ virtual ~Decoder() {
+ if (mExtractor) delete mExtractor;
+ }
+
+ Extractor *getExtractor() { return mExtractor; }
+
+ // Decoder related utilities
+ void setupDecoder();
+
+ void deInitCodec();
+
+ void resetDecoder();
+
+ // Async callback APIs
+ void onInputAvailable(AMediaCodec *codec, int32_t index) override;
+
+ void onFormatChanged(AMediaCodec *codec, AMediaFormat *format) override;
+
+ void onOutputAvailable(AMediaCodec *codec, int32_t index,
+ AMediaCodecBufferInfo *bufferInfo) override;
+
+ // Process the frames and give decoded output
+ int32_t decode(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameInfo,
+ string &codecName, bool asyncMode, FILE *outFp = nullptr);
+
+ void dumpStatistics(string inputReference);
+
+ private:
+ AMediaCodec *mCodec;
+ AMediaFormat *mFormat;
+
+ Extractor *mExtractor;
+
+ int32_t mNumInputFrame;
+ int32_t mNumOutputFrame;
+
+ bool mSawInputEOS;
+ bool mSawOutputEOS;
+ bool mSignalledError;
+
+ int32_t mOffset;
+ uint8_t *mInputBuffer;
+ vector<AMediaCodecBufferInfo> mFrameMetaData;
+ FILE *mOutFp;
+
+ /* Asynchronous locks */
+ mutex mMutex;
+ condition_variable mDecoderDoneCondition;
+};
+
+// Read input samples
+tuple<ssize_t, uint32_t, int64_t> readSampleData(uint8_t *inputBuffer, int32_t &offset,
+ vector<AMediaCodecBufferInfo> &frameSizes,
+ uint8_t *buf, int32_t frameID, size_t bufSize);
+
+#endif // __DECODER_H__
diff --git a/media/tests/benchmark/src/native/encoder/Android.bp b/media/tests/benchmark/src/native/encoder/Android.bp
new file mode 100644
index 0000000..239f378
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/Android.bp
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+ name: "libmediabenchmark_encoder",
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["Encoder.cpp"],
+
+ static_libs: ["libmediabenchmark_extractor",
+ "libmediabenchmark_decoder",
+ ],
+
+ export_include_dirs: ["."],
+
+ ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/encoder/Encoder.cpp b/media/tests/benchmark/src/native/encoder/Encoder.cpp
new file mode 100644
index 0000000..5fdf9e3
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/Encoder.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "encoder"
+
+#include <fstream>
+
+#include "Encoder.h"
+
+void Encoder::onInputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx) {
+ ALOGV("In %s", __func__);
+ if (mediaCodec == mCodec && mediaCodec) {
+ if (mSawInputEOS || bufIdx < 0) return;
+ if (mSignalledError) {
+ CallBackHandle::mSawError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+
+ size_t bufSize = 0;
+ char *buf = (char *)AMediaCodec_getInputBuffer(mCodec, bufIdx, &bufSize);
+ if (!buf) {
+ mSignalledError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+
+ if (mInputBufferSize < mOffset) {
+ ALOGE("Out of bound access of input buffer\n");
+ mSignalledError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+ size_t bytesRead = mParams.frameSize;
+ if (mInputBufferSize - mOffset < mParams.frameSize) {
+ bytesRead = mInputBufferSize - mOffset;
+ }
+ if (bufSize < bytesRead) {
+ ALOGE("bytes to read %zu bufSize %zu \n", bytesRead, bufSize);
+ mSignalledError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+ if (bytesRead < mParams.frameSize && mNumInputFrame < mParams.numFrames - 1) {
+ ALOGE("Partial frame at frameID %d bytesRead %zu frameSize %d total numFrames %d\n",
+ mNumInputFrame, bytesRead, mParams.frameSize, mParams.numFrames);
+ mSignalledError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+ mEleStream->read(buf, bytesRead);
+ size_t bytesgcount = mEleStream->gcount();
+ if (bytesgcount != bytesRead) {
+ ALOGE("bytes to read %zu actual bytes read %zu \n", bytesRead, bytesgcount);
+ mSignalledError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+
+ uint32_t flag = 0;
+ if (mNumInputFrame == mParams.numFrames - 1 || bytesRead == 0) {
+ ALOGD("Sending EOS on input Last frame\n");
+ flag |= AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
+ }
+
+ uint64_t presentationTimeUs;
+ if (!strncmp(mMime, "video/", 6)) {
+ presentationTimeUs = mNumInputFrame * (1000000 / mParams.frameRate);
+ } else {
+ presentationTimeUs =
+ (uint64_t)mNumInputFrame * mParams.frameSize * 1000000 / mParams.sampleRate;
+ }
+
+ if (flag == AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM) mSawInputEOS = true;
+ ALOGV("%s bytesRead : %zd presentationTimeUs : %" PRIu64 " mSawInputEOS : %s", __FUNCTION__,
+ bytesRead, presentationTimeUs, mSawInputEOS ? "TRUE" : "FALSE");
+
+ int status = AMediaCodec_queueInputBuffer(mCodec, bufIdx, 0 /* offset */, bytesRead,
+ presentationTimeUs, flag);
+ if (AMEDIA_OK != status) {
+ mSignalledError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+ mNumInputFrame++;
+ mOffset += bytesRead;
+ }
+}
+
+void Encoder::onOutputAvailable(AMediaCodec *mediaCodec, int32_t bufIdx,
+ AMediaCodecBufferInfo *bufferInfo) {
+ ALOGV("In %s", __func__);
+ if (mediaCodec == mCodec && mediaCodec) {
+ if (mSawOutputEOS || bufIdx < 0) return;
+ if (mSignalledError) {
+ CallBackHandle::mSawError = true;
+ mEncoderDoneCondition.notify_one();
+ return;
+ }
+
+ mStats->addFrameSize(bufferInfo->size);
+ AMediaCodec_releaseOutputBuffer(mCodec, bufIdx, false);
+ mSawOutputEOS = (0 != (bufferInfo->flags & AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM));
+ mNumOutputFrame++;
+ ALOGV("%s index : %d mSawOutputEOS : %s count : %u", __FUNCTION__, bufIdx,
+ mSawOutputEOS ? "TRUE" : "FALSE", mNumOutputFrame);
+ if (mSawOutputEOS) {
+ CallBackHandle::mIsDone = true;
+ mEncoderDoneCondition.notify_one();
+ }
+ }
+}
+
+void Encoder::onFormatChanged(AMediaCodec *mediaCodec, AMediaFormat *format) {
+ ALOGV("In %s", __func__);
+ if (mediaCodec == mCodec && mediaCodec) {
+ ALOGV("%s { %s }", __FUNCTION__, AMediaFormat_toString(format));
+ mFormat = format;
+ }
+}
+
+void Encoder::setupEncoder() {
+ if (!mFormat) mFormat = AMediaFormat_new();
+}
+
+void Encoder::deInitCodec() {
+ int64_t sTime = mStats->getCurTime();
+ if (mFormat) {
+ AMediaFormat_delete(mFormat);
+ mFormat = nullptr;
+ }
+ AMediaCodec_stop(mCodec);
+ AMediaCodec_delete(mCodec);
+ int64_t eTime = mStats->getCurTime();
+ int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+ mStats->setDeInitTime(timeTaken);
+}
+
+void Encoder::resetEncoder() {
+ if (mStats) mStats->reset();
+ if (mEleStream) mEleStream = nullptr;
+ if (mMime) mMime = nullptr;
+ mInputBufferSize = 0;
+ memset(&mParams, 0, sizeof mParams);
+}
+
+void Encoder::dumpStatistics(string inputReference, int64_t durationUs) {
+ string operation = "encode";
+ mStats->dumpStatistics(operation, inputReference, durationUs);
+}
+
+int32_t Encoder::encode(string &codecName, ifstream &eleStream, size_t eleSize,
+ bool asyncMode, encParameter encParams, char *mime) {
+ ALOGV("In %s", __func__);
+ mEleStream = &eleStream;
+ mInputBufferSize = eleSize;
+ mParams = encParams;
+ mOffset = 0;
+ mMime = mime;
+ AMediaFormat_setString(mFormat, AMEDIAFORMAT_KEY_MIME, mMime);
+
+ // Set Format
+ if (!strncmp(mMime, "video/", 6)) {
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_WIDTH, mParams.width);
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_HEIGHT, mParams.height);
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_FRAME_RATE, mParams.frameRate);
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_BIT_RATE, mParams.bitrate);
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, 1);
+ if (mParams.profile && mParams.level) {
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_PROFILE, mParams.profile);
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_LEVEL, mParams.level);
+ }
+ } else {
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_SAMPLE_RATE, mParams.sampleRate);
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_CHANNEL_COUNT, mParams.numChannels);
+ AMediaFormat_setInt32(mFormat, AMEDIAFORMAT_KEY_BIT_RATE, mParams.bitrate);
+ }
+ const char *s = AMediaFormat_toString(mFormat);
+ ALOGV("Input format: %s\n", s);
+
+ int64_t sTime = mStats->getCurTime();
+ mCodec = createMediaCodec(mFormat, mMime, codecName, true /*isEncoder*/);
+ if (!mCodec) return AMEDIA_ERROR_INVALID_OBJECT;
+ int64_t eTime = mStats->getCurTime();
+ int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+
+ if (!strncmp(mMime, "video/", 6)) {
+ mParams.frameSize = mParams.width * mParams.height * 3 / 2;
+ } else {
+ mParams.frameSize = 4096;
+ // Get mInputMaxBufSize
+ AMediaFormat *inputFormat = AMediaCodec_getInputFormat(mCodec);
+ AMediaFormat_getInt32(inputFormat, AMEDIAFORMAT_KEY_MAX_INPUT_SIZE, &mParams.maxFrameSize);
+ if (mParams.maxFrameSize < 0) {
+ ALOGE("Invalid mParams.maxFrameSize %d\n", mParams.maxFrameSize);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ if (mParams.frameSize > mParams.maxFrameSize) {
+ mParams.frameSize = mParams.maxFrameSize;
+ }
+ }
+ mParams.numFrames = (mInputBufferSize + mParams.frameSize - 1) / mParams.frameSize;
+
+ sTime = mStats->getCurTime();
+ if (asyncMode) {
+ AMediaCodecOnAsyncNotifyCallback aCB = {OnInputAvailableCB, OnOutputAvailableCB,
+ OnFormatChangedCB, OnErrorCB};
+ AMediaCodec_setAsyncNotifyCallback(mCodec, aCB, this);
+ mIOThread = thread(&CallBackHandle::ioThread, this);
+ }
+ AMediaCodec_start(mCodec);
+ eTime = mStats->getCurTime();
+ timeTaken += mStats->getTimeDiff(sTime, eTime);
+ mStats->setInitTime(timeTaken);
+
+ mStats->setStartTime();
+ if (!asyncMode) {
+ while (!mSawOutputEOS && !mSignalledError) {
+ // Queue input data
+ if (!mSawInputEOS) {
+ ssize_t inIdx = AMediaCodec_dequeueInputBuffer(mCodec, kQueueDequeueTimeoutUs);
+ if (inIdx < 0 && inIdx != AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
+ ALOGE("AMediaCodec_dequeueInputBuffer returned invalid index %zd\n", inIdx);
+ return AMEDIA_ERROR_IO;
+ } else if (inIdx >= 0) {
+ mStats->addInputTime();
+ onInputAvailable(mCodec, inIdx);
+ }
+ }
+
+ // Dequeue output data
+ AMediaCodecBufferInfo info;
+ ssize_t outIdx = AMediaCodec_dequeueOutputBuffer(mCodec, &info, kQueueDequeueTimeoutUs);
+ if (outIdx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
+ mFormat = AMediaCodec_getOutputFormat(mCodec);
+ const char *s = AMediaFormat_toString(mFormat);
+ ALOGI("Output format: %s\n", s);
+ } else if (outIdx >= 0) {
+ mStats->addOutputTime();
+ onOutputAvailable(mCodec, outIdx, &info);
+ } else if (!(outIdx == AMEDIACODEC_INFO_TRY_AGAIN_LATER ||
+ outIdx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED)) {
+ ALOGE("AMediaCodec_dequeueOutputBuffer returned invalid index %zd\n", outIdx);
+ return AMEDIA_ERROR_IO;
+ }
+ }
+ } else {
+ unique_lock<mutex> lock(mMutex);
+ mEncoderDoneCondition.wait(lock, [this]() { return (mSawOutputEOS || mSignalledError); });
+ }
+
+ if (codecName.empty()) {
+ char *encName;
+ AMediaCodec_getName(mCodec, &encName);
+ codecName.assign(encName);
+ AMediaCodec_releaseName(mCodec, encName);
+ }
+ return AMEDIA_OK;
+}
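createMediaCodec() used above is another helper assumed from BenchmarkCommon.h rather than added in this patch. A minimal sketch of what it presumably does, given how it is called (pick the codec by name when one is supplied, otherwise by MIME type, then configure it with the prepared format):

AMediaCodec *createMediaCodec(AMediaFormat *format, const char *mime,
                              const std::string &codecName, bool isEncoder) {
    AMediaCodec *codec;
    if (!codecName.empty()) {
        codec = AMediaCodec_createCodecByName(codecName.c_str());
    } else {
        codec = isEncoder ? AMediaCodec_createEncoderByType(mime)
                          : AMediaCodec_createDecoderByType(mime);
    }
    if (!codec) return nullptr;
    // Configure with the format built by the caller; encoders need the ENCODE flag.
    if (AMediaCodec_configure(codec, format, nullptr /* surface */, nullptr /* crypto */,
                              isEncoder ? AMEDIACODEC_CONFIGURE_FLAG_ENCODE : 0) != AMEDIA_OK) {
        AMediaCodec_delete(codec);
        return nullptr;
    }
    return codec;
}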
diff --git a/media/tests/benchmark/src/native/encoder/Encoder.h b/media/tests/benchmark/src/native/encoder/Encoder.h
new file mode 100644
index 0000000..75d9941
--- /dev/null
+++ b/media/tests/benchmark/src/native/encoder/Encoder.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ENCODER_H__
+#define __ENCODER_H__
+
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "BenchmarkCommon.h"
+#include "Stats.h"
+
+struct encParameter {
+ int32_t bitrate = -1;
+ int32_t numFrames = -1;
+ int32_t frameSize = -1;
+ int32_t sampleRate = 0;
+ int32_t numChannels = 0;
+ int32_t maxFrameSize = -1;
+ int32_t width = 0;
+ int32_t height = 0;
+ int32_t frameRate = -1;
+ int32_t profile = 0;
+ int32_t level = 0;
+};
+
+class Encoder : public CallBackHandle {
+ public:
+ Encoder()
+ : mCodec(nullptr),
+ mFormat(nullptr),
+ mNumInputFrame(0),
+ mNumOutputFrame(0),
+ mSawInputEOS(false),
+ mSawOutputEOS(false),
+ mSignalledError(false) {}
+
+ virtual ~Encoder() {}
+
+ // Encoder related utilities
+ void setupEncoder();
+
+ void deInitCodec();
+
+ void resetEncoder();
+
+ // Async callback APIs
+ void onInputAvailable(AMediaCodec *codec, int32_t index) override;
+
+ void onFormatChanged(AMediaCodec *codec, AMediaFormat *format) override;
+
+ void onOutputAvailable(AMediaCodec *codec, int32_t index,
+ AMediaCodecBufferInfo *bufferInfo) override;
+
+ // Process the frames and give encoded output
+ int32_t encode(std::string &codecName, std::ifstream &eleStream, size_t eleSize, bool asyncMode,
+ encParameter encParams, char *mime);
+
+ void dumpStatistics(string inputReference, int64_t durationUs);
+
+ private:
+ AMediaCodec *mCodec;
+ AMediaFormat *mFormat;
+
+ int32_t mNumInputFrame;
+ int32_t mNumOutputFrame;
+ bool mSawInputEOS;
+ bool mSawOutputEOS;
+ bool mSignalledError;
+
+ char *mMime;
+ int32_t mOffset;
+ std::ifstream *mEleStream;
+ size_t mInputBufferSize;
+ encParameter mParams;
+
+ // Asynchronous locks
+ std::mutex mMutex;
+ std::condition_variable mEncoderDoneCondition;
+};
+#endif // __ENCODER_H__
diff --git a/media/libstagefright/include/media/stagefright/NdkUtils.h b/media/tests/benchmark/src/native/extractor/Android.bp
similarity index 62%
copy from media/libstagefright/include/media/stagefright/NdkUtils.h
copy to media/tests/benchmark/src/native/extractor/Android.bp
index a68884a..dfd0d49 100644
--- a/media/libstagefright/include/media/stagefright/NdkUtils.h
+++ b/media/tests/benchmark/src/native/extractor/Android.bp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,18 +14,16 @@
* limitations under the License.
*/
-#ifndef NDK_UTILS_H_
+cc_library_static {
+ name: "libmediabenchmark_extractor",
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
-#define NDK_UTILS_H_
+ srcs: ["Extractor.cpp"],
-#include <media/stagefright/MetaData.h>
-#include <media/NdkWrapper.h>
+ export_include_dirs: ["."],
-namespace android {
-
-sp<MetaData> convertMediaFormatWrapperToMetaData(
- const sp<AMediaFormatWrapper> &fmt);
-
-} // namespace android
-
-#endif // NDK_UTILS_H_
+ ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/extractor/Extractor.cpp b/media/tests/benchmark/src/native/extractor/Extractor.cpp
new file mode 100644
index 0000000..b4cad0b
--- /dev/null
+++ b/media/tests/benchmark/src/native/extractor/Extractor.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "extractor"
+
+#include <iostream>
+
+#include "Extractor.h"
+
+int32_t Extractor::initExtractor(int32_t fd, size_t fileSize) {
+ mStats = new Stats();
+
+ mFrameBuf = (uint8_t *)calloc(kMaxBufferSize, sizeof(uint8_t));
+ if (!mFrameBuf) return -1;
+
+ int64_t sTime = mStats->getCurTime();
+
+ mExtractor = AMediaExtractor_new();
+ if (!mExtractor) return AMEDIACODEC_ERROR_INSUFFICIENT_RESOURCE;
+ media_status_t status = AMediaExtractor_setDataSourceFd(mExtractor, fd, 0, fileSize);
+ if (status != AMEDIA_OK) return status;
+
+ int64_t eTime = mStats->getCurTime();
+ int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+ mStats->setInitTime(timeTaken);
+
+ return AMediaExtractor_getTrackCount(mExtractor);
+}
+
+void *Extractor::getCSDSample(AMediaCodecBufferInfo &frameInfo, int32_t csdIndex) {
+ char csdName[kMaxCSDStrlen];
+ void *csdBuffer = nullptr;
+ frameInfo.presentationTimeUs = 0;
+ frameInfo.flags = AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG;
+ snprintf(csdName, sizeof(csdName), "csd-%d", csdIndex);
+
+ size_t size;
+ bool csdFound = AMediaFormat_getBuffer(mFormat, csdName, &csdBuffer, &size);
+ if (!csdFound) return nullptr;
+ frameInfo.size = (int32_t)size;
+ mStats->addFrameSize(frameInfo.size);
+
+ return csdBuffer;
+}
+
+int32_t Extractor::getFrameSample(AMediaCodecBufferInfo &frameInfo) {
+ int32_t size = AMediaExtractor_readSampleData(mExtractor, mFrameBuf, kMaxBufferSize);
+ if (size < 0) return -1;
+
+ frameInfo.flags = AMediaExtractor_getSampleFlags(mExtractor);
+ frameInfo.size = size;
+ mStats->addFrameSize(frameInfo.size);
+ frameInfo.presentationTimeUs = AMediaExtractor_getSampleTime(mExtractor);
+ AMediaExtractor_advance(mExtractor);
+
+ return 0;
+}
+
+int32_t Extractor::setupTrackFormat(int32_t trackId) {
+ AMediaExtractor_selectTrack(mExtractor, trackId);
+ mFormat = AMediaExtractor_getTrackFormat(mExtractor, trackId);
+ if (!mFormat) return AMEDIA_ERROR_INVALID_OBJECT;
+
+ bool durationFound = AMediaFormat_getInt64(mFormat, AMEDIAFORMAT_KEY_DURATION, &mDurationUs);
+ if (!durationFound) return AMEDIA_ERROR_INVALID_OBJECT;
+
+ return AMEDIA_OK;
+}
+
+int32_t Extractor::extract(int32_t trackId) {
+ int32_t status = setupTrackFormat(trackId);
+ if (status != AMEDIA_OK) return status;
+
+ int32_t idx = 0;
+ AMediaCodecBufferInfo frameInfo;
+ while (1) {
+ memset(&frameInfo, 0, sizeof(AMediaCodecBufferInfo));
+ void *csdBuffer = getCSDSample(frameInfo, idx);
+ if (!csdBuffer || !frameInfo.size) break;
+ idx++;
+ }
+
+ mStats->setStartTime();
+ while (1) {
+ int32_t status = getFrameSample(frameInfo);
+ if (status || !frameInfo.size) break;
+ mStats->addOutputTime();
+ }
+
+ if (mFormat) {
+ AMediaFormat_delete(mFormat);
+ mFormat = nullptr;
+ }
+
+ AMediaExtractor_unselectTrack(mExtractor, trackId);
+
+ return AMEDIA_OK;
+}
+
+void Extractor::dumpStatistics(string inputReference) {
+ string operation = "extract";
+ mStats->dumpStatistics(operation, inputReference, mDurationUs);
+}
+
+void Extractor::deInitExtractor() {
+ if (mFrameBuf) {
+ free(mFrameBuf);
+ mFrameBuf = nullptr;
+ }
+
+ int64_t sTime = mStats->getCurTime();
+ if (mExtractor) {
+ // TODO: (b/140128505) Multiple calls result in DoS.
+ // Uncomment call to AMediaExtractor_delete() once this is resolved
+ // AMediaExtractor_delete(mExtractor);
+ mExtractor = nullptr;
+ }
+ int64_t eTime = mStats->getCurTime();
+ int64_t deInitTime = mStats->getTimeDiff(sTime, eTime);
+ mStats->setDeInitTime(deInitTime);
+}
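getCSDSample() above only tags each codec-specific-data buffer (csd-0, csd-1, ...) with AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG; actually feeding those buffers to a codec is left to the caller. Purely as an illustration of the intended consumption pattern (queueCSD and its surrounding setup are assumptions, not part of this patch):

static media_status_t queueCSD(Extractor *extractor, AMediaCodec *codec) {
    // Queue all CSD buffers ahead of any regular frame, flagged as codec config.
    AMediaCodecBufferInfo csdInfo;
    int32_t csdIndex = 0;
    while (true) {
        void *csd = extractor->getCSDSample(csdInfo, csdIndex);
        if (!csd || !csdInfo.size) break;
        ssize_t idx = AMediaCodec_dequeueInputBuffer(codec, -1 /* wait */);
        if (idx < 0) return AMEDIA_ERROR_IO;
        size_t capacity = 0;
        uint8_t *buf = AMediaCodec_getInputBuffer(codec, idx, &capacity);
        if (!buf || capacity < (size_t)csdInfo.size) return AMEDIA_ERROR_IO;
        memcpy(buf, csd, csdInfo.size);
        AMediaCodec_queueInputBuffer(codec, idx, 0 /* offset */, csdInfo.size,
                                     0 /* presentationTimeUs */,
                                     AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG);
        csdIndex++;
    }
    return AMEDIA_OK;
}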
diff --git a/media/tests/benchmark/src/native/extractor/Extractor.h b/media/tests/benchmark/src/native/extractor/Extractor.h
new file mode 100644
index 0000000..4c39a72
--- /dev/null
+++ b/media/tests/benchmark/src/native/extractor/Extractor.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __EXTRACTOR_H__
+#define __EXTRACTOR_H__
+
+#include <media/NdkMediaExtractor.h>
+
+#include "BenchmarkCommon.h"
+#include "Stats.h"
+
+class Extractor {
+ public:
+ Extractor()
+ : mFormat(nullptr),
+ mExtractor(nullptr),
+ mStats(nullptr),
+ mFrameBuf{nullptr},
+ mDurationUs{0} {}
+
+ ~Extractor() {
+ if (mStats) delete mStats;
+ }
+
+ int32_t initExtractor(int32_t fd, size_t fileSize);
+
+ int32_t setupTrackFormat(int32_t trackId);
+
+ void *getCSDSample(AMediaCodecBufferInfo &frameInfo, int32_t csdIndex);
+
+ int32_t getFrameSample(AMediaCodecBufferInfo &frameInfo);
+
+ int32_t extract(int32_t trackId);
+
+ void dumpStatistics(std::string inputReference);
+
+ void deInitExtractor();
+
+ AMediaFormat *getFormat() { return mFormat; }
+
+ uint8_t *getFrameBuf() { return mFrameBuf; }
+
+ int64_t getClipDuration() { return mDurationUs; }
+
+ private:
+ AMediaFormat *mFormat;
+ AMediaExtractor *mExtractor;
+ Stats *mStats;
+ uint8_t *mFrameBuf;
+ int64_t mDurationUs;
+};
+
+#endif // __EXTRACTOR_H__
\ No newline at end of file
diff --git a/media/tests/benchmark/src/native/muxer/Android.bp b/media/tests/benchmark/src/native/muxer/Android.bp
new file mode 100644
index 0000000..f669d4a
--- /dev/null
+++ b/media/tests/benchmark/src/native/muxer/Android.bp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_library_static {
+ name: "libmediabenchmark_muxer",
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["Muxer.cpp"],
+
+ static_libs: ["libmediabenchmark_extractor"],
+
+ export_include_dirs: ["."],
+
+ ldflags: ["-Wl,-Bsymbolic"]
+}
diff --git a/media/tests/benchmark/src/native/muxer/Muxer.cpp b/media/tests/benchmark/src/native/muxer/Muxer.cpp
new file mode 100644
index 0000000..b297a66
--- /dev/null
+++ b/media/tests/benchmark/src/native/muxer/Muxer.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "muxer"
+
+#include <fstream>
+#include <iostream>
+
+#include "Muxer.h"
+
+int32_t Muxer::initMuxer(int32_t fd, MUXER_OUTPUT_T outputFormat) {
+ if (!mFormat) mFormat = mExtractor->getFormat();
+ if (!mStats) mStats = new Stats();
+
+ int64_t sTime = mStats->getCurTime();
+ mMuxer = AMediaMuxer_new(fd, (OutputFormat)outputFormat);
+ if (!mMuxer) {
+ cout << "[ WARN ] Test Skipped. Unable to create muxer \n";
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
+ /*
+ * AMediaMuxer_addTrack returns the index of the new track or a negative value
+ * in case of failure, which can be interpreted as a media_status_t.
+ */
+ ssize_t index = AMediaMuxer_addTrack(mMuxer, mFormat);
+ if (index < 0) {
+ cout << "[ WARN ] Test Skipped. Format not supported \n";
+ return index;
+ }
+ AMediaMuxer_start(mMuxer);
+ int64_t eTime = mStats->getCurTime();
+ int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+ mStats->setInitTime(timeTaken);
+ return AMEDIA_OK;
+}
+
+void Muxer::deInitMuxer() {
+ int64_t sTime = mStats->getCurTime();
+ if (mFormat) {
+ AMediaFormat_delete(mFormat);
+ mFormat = nullptr;
+ }
+ if (!mMuxer) return;
+ AMediaMuxer_stop(mMuxer);
+ AMediaMuxer_delete(mMuxer);
+ int64_t eTime = mStats->getCurTime();
+ int64_t timeTaken = mStats->getTimeDiff(sTime, eTime);
+ mStats->setDeInitTime(timeTaken);
+}
+
+void Muxer::resetMuxer() {
+ if (mStats) mStats->reset();
+}
+
+void Muxer::dumpStatistics(string inputReference) {
+ string operation = "mux";
+ mStats->dumpStatistics(operation, inputReference, mExtractor->getClipDuration());
+}
+
+int32_t Muxer::mux(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameInfos) {
+ // Mux frame data
+ size_t frameIdx = 0;
+ mStats->setStartTime();
+ while (frameIdx < frameInfos.size()) {
+ AMediaCodecBufferInfo info = frameInfos.at(frameIdx);
+ media_status_t status = AMediaMuxer_writeSampleData(mMuxer, 0, inputBuffer, &info);
+ if (status != 0) {
+ ALOGE("Error in AMediaMuxer_writeSampleData");
+ return status;
+ }
+ mStats->addOutputTime();
+ mStats->addFrameSize(info.size);
+ frameIdx++;
+ }
+ return AMEDIA_OK;
+}
diff --git a/media/tests/benchmark/src/native/muxer/Muxer.h b/media/tests/benchmark/src/native/muxer/Muxer.h
new file mode 100644
index 0000000..eee3146
--- /dev/null
+++ b/media/tests/benchmark/src/native/muxer/Muxer.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MUXER_H__
+#define __MUXER_H__
+
+#include <media/NdkMediaMuxer.h>
+
+#include "BenchmarkCommon.h"
+#include "Stats.h"
+#include "Extractor.h"
+
+typedef enum {
+ MUXER_OUTPUT_FORMAT_MPEG_4 = 0,
+ MUXER_OUTPUT_FORMAT_WEBM = 1,
+ MUXER_OUTPUT_FORMAT_3GPP = 2,
+ MUXER_OUTPUT_FORMAT_OGG = 4,
+ MUXER_OUTPUT_FORMAT_INVALID = 5,
+} MUXER_OUTPUT_T;
+
+class Muxer {
+ public:
+ Muxer() : mFormat(nullptr), mMuxer(nullptr), mStats(nullptr) { mExtractor = new Extractor(); }
+
+ virtual ~Muxer() {
+ if (mStats) delete mStats;
+ if (mExtractor) delete mExtractor;
+ }
+
+ Stats *getStats() { return mStats; }
+ Extractor *getExtractor() { return mExtractor; }
+
+ /* Muxer related utilities */
+ int32_t initMuxer(int32_t fd, MUXER_OUTPUT_T outputFormat);
+ void deInitMuxer();
+ void resetMuxer();
+
+ /* Process the frames and give Muxed output */
+ int32_t mux(uint8_t *inputBuffer, vector<AMediaCodecBufferInfo> &frameSizes);
+
+ void dumpStatistics(string inputReference);
+
+ private:
+ AMediaFormat *mFormat;
+ AMediaMuxer *mMuxer;
+ Extractor *mExtractor;
+ Stats *mStats;
+};
+
+#endif // __MUXER_H__
diff --git a/media/tests/benchmark/tests/Android.bp b/media/tests/benchmark/tests/Android.bp
new file mode 100644
index 0000000..24fd68c
--- /dev/null
+++ b/media/tests/benchmark/tests/Android.bp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+cc_test {
+ name: "extractorTest",
+ gtest: true,
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["ExtractorTest.cpp"],
+
+ static_libs: ["libmediabenchmark_extractor"]
+}
+
+cc_test {
+ name: "decoderTest",
+ gtest: true,
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["DecoderTest.cpp"],
+
+ static_libs: [
+ "libmediabenchmark_extractor",
+ "libmediabenchmark_decoder",
+ ],
+}
+
+cc_test {
+ name: "muxerTest",
+ gtest: true,
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["MuxerTest.cpp"],
+
+ static_libs: [
+ "libmediabenchmark_extractor",
+ "libmediabenchmark_muxer",
+ ],
+}
+
+cc_test {
+ name: "encoderTest",
+ gtest: true,
+ defaults: [
+ "libmediabenchmark_common-defaults",
+ "libmediabenchmark_soft_sanitize_all-defaults",
+ ],
+
+ srcs: ["EncoderTest.cpp"],
+
+ static_libs: [
+ "libmediabenchmark_extractor",
+ "libmediabenchmark_decoder",
+ "libmediabenchmark_encoder",
+ ],
+}
diff --git a/media/tests/benchmark/tests/BenchmarkTestEnvironment.h b/media/tests/benchmark/tests/BenchmarkTestEnvironment.h
new file mode 100644
index 0000000..ae2eee1
--- /dev/null
+++ b/media/tests/benchmark/tests/BenchmarkTestEnvironment.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __BENCHMARK_TEST_ENVIRONMENT_H__
+#define __BENCHMARK_TEST_ENVIRONMENT_H__
+
+#include <gtest/gtest.h>
+
+#include <getopt.h>
+
+using namespace std;
+
+class BenchmarkTestEnvironment : public ::testing::Environment {
+ public:
+ BenchmarkTestEnvironment() : res("/sdcard/media/") {}
+
+ // Parses the command line argument
+ int initFromOptions(int argc, char **argv);
+
+ void setRes(const char *_res) { res = _res; }
+
+ const string getRes() const { return res; }
+
+ private:
+ string res;
+};
+
+int BenchmarkTestEnvironment::initFromOptions(int argc, char **argv) {
+ static struct option options[] = {{"path", required_argument, 0, 'P'}, {0, 0, 0, 0}};
+
+ while (true) {
+ int index = 0;
+ int c = getopt_long(argc, argv, "P:", options, &index);
+ if (c == -1) {
+ break;
+ }
+
+ switch (c) {
+ case 'P': {
+ setRes(optarg);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (optind < argc) {
+ fprintf(stderr,
+ "unrecognized option: %s\n\n"
+ "usage: %s <gtest options> <test options>\n\n"
+ "test options are:\n\n"
+ "-P, --path: Resource files directory location\n",
+ argv[optind ?: 1], argv[0]);
+ return 2;
+ }
+ return 0;
+}
+
+#endif // __BENCHMARK_TEST_ENVIRONMENT_H__
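The -P option only tells the tests where the clips named in the parameter lists live; the default of /sdcard/media/ assumes the resources have already been pushed to the device. A typical invocation is therefore expected to look like "extractorTest -P /sdcard/media/" (illustrative; neither the binary location nor the push step is mandated by this patch).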
diff --git a/media/tests/benchmark/tests/DecoderTest.cpp b/media/tests/benchmark/tests/DecoderTest.cpp
new file mode 100644
index 0000000..242178f
--- /dev/null
+++ b/media/tests/benchmark/tests/DecoderTest.cpp
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "decoderTest"
+
+#include <fstream>
+#include <iostream>
+#include <limits>
+
+#include "Decoder.h"
+#include "BenchmarkTestEnvironment.h"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class DecoderTest : public ::testing::TestWithParam<tuple<string, string, bool>> {};
+
+TEST_P(DecoderTest, Decode) {
+ ALOGV("Decode the samples given by extractor");
+ tuple<string /* InputFile */, string /* CodecName */, bool /* asyncMode */> params = GetParam();
+
+ string inputFile = gEnv->getRes() + get<0>(params);
+ FILE *inputFp = fopen(inputFile.c_str(), "rb");
+ if (!inputFp) {
+ cout << "[ WARN ] Test Skipped. Unable to open input file for reading \n";
+ return;
+ }
+
+ Decoder *decoder = new Decoder();
+ Extractor *extractor = decoder->getExtractor();
+ if (!extractor) {
+ cout << "[ WARN ] Test Skipped. Extractor creation failed \n";
+ return;
+ }
+
+ // Read file properties
+ fseek(inputFp, 0, SEEK_END);
+ size_t fileSize = ftell(inputFp);
+ fseek(inputFp, 0, SEEK_SET);
+ int32_t fd = fileno(inputFp);
+
+ int32_t trackCount = extractor->initExtractor(fd, fileSize);
+ if (trackCount <= 0) {
+ cout << "[ WARN ] Test Skipped. initExtractor failed\n";
+ return;
+ }
+ for (int curTrack = 0; curTrack < trackCount; curTrack++) {
+ int32_t status = extractor->setupTrackFormat(curTrack);
+ if (status != 0) {
+ cout << "[ WARN ] Test Skipped. Track Format invalid \n";
+ return;
+ }
+
+ uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+ if (!inputBuffer) {
+ cout << "[ WARN ] Test Skipped. Insufficient memory \n";
+ return;
+ }
+ vector<AMediaCodecBufferInfo> frameInfo;
+ AMediaCodecBufferInfo info;
+ uint32_t inputBufferOffset = 0;
+
+ // Get frame data
+ while (1) {
+ status = extractor->getFrameSample(info);
+ if (status || !info.size) break;
+            // Copy the metadata and buffer to be passed to the decoder
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[ WARN ] Test Skipped. Allocated memory not sufficient\n";
+ free(inputBuffer);
+ return;
+ }
+ memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ frameInfo.push_back(info);
+ inputBufferOffset += info.size;
+ }
+
+ string codecName = get<1>(params);
+ bool asyncMode = get<2>(params);
+ decoder->setupDecoder();
+ status = decoder->decode(inputBuffer, frameInfo, codecName, asyncMode);
+ if (status != AMEDIA_OK) {
+ cout << "[ WARN ] Test Skipped. Decode returned error \n";
+ free(inputBuffer);
+ return;
+ }
+ decoder->deInitCodec();
+ cout << "codec : " << codecName << endl;
+ string inputReference = get<0>(params);
+ decoder->dumpStatistics(inputReference);
+ free(inputBuffer);
+ decoder->resetDecoder();
+ }
+ fclose(inputFp);
+ extractor->deInitExtractor();
+ delete decoder;
+}
+
+// TODO: (b/140549596)
+// Add wav files
+INSTANTIATE_TEST_SUITE_P(
+ AudioDecoderSyncTest, DecoderTest,
+ ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", false),
+ make_tuple("bbb_44100hz_2ch_128kbps_mp3_30sec.mp3", "", false),
+ make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", false),
+ make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", false),
+ make_tuple("bbb_44100hz_2ch_80kbps_vorbis_30sec.mp4", "", false),
+ make_tuple("bbb_44100hz_2ch_600kbps_flac_30sec.mp4", "", false),
+ make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", false)));
+
+INSTANTIATE_TEST_SUITE_P(
+ AudioDecoderAsyncTest, DecoderTest,
+ ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", true),
+ make_tuple("bbb_44100hz_2ch_128kbps_mp3_30sec.mp3", "", true),
+ make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", true),
+ make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", true),
+ make_tuple("bbb_44100hz_2ch_80kbps_vorbis_30sec.mp4", "", true),
+ make_tuple("bbb_44100hz_2ch_600kbps_flac_30sec.mp4", "", true),
+ make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", true)));
+
+INSTANTIATE_TEST_SUITE_P(VideoDecoderSyncTest, DecoderTest,
+ ::testing::Values(
+ // Hardware codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm", "", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm", "", false),
+ make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", "", false),
+ make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "", false),
+ make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp", "", false),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", false),
+ // Software codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+ "c2.android.vp9.decoder", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+ "c2.android.vp8.decoder", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm",
+ "c2.android.av1.decoder", false),
+ make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4",
+ "c2.android.mpeg2.decoder", false),
+ make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4",
+ "c2.android.mpeg4.decoder", false),
+ make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp",
+ "c2.android.h263.decoder", false),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+ "c2.android.avc.decoder", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+ "c2.android.hevc.decoder", false)));
+
+INSTANTIATE_TEST_SUITE_P(VideoDecoderAsyncTest, DecoderTest,
+ ::testing::Values(
+ // Hardware codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm", "", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm", "", true),
+ make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", "", true),
+ make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "", true),
+ make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp", "", true),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", true),
+ // Software codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+ "c2.android.vp9.decoder", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+ "c2.android.vp8.decoder", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_av1.webm",
+ "c2.android.av1.decoder", true),
+ make_tuple("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4",
+ "c2.android.mpeg2.decoder", true),
+ make_tuple("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4",
+ "c2.android.mpeg4.decoder", true),
+ make_tuple("crowd_352x288_25fps_6000kbps_h263.3gp",
+ "c2.android.h263.decoder", true),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+ "c2.android.avc.decoder", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+ "c2.android.hevc.decoder", true)));
+
+int main(int argc, char **argv) {
+ gEnv = new BenchmarkTestEnvironment();
+ ::testing::AddGlobalTestEnvironment(gEnv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int status = gEnv->initFromOptions(argc, argv);
+ if (status == 0) {
+ status = RUN_ALL_TESTS();
+ ALOGD("Decoder Test result = %d\n", status);
+ }
+ return status;
+}
\ No newline at end of file
diff --git a/media/tests/benchmark/tests/EncoderTest.cpp b/media/tests/benchmark/tests/EncoderTest.cpp
new file mode 100644
index 0000000..9f42c64
--- /dev/null
+++ b/media/tests/benchmark/tests/EncoderTest.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "encoderTest"
+
+#include <fstream>
+
+#include "BenchmarkTestEnvironment.h"
+#include "Encoder.h"
+#include "Decoder.h"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class EncoderTest : public ::testing::TestWithParam<tuple<string, string, bool>> {};
+
+TEST_P(EncoderTest, Encode) {
+ ALOGD("Encode test for all codecs");
+ tuple<string /* InputFile */, string /* CodecName */, bool /* asyncMode */> params = GetParam();
+
+ string inputFile = gEnv->getRes() + get<0>(params);
+ FILE *inputFp = fopen(inputFile.c_str(), "rb");
+ if (!inputFp) {
+ cout << "[ WARN ] Test Skipped. Unable to open input file for reading \n";
+ return;
+ }
+
+ Decoder *decoder = new Decoder();
+ Extractor *extractor = decoder->getExtractor();
+ if (!extractor) {
+ cout << "[ WARN ] Test Skipped. Extractor creation failed \n";
+ return;
+ }
+ // Read file properties
+ fseek(inputFp, 0, SEEK_END);
+ size_t fileSize = ftell(inputFp);
+ fseek(inputFp, 0, SEEK_SET);
+ int32_t fd = fileno(inputFp);
+
+ int32_t trackCount = extractor->initExtractor(fd, fileSize);
+ if (trackCount <= 0) {
+ cout << "[ WARN ] Test Skipped. initExtractor failed\n";
+ return;
+ }
+
+ Encoder *encoder = new Encoder();
+ for (int curTrack = 0; curTrack < trackCount; curTrack++) {
+ int32_t status = extractor->setupTrackFormat(curTrack);
+ if (status != 0) {
+ cout << "[ WARN ] Test Skipped. Track Format invalid \n";
+ return;
+ }
+
+ uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+ if (!inputBuffer) {
+ cout << "[ WARN ] Test Skipped. Insufficient memory \n";
+ return;
+ }
+ vector<AMediaCodecBufferInfo> frameInfo;
+ AMediaCodecBufferInfo info;
+ uint32_t inputBufferOffset = 0;
+
+ // Get frame data
+ while (1) {
+ status = extractor->getFrameSample(info);
+ if (status || !info.size) break;
+            // Copy the metadata and buffer to be passed to the decoder
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[ WARN ] Test Skipped. Allocated memory not sufficient\n";
+ free(inputBuffer);
+ return;
+ }
+ memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ frameInfo.push_back(info);
+ inputBufferOffset += info.size;
+ }
+
+ string decName = "";
+ string outputFileName = "decode.out";
+ FILE *outFp = fopen(outputFileName.c_str(), "wb");
+ if (outFp == nullptr) {
+ ALOGE("Unable to open output file for writing");
+ return;
+ }
+ decoder->setupDecoder();
+ status = decoder->decode(inputBuffer, frameInfo, decName, false /*asyncMode */, outFp);
+ if (status != AMEDIA_OK) {
+ cout << "[ WARN ] Test Skipped. Decode returned error \n";
+ return;
+ }
+
+ ifstream eleStream;
+ eleStream.open(outputFileName.c_str(), ifstream::binary | ifstream::ate);
+ ASSERT_EQ(eleStream.is_open(), true) << outputFileName.c_str() << " - file not found";
+ size_t eleSize = eleStream.tellg();
+ eleStream.seekg(0, ifstream::beg);
+
+ AMediaFormat *format = extractor->getFormat();
+ const char *mime = nullptr;
+ AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime);
+ if (!mime) {
+ ALOGE("Error in AMediaFormat_getString");
+ return;
+ }
+ // Get encoder params
+ encParameter encParams;
+ if (!strncmp(mime, "video/", 6)) {
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_WIDTH, &encParams.width);
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_HEIGHT, &encParams.height);
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_FRAME_RATE, &encParams.frameRate);
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, &encParams.bitrate);
+ if (encParams.bitrate <= 0 || encParams.frameRate <= 0) {
+ encParams.frameRate = 25;
+ if (!strcmp(mime, "video/3gpp") || !strcmp(mime, "video/mp4v-es")) {
+ encParams.bitrate = 600000 /* 600 Kbps */;
+ } else {
+ encParams.bitrate = 8000000 /* 8 Mbps */;
+ }
+ }
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_PROFILE, &encParams.profile);
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_LEVEL, &encParams.level);
+ } else {
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_SAMPLE_RATE, &encParams.sampleRate);
+ AMediaFormat_getInt32(format, AMEDIAFORMAT_KEY_CHANNEL_COUNT, &encParams.numChannels);
+ encParams.bitrate =
+ encParams.sampleRate * encParams.numChannels * 16 /* bitsPerSample */;
+ }
+
+ encoder->setupEncoder();
+ string codecName = get<1>(params);
+ bool asyncMode = get<2>(params);
+ status = encoder->encode(codecName, eleStream, eleSize, asyncMode, encParams, (char *)mime);
+ ASSERT_EQ(status, 0);
+ encoder->deInitCodec();
+ cout << "codec : " << codecName << endl;
+ string inputReference = get<0>(params);
+ encoder->dumpStatistics(inputReference, extractor->getClipDuration());
+ eleStream.close();
+ if (outFp) fclose(outFp);
+
+ if (format) {
+ AMediaFormat_delete(format);
+ format = nullptr;
+ }
+ encoder->resetEncoder();
+ decoder->deInitCodec();
+ free(inputBuffer);
+ decoder->resetDecoder();
+ }
+ delete encoder;
+ fclose(inputFp);
+ extractor->deInitExtractor();
+ delete decoder;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ AudioEncoderSyncTest, EncoderTest,
+ ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", false),
+ make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", false),
+ make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", false),
+ make_tuple("bbb_44100hz_2ch_600kbps_flac_30sec.mp4", "", false),
+ make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", false)));
+
+INSTANTIATE_TEST_SUITE_P(
+ AudioEncoderAsyncTest, EncoderTest,
+ ::testing::Values(make_tuple("bbb_44100hz_2ch_128kbps_aac_30sec.mp4", "", true),
+ make_tuple("bbb_8000hz_1ch_8kbps_amrnb_30sec.3gp", "", true),
+ make_tuple("bbb_16000hz_1ch_9kbps_amrwb_30sec.3gp", "", true),
+ make_tuple("bbb_44100hz_2ch_600kbps_flac_30sec.mp4", "", true),
+ make_tuple("bbb_48000hz_2ch_100kbps_opus_30sec.webm", "", true)));
+
+INSTANTIATE_TEST_SUITE_P(VideoEncoderSyncTest, EncoderTest,
+ ::testing::Values(
+ // Hardware codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", false),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", false),
+ // Software codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+ "c2.android.vp9.encoder", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+ "c2.android.vp8.encoder", false),
+ make_tuple("crowd_176x144_25fps_6000kbps_mpeg4.mp4",
+ "c2.android.mpeg4.encoder", false),
+ make_tuple("crowd_176x144_25fps_6000kbps_h263.3gp",
+ "c2.android.h263.encoder", false),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+ "c2.android.avc.encoder", false),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+ "c2.android.hevc.encoder", false)));
+
+INSTANTIATE_TEST_SUITE_P(VideoEncoderAsyncTest, EncoderTest,
+ ::testing::Values(
+ // Hardware codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm", "", true),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts", "", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv", "", true),
+ // Software codecs
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp9.webm",
+ "c2.android.vp9.encoder", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_vp8.webm",
+ "c2.android.vp8.encoder", true),
+ make_tuple("crowd_176x144_25fps_6000kbps_mpeg4.mp4",
+ "c2.android.mpeg4.encoder", true),
+ make_tuple("crowd_176x144_25fps_6000kbps_h263.3gp",
+ "c2.android.h263.encoder", true),
+ make_tuple("crowd_1920x1080_25fps_6700kbps_h264.ts",
+ "c2.android.avc.encoder", true),
+ make_tuple("crowd_1920x1080_25fps_4000kbps_h265.mkv",
+ "c2.android.hevc.encoder", true)));
+
+int main(int argc, char **argv) {
+ gEnv = new BenchmarkTestEnvironment();
+ ::testing::AddGlobalTestEnvironment(gEnv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int status = gEnv->initFromOptions(argc, argv);
+ if (status == 0) {
+ status = RUN_ALL_TESTS();
+ ALOGD("Encoder Test result = %d\n", status);
+ }
+ return status;
+}
diff --git a/media/tests/benchmark/tests/ExtractorTest.cpp b/media/tests/benchmark/tests/ExtractorTest.cpp
new file mode 100644
index 0000000..dd0d711
--- /dev/null
+++ b/media/tests/benchmark/tests/ExtractorTest.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "extractorTest"
+
+#include <gtest/gtest.h>
+
+#include "Extractor.h"
+#include "BenchmarkTestEnvironment.h"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class ExtractorTest : public ::testing::TestWithParam<pair<string, int32_t>> {};
+
+TEST_P(ExtractorTest, Extract) {
+ Extractor *extractObj = new Extractor();
+
+ string inputFile = gEnv->getRes() + GetParam().first;
+ FILE *inputFp = fopen(inputFile.c_str(), "rb");
+ if (!inputFp) {
+ cout << "[ WARN ] Test Skipped. Unable to open input file for reading \n";
+ return;
+ }
+
+ // Read file properties
+ size_t fileSize = 0;
+ fseek(inputFp, 0, SEEK_END);
+ fileSize = ftell(inputFp);
+ fseek(inputFp, 0, SEEK_SET);
+ int32_t fd = fileno(inputFp);
+
+ int32_t trackCount = extractObj->initExtractor(fd, fileSize);
+ if (trackCount <= 0) {
+ cout << "[ WARN ] Test Skipped. initExtractor failed\n";
+ return;
+ }
+
+ int32_t trackID = GetParam().second;
+ int32_t status = extractObj->extract(trackID);
+ if (status != AMEDIA_OK) {
+ cout << "[ WARN ] Test Skipped. Extraction failed \n";
+ return;
+ }
+
+ extractObj->deInitExtractor();
+
+ extractObj->dumpStatistics(GetParam().first);
+
+ fclose(inputFp);
+ delete extractObj;
+}
+
+INSTANTIATE_TEST_SUITE_P(ExtractorTestAll, ExtractorTest,
+ ::testing::Values(make_pair("crowd_1920x1080_25fps_4000kbps_vp9.webm", 0),
+ make_pair("crowd_1920x1080_25fps_6000kbps_h263.3gp", 0),
+ make_pair("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", 0),
+ make_pair("crowd_1920x1080_25fps_6700kbps_h264.ts", 0),
+ make_pair("crowd_1920x1080_25fps_7300kbps_mpeg2.mp4", 0),
+ make_pair("crowd_1920x1080_25fps_4000kbps_av1.webm", 0),
+ make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", 0),
+ make_pair("crowd_1920x1080_25fps_4000kbps_vp8.webm", 0),
+ make_pair("bbb_44100hz_2ch_128kbps_aac_5mins.mp4", 0),
+ make_pair("bbb_44100hz_2ch_128kbps_mp3_5mins.mp3", 0),
+ make_pair("bbb_44100hz_2ch_600kbps_flac_5mins.flac", 0),
+ make_pair("bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", 0),
+ make_pair("bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", 0),
+ make_pair("bbb_44100hz_2ch_80kbps_vorbis_5mins.mp4", 0),
+ make_pair("bbb_48000hz_2ch_100kbps_opus_5mins.webm", 0)));
+
+int main(int argc, char **argv) {
+ gEnv = new BenchmarkTestEnvironment();
+ ::testing::AddGlobalTestEnvironment(gEnv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int status = gEnv->initFromOptions(argc, argv);
+ if (status == 0) {
+ status = RUN_ALL_TESTS();
+ ALOGD(" Extractor Test result = %d\n", status);
+ }
+ return status;
+}
diff --git a/media/tests/benchmark/tests/MuxerTest.cpp b/media/tests/benchmark/tests/MuxerTest.cpp
new file mode 100644
index 0000000..e814f90
--- /dev/null
+++ b/media/tests/benchmark/tests/MuxerTest.cpp
@@ -0,0 +1,181 @@
+
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "muxerTest"
+
+#include <fstream>
+#include <iostream>
+
+#include "Muxer.h"
+#include "BenchmarkTestEnvironment.h"
+
+#define OUTPUT_FILE_NAME "/data/local/tmp/mux.out"
+
+static BenchmarkTestEnvironment *gEnv = nullptr;
+
+class MuxerTest : public ::testing::TestWithParam<pair<string, string>> {};
+
+static MUXER_OUTPUT_T getMuxerOutFormat(string fmt) {
+ static const struct {
+ string name;
+ MUXER_OUTPUT_T value;
+ } kFormatMaps[] = {{"mp4", MUXER_OUTPUT_FORMAT_MPEG_4},
+ {"webm", MUXER_OUTPUT_FORMAT_WEBM},
+ {"3gpp", MUXER_OUTPUT_FORMAT_3GPP},
+ {"ogg", MUXER_OUTPUT_FORMAT_OGG}};
+
+ MUXER_OUTPUT_T format = MUXER_OUTPUT_FORMAT_INVALID;
+ for (size_t i = 0; i < sizeof(kFormatMaps) / sizeof(kFormatMaps[0]); ++i) {
+ if (!fmt.compare(kFormatMaps[i].name)) {
+ format = kFormatMaps[i].value;
+ break;
+ }
+ }
+ return format;
+}
+
+TEST_P(MuxerTest, Mux) {
+ ALOGV("Mux the samples given by extractor");
+ string inputFile = gEnv->getRes() + GetParam().first;
+ FILE *inputFp = fopen(inputFile.c_str(), "rb");
+ if (!inputFp) {
+ cout << "[ WARN ] Test Skipped. Unable to open input file for reading \n";
+ return;
+ }
+ string fmt = GetParam().second;
+ MUXER_OUTPUT_T outputFormat = getMuxerOutFormat(fmt);
+ if (outputFormat == MUXER_OUTPUT_FORMAT_INVALID) {
+ ALOGE("output format is MUXER_OUTPUT_FORMAT_INVALID");
+ return;
+ }
+
+ Muxer *muxerObj = new Muxer();
+ Extractor *extractor = muxerObj->getExtractor();
+ if (!extractor) {
+ cout << "[ WARN ] Test Skipped. Extractor creation failed \n";
+ return;
+ }
+
+ // Read file properties
+ size_t fileSize = 0;
+ fseek(inputFp, 0, SEEK_END);
+ fileSize = ftell(inputFp);
+ fseek(inputFp, 0, SEEK_SET);
+ int32_t fd = fileno(inputFp);
+
+ int32_t trackCount = extractor->initExtractor(fd, fileSize);
+ if (trackCount <= 0) {
+ cout << "[ WARN ] Test Skipped. initExtractor failed\n";
+ return;
+ }
+
+ for (int curTrack = 0; curTrack < trackCount; curTrack++) {
+ int32_t status = extractor->setupTrackFormat(curTrack);
+ if (status != 0) {
+ cout << "[ WARN ] Test Skipped. Track Format invalid \n";
+ return;
+ }
+
+ uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+ if (!inputBuffer) {
+            cout << "[ WARN ] Test Skipped. Insufficient memory \n";
+ return;
+ }
+ // AMediaCodecBufferInfo : <size of frame> <flags> <presentationTimeUs> <offset>
+ vector<AMediaCodecBufferInfo> frameInfos;
+ AMediaCodecBufferInfo info;
+ uint32_t inputBufferOffset = 0;
+
+ // Get Frame Data
+ while (1) {
+ status = extractor->getFrameSample(info);
+ if (status || !info.size) break;
+            // Copy the metadata and buffer to be passed to the muxer
+            if (inputBufferOffset + info.size > kMaxBufferSize) {
+                cout << "[ WARN ] Test Skipped. Allocated memory not sufficient\n";
+ free(inputBuffer);
+ return;
+ }
+ memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ info.offset = inputBufferOffset;
+ frameInfos.push_back(info);
+ inputBufferOffset += info.size;
+ }
+
+ string outputFileName = OUTPUT_FILE_NAME;
+ FILE *outputFp = fopen(outputFileName.c_str(), "w+b");
+ if (!outputFp) {
+ cout << "[ WARN ] Test Skipped. Unable to open output file for writing \n";
+ return;
+ }
+ int32_t outFd = fileno(outputFp);
+ status = muxerObj->initMuxer(outFd, outputFormat);
+ if (status != 0) {
+ cout << "[ WARN ] Test Skipped. initMuxer failed\n";
+ return;
+ }
+
+ status = muxerObj->mux(inputBuffer, frameInfos);
+ if (status != 0) {
+ cout << "[ WARN ] Test Skipped. Mux failed \n";
+ return;
+ }
+ muxerObj->deInitMuxer();
+ muxerObj->dumpStatistics(GetParam().first + "." + fmt.c_str());
+ free(inputBuffer);
+ fclose(outputFp);
+ muxerObj->resetMuxer();
+ }
+ fclose(inputFp);
+ extractor->deInitExtractor();
+ delete muxerObj;
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ MuxerTestAll, MuxerTest,
+ ::testing::Values(make_pair("crowd_1920x1080_25fps_4000kbps_vp8.webm", "webm"),
+ make_pair("crowd_1920x1080_25fps_4000kbps_vp9.webm", "webm"),
+ make_pair("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "mp4"),
+ make_pair("crowd_352x288_25fps_6000kbps_h263.3gp", "mp4"),
+ make_pair("crowd_1920x1080_25fps_6700kbps_h264.ts", "mp4"),
+ make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", "mp4"),
+ make_pair("crowd_1920x1080_25fps_6000kbps_mpeg4.mp4", "3gpp"),
+ make_pair("crowd_352x288_25fps_6000kbps_h263.3gp", "3gpp"),
+ make_pair("crowd_1920x1080_25fps_6700kbps_h264.ts", "3gpp"),
+ make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", "3gpp"),
+ make_pair("bbb_48000hz_2ch_100kbps_opus_5mins.webm", "ogg"),
+ make_pair("bbb_44100hz_2ch_80kbps_vorbis_5mins.mp4", "webm"),
+ make_pair("bbb_48000hz_2ch_100kbps_opus_5mins.webm", "webm"),
+ make_pair("bbb_44100hz_2ch_128kbps_aac_5mins.mp4", "mp4"),
+ make_pair("bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", "mp4"),
+ make_pair("bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", "mp4"),
+ make_pair("bbb_44100hz_2ch_128kbps_aac_5mins.mp4", "3gpp"),
+ make_pair("bbb_8000hz_1ch_8kbps_amrnb_5mins.3gp", "3gpp"),
+ make_pair("bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", "3gpp")));
+
+int main(int argc, char **argv) {
+ gEnv = new BenchmarkTestEnvironment();
+ ::testing::AddGlobalTestEnvironment(gEnv);
+ ::testing::InitGoogleTest(&argc, argv);
+ int status = gEnv->initFromOptions(argc, argv);
+ if (status == 0) {
+ status = RUN_ALL_TESTS();
+ ALOGV("Test result = %d\n", status);
+ }
+ return status;
+}
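The test above drains every sample from the extractor into one contiguous buffer, recording per-frame metadata (size, offset, flags, timestamp) so the muxer can replay the frames later. A minimal, self-contained sketch of that accumulation pattern follows; FrameInfo and appendFrame() are hypothetical stand-ins for AMediaCodecBufferInfo and the loop around Extractor::getFrameSample(), not APIs from the benchmark library.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for AMediaCodecBufferInfo.
    struct FrameInfo {
        size_t size;
        size_t offset;
        int64_t presentationTimeUs;
        uint32_t flags;
    };

    // Appends one frame to the contiguous buffer and records its metadata.
    // Returns false when the frame would overflow the fixed-size buffer,
    // mirroring the kMaxBufferSize check in the test above.
    bool appendFrame(const uint8_t* frame, const FrameInfo& info,
                     std::vector<uint8_t>& buffer, size_t maxBufferSize,
                     std::vector<FrameInfo>& frames) {
        if (buffer.size() + info.size > maxBufferSize) return false;
        FrameInfo stored = info;
        stored.offset = buffer.size();  // the muxer later reads the frame at buffer + offset
        buffer.insert(buffer.end(), frame, frame + info.size);
        frames.push_back(stored);
        return true;
    }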
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 0ed92f7..8a9039c 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -30,6 +30,7 @@
],
shared_libs: [
"libbinder",
+ "libcutils",
"liblog",
"libutils",
"libhidlbase",
@@ -45,10 +46,11 @@
"-Werror",
],
- include_dirs: [
- // For android_mallopt definitions.
- "bionic/libc/private"
+ header_libs: [
+ "bionic_libc_platform_headers",
+ "libmedia_headers",
],
+
local_include_dirs: ["include"],
export_include_dirs: ["include"],
}
diff --git a/media/utils/MemoryLeakTrackUtil.cpp b/media/utils/MemoryLeakTrackUtil.cpp
index 2988b52..6166859 100644
--- a/media/utils/MemoryLeakTrackUtil.cpp
+++ b/media/utils/MemoryLeakTrackUtil.cpp
@@ -22,7 +22,7 @@
#include "media/MemoryLeakTrackUtil.h"
#include <sstream>
-#include <bionic_malloc.h>
+#include <bionic/malloc.h>
/*
* The code here originally resided in MediaPlayerService.cpp
diff --git a/media/utils/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
index 27f1a79..113e4a7 100644
--- a/media/utils/ProcessInfo.cpp
+++ b/media/utils/ProcessInfo.cpp
@@ -23,6 +23,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IProcessInfoService.h>
#include <binder/IServiceManager.h>
+#include <private/android_filesystem_config.h>
namespace android {
@@ -55,8 +56,9 @@
bool ProcessInfo::isValidPid(int pid) {
int callingPid = IPCThreadState::self()->getCallingPid();
+ int callingUid = IPCThreadState::self()->getCallingUid();
// Trust it if this is called from the same process otherwise pid has to match the calling pid.
- return (callingPid == getpid()) || (callingPid == pid);
+ return (callingPid == getpid()) || (callingPid == pid) || (callingUid == AID_MEDIA);
}
ProcessInfo::~ProcessInfo() {}
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index db13903..971ae9f 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -61,9 +61,12 @@
static bool checkRecordingInternal(const String16& opPackageName, pid_t pid,
uid_t uid, bool start) {
- // Okay to not track in app ops as audio server is us and if
+ // Okay to not track in app ops as audio server or media server is us and if
// device is rooted security model is considered compromised.
- if (isAudioServerOrRootUid(uid)) return true;
+ // system_server loses its RECORD_AUDIO permission when a secondary
+ // user is active, but it is a core system service so let it through.
+ // TODO(b/141210120): UserManager.DISALLOW_RECORD_AUDIO should not affect system user 0
+ if (isAudioServerOrMediaServerOrSystemServerOrRootUid(uid)) return true;
// We specify a pid and uid here as mediaserver (aka MediaRecorder or StageFrightRecorder)
// may open a record track on behalf of a client. Note that pid may be a tid.
@@ -222,9 +225,9 @@
off_t size = lseek(heap->getHeapID(), 0, SEEK_END);
lseek(heap->getHeapID(), 0, SEEK_SET);
- if (iMemory->pointer() == NULL || size < (off_t)iMemory->size()) {
+ if (iMemory->unsecurePointer() == NULL || size < (off_t)iMemory->size()) {
ALOGE("%s check failed: pointer %p size %zu fd size %u",
- __FUNCTION__, iMemory->pointer(), iMemory->size(), (uint32_t)size);
+ __FUNCTION__, iMemory->unsecurePointer(), iMemory->size(), (uint32_t)size);
return BAD_VALUE;
}
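The check above compares the size claimed by the IMemory object against the real size of the backing heap fd before trusting its contents. A hedged sketch of the same idea against a plain file descriptor and a caller-supplied length (the helper name is illustrative, not part of libbinder):

    #include <cstddef>
    #include <sys/types.h>
    #include <unistd.h>

    // Returns true only if the region [0, claimedSize) fits inside the file
    // backing 'fd'; mirrors the lseek(SEEK_END) check done before using the
    // shared memory. The fd offset is rewound afterwards.
    bool regionFitsInFd(int fd, size_t claimedSize) {
        off_t end = lseek(fd, 0, SEEK_END);
        if (end < 0) return false;
        lseek(fd, 0, SEEK_SET);
        return static_cast<size_t>(end) >= claimedSize;
    }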
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 59cf4ef..96f7802 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -16,8 +16,8 @@
#include <utils/Log.h>
-#include <media/TimeCheck.h>
-#include <media/EventLog.h>
+#include <mediautils/TimeCheck.h>
+#include <mediautils/EventLog.h>
namespace android {
@@ -82,10 +82,10 @@
if (waitTimeNs > 0) {
status = mCond.waitRelative(mMutex, waitTimeNs);
}
- }
- if (status != NO_ERROR) {
- LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
- LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
+ if (status != NO_ERROR) {
+ LOG_EVENT_STRING(LOGTAG_AUDIO_BINDER_TIMEOUT, tag);
+ LOG_ALWAYS_FATAL("TimeCheck timeout for %s", tag);
+ }
}
return true;
}
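The TimeCheck change above moves the timeout handling inside the scope that owns the wait, so the fatal log fires only for the wait that actually timed out. A generic sketch of the same timed-wait pattern with the standard library (the real code uses Android's Mutex/Condition, not std::condition_variable):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Waits until 'done' becomes true or 'timeout' expires.
    // Returns false on timeout; the caller decides whether that is fatal,
    // the way TimeCheck aborts only when its own wait timed out.
    bool waitOrTimeout(std::mutex& m, std::condition_variable& cv,
                       bool& done, std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lock(m);
        return cv.wait_for(lock, timeout, [&] { return done; });
    }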
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 2a6e609..2595761 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -58,6 +58,13 @@
return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER;
}
+// used for calls that should come from system_server or audio_server or media server and
+// include AID_ROOT for command-line tests.
+static inline bool isAudioServerOrMediaServerOrSystemServerOrRootUid(uid_t uid) {
+ return multiuser_get_app_id(uid) == AID_SYSTEM || uid == AID_AUDIOSERVER
+ || uid == AID_MEDIA || uid == AID_ROOT;
+}
+
// Mediaserver may forward the client PID and UID as part of a binder interface call;
// otherwise the calling UID must be equal to the client UID.
static inline bool isAudioServerOrMediaServerUid(uid_t uid) {
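The new isAudioServerOrMediaServerOrSystemServerOrRootUid() helper keys off well-known Android UIDs and the app-id portion of a multi-user UID. A simplified sketch with the UID constants written out explicitly (the values mirror android_filesystem_config.h and multiuser.h but are shown inline purely for illustration):

    #include <cstdint>

    constexpr uint32_t kAidRoot        = 0;
    constexpr uint32_t kAidSystem      = 1000;
    constexpr uint32_t kAidMedia       = 1013;
    constexpr uint32_t kAidAudioserver = 1041;
    constexpr uint32_t kUserOffset     = 100000;  // UIDs repeat per Android user

    // multiuser_get_app_id() strips the user part of a UID; e.g. UID 1001000
    // (user 10, app id 1000) maps back to the system app id.
    constexpr uint32_t appId(uint32_t uid) { return uid % kUserOffset; }

    constexpr bool isTrustedRecordingUid(uint32_t uid) {
        return appId(uid) == kAidSystem || uid == kAidAudioserver
            || uid == kAidMedia || uid == kAidRoot;
    }

    static_assert(isTrustedRecordingUid(1001000), "system UID of user 10 is trusted");
    static_assert(!isTrustedRecordingUid(10007), "ordinary app UID is not trusted");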
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 96ad54b..de8c7e7 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -34,6 +34,7 @@
],
shared_libs: [
+ "libaudiofoundation",
"libaudiohal",
"libaudioprocessing",
"libaudiospdif",
@@ -60,6 +61,10 @@
"libsndfile",
],
+ header_libs: [
+ "libmedia_headers",
+ ],
+
cflags: [
"-DSTATE_QUEUE_INSTANTIATIONS=\"StateQueueInstantiations.cpp\"",
"-fvisibility=hidden",
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 355d945..eba0b20 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -19,6 +19,9 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
+// Define AUDIO_ARRAYS_STATIC_CHECK to check all audio arrays are correct
+#define AUDIO_ARRAYS_STATIC_CHECK 1
+
#include "Configuration.h"
#include <dirent.h>
#include <math.h>
@@ -378,7 +381,7 @@
AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
audio_module_handle_t module,
- audio_devices_t devices)
+ audio_devices_t deviceType)
{
// if module is 0, the request comes from an old policy manager and we should load
// well known modules
@@ -393,7 +396,7 @@
sp<DeviceHalInterface> dev = audioHwDevice->hwDevice();
uint32_t supportedDevices;
if (dev->getSupportedDevices(&supportedDevices) == OK &&
- (supportedDevices & devices) == devices) {
+ (supportedDevices & deviceType) == deviceType) {
return audioHwDevice;
}
}
@@ -652,7 +655,7 @@
return new NBLog::Writer();
}
success:
- NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->pointer();
+ NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->unsecurePointer();
new((void *) sharedRawPtr) NBLog::Shared(); // placement new here, but the corresponding
// explicit destructor not needed since it is POD
sMediaLogService->registerWriter(shared, size, name);
@@ -810,7 +813,33 @@
continue;
}
- size_t frameCount = std::lcm(thread->frameCount(), secondaryThread->frameCount());
+ size_t sourceFrameCount = thread->frameCount() * output.sampleRate
+ / thread->sampleRate();
+ size_t sinkFrameCount = secondaryThread->frameCount() * output.sampleRate
+ / secondaryThread->sampleRate();
+ // If the secondary output has just been opened, the first secondaryThread write
+ // will not block as it will fill the empty startup buffer of the HAL,
+ // so a second sink buffer needs to be ready for the immediate next blocking write.
+ // Additionally, have a margin of one main thread buffer as the scheduling jitter
+ // can reorder the writes (e.g. if threads A and B have the same write interval,
+ // the scheduler could schedule AB...BA).
+ size_t frameCountToBeReady = 2 * sinkFrameCount + sourceFrameCount;
+ // The total secondary output buffer must be at least as large as the read frames plus
+ // a margin of a few buffers on both sides in case the
+ // threads' scheduling has some jitter.
+ // That value should not impact latency as the secondary track is started before
+ // its buffer is full, see frameCountToBeReady.
+ size_t frameCount = frameCountToBeReady + 2 * (sourceFrameCount + sinkFrameCount);
+ // The frameCount should also not be smaller than the secondary thread min frame
+ // count
+ size_t minFrameCount = AudioSystem::calculateMinFrameCount(
+ [&] { Mutex::Autolock _l(secondaryThread->mLock);
+ return secondaryThread->latency_l(); }(),
+ secondaryThread->mNormalFrameCount,
+ secondaryThread->mSampleRate,
+ output.sampleRate,
+ input.speed);
+ frameCount = std::max(frameCount, minFrameCount);
using namespace std::chrono_literals;
auto inChannelMask = audio_channel_mask_out_to_in(input.config.channel_mask);
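To make the sizing above concrete, here is a small worked example with purely illustrative numbers (not taken from the patch): assume a 48 kHz main (source) thread with 960-frame buffers feeding a secondary (sink) thread that runs its output at 44.1 kHz with 882-frame buffers.

    #include <cstddef>

    // Illustrative numbers only; both counts are expressed at the sink rate.
    constexpr size_t sourceFrames = 960 * 44100 / 48000;  // = 882
    constexpr size_t sinkFrames   = 882 * 44100 / 44100;  // = 882
    constexpr size_t framesToBeReady = 2 * sinkFrames + sourceFrames;                      // 2646
    constexpr size_t patchFrameCount = framesToBeReady + 2 * (sourceFrames + sinkFrames);  // 6174

    static_assert(framesToBeReady == 2646, "two sink buffers plus one source buffer");
    static_assert(patchFrameCount == 6174, "ready threshold plus jitter margin on both sides");

The final buffer would then still be raised to the secondary thread's minimum frame count, exactly as the std::max() call above does.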
@@ -843,7 +872,8 @@
patchRecord->buffer(),
patchRecord->bufferSize(),
outputFlags,
- 0ns /* timeout */);
+ 0ns /* timeout */,
+ frameCountToBeReady);
status = patchTrack->initCheck();
if (status != NO_ERROR) {
ALOGE("Secondary output patchTrack init failed: %d", status);
@@ -1132,16 +1162,16 @@
return mute;
}
-void AudioFlinger::setRecordSilenced(uid_t uid, bool silenced)
+void AudioFlinger::setRecordSilenced(audio_port_handle_t portId, bool silenced)
{
- ALOGV("AudioFlinger::setRecordSilenced(uid:%d, silenced:%d)", uid, silenced);
+ ALOGV("AudioFlinger::setRecordSilenced(portId:%d, silenced:%d)", portId, silenced);
AutoMutex lock(mLock);
for (size_t i = 0; i < mRecordThreads.size(); i++) {
- mRecordThreads[i]->setRecordSilenced(uid, silenced);
+ mRecordThreads[i]->setRecordSilenced(portId, silenced);
}
for (size_t i = 0; i < mMmapThreads.size(); i++) {
- mMmapThreads[i]->setRecordSilenced(uid, silenced);
+ mMmapThreads[i]->setRecordSilenced(portId, silenced);
}
}
@@ -1933,7 +1963,8 @@
&output.notificationFrameCount,
callingPid, clientUid, &output.flags,
input.clientInfo.clientTid,
- &lStatus, portId);
+ &lStatus, portId,
+ input.opPackageName);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
// lStatus == BAD_TYPE means FAST flag was rejected: request a new input from
@@ -2273,13 +2304,13 @@
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
- audio_io_handle_t *output,
- audio_config_t *config,
- audio_devices_t devices,
- const String8& address,
- audio_output_flags_t flags)
+ audio_io_handle_t *output,
+ audio_config_t *config,
+ audio_devices_t deviceType,
+ const String8& address,
+ audio_output_flags_t flags)
{
- AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
+ AudioHwDevice *outHwDev = findSuitableHwDev_l(module, deviceType);
if (outHwDev == NULL) {
return 0;
}
@@ -2320,7 +2351,7 @@
status_t status = outHwDev->openOutputStream(
&outputStream,
*output,
- devices,
+ deviceType,
flags,
config,
address.string());
@@ -2331,7 +2362,7 @@
if (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) {
sp<MmapPlaybackThread> thread =
new MmapPlaybackThread(this, *output, outHwDev, outputStream,
- devices, AUDIO_DEVICE_NONE, mSystemReady);
+ deviceType, AUDIO_DEVICE_NONE, mSystemReady);
mMmapThreads.add(*output, thread);
ALOGV("openOutput_l() created mmap playback thread: ID %d thread %p",
*output, thread.get());
@@ -2339,17 +2370,18 @@
} else {
sp<PlaybackThread> thread;
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
+ thread = new OffloadThread(this, outputStream, *output, deviceType, mSystemReady);
ALOGV("openOutput_l() created offload output: ID %d thread %p",
*output, thread.get());
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
|| !isValidPcmSinkFormat(config->format)
|| !isValidPcmSinkChannelMask(config->channel_mask)) {
- thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
+ thread = new DirectOutputThread(
+ this, outputStream, *output, deviceType, mSystemReady);
ALOGV("openOutput_l() created direct output: ID %d thread %p",
*output, thread.get());
} else {
- thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
+ thread = new MixerThread(this, outputStream, *output, deviceType, mSystemReady);
ALOGV("openOutput_l() created mixer output: ID %d thread %p",
*output, thread.get());
}
@@ -2365,27 +2397,29 @@
status_t AudioFlinger::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
+ const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags)
{
- ALOGI("openOutput() this %p, module %d Device %#x, SamplingRate %d, Format %#08x, "
+ ALOGI("openOutput() this %p, module %d Device %s, SamplingRate %d, Format %#08x, "
"Channels %#x, flags %#x",
this, module,
- (devices != NULL) ? *devices : 0,
+ device->toString().c_str(),
config->sample_rate,
config->format,
config->channel_mask,
flags);
- if (devices == NULL || *devices == AUDIO_DEVICE_NONE) {
+ audio_devices_t deviceType = device->type();
+ const String8 address = String8(device->address().c_str());
+
+ if (deviceType == AUDIO_DEVICE_NONE) {
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
- sp<ThreadBase> thread = openOutput_l(module, output, config, *devices, address, flags);
+ sp<ThreadBase> thread = openOutput_l(module, output, config, deviceType, address, flags);
if (thread != 0) {
if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
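openOutput() now takes a single DeviceDescriptorBase instead of a raw audio_devices_t plus address string, and unpacks type and address internally. The sketch below shows what such a descriptor bundles; it is a simplified stand-in for illustration, not the libaudiofoundation class.

    #include <cstdint>
    #include <sstream>
    #include <string>

    // One object carries both the device type and its address, so callers
    // no longer pass them as two loosely related parameters.
    class DeviceDescriptor {
    public:
        DeviceDescriptor(uint32_t type, std::string address)
            : mType(type), mAddress(std::move(address)) {}

        uint32_t type() const { return mType; }
        const std::string& address() const { return mAddress; }

        std::string toString() const {
            std::ostringstream os;
            os << "type:0x" << std::hex << mType << " addr:" << mAddress;
            return os.str();
        }

    private:
        uint32_t mType;
        std::string mAddress;
    };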
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 72e669a..65be06d 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -67,11 +67,11 @@
#include <media/AudioBufferProvider.h>
#include <media/AudioMixer.h>
#include <media/ExtendedAudioBufferProvider.h>
-#include <media/LinearMap.h>
#include <media/VolumeShaper.h>
#include <audio_utils/clock.h>
#include <audio_utils/FdToString.h>
+#include <audio_utils/LinearMap.h>
#include <audio_utils/SimpleLog.h>
#include <audio_utils/TimestampVerifier.h>
@@ -162,7 +162,7 @@
virtual status_t setMicMute(bool state);
virtual bool getMicMute() const;
- virtual void setRecordSilenced(uid_t uid, bool silenced);
+ virtual void setRecordSilenced(audio_port_handle_t portId, bool silenced);
virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs);
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const;
@@ -175,8 +175,7 @@
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
+ const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags);
@@ -372,7 +371,7 @@
virtual void onFirstRef();
AudioHwDevice* findSuitableHwDev_l(audio_module_handle_t module,
- audio_devices_t devices);
+ audio_devices_t deviceType);
// Set kEnableExtendedChannels to true to enable greater than stereo output
// for the MixerThread and device sink. Number of channels allowed is
@@ -547,6 +546,16 @@
bool mute;
};
+ // Abstraction for the Audio Source for the RecordThread (HAL or PassthruPatchRecord).
+ struct Source
+ {
+ virtual ~Source() = default;
+ // The following methods have the same signatures as in StreamHalInterface.
+ virtual status_t read(void *buffer, size_t bytes, size_t *read) = 0;
+ virtual status_t getCapturePosition(int64_t *frames, int64_t *time) = 0;
+ virtual status_t standby() = 0;
+ };
+
// --- PlaybackThread ---
#ifdef FLOAT_EFFECT_CHAIN
#define EFFECT_BUFFER_FORMAT AUDIO_FORMAT_PCM_FLOAT
@@ -668,11 +677,11 @@
audio_devices_t outputDevice,
const String8& outputDeviceAddress);
sp<ThreadBase> openOutput_l(audio_module_handle_t module,
- audio_io_handle_t *output,
- audio_config_t *config,
- audio_devices_t devices,
- const String8& address,
- audio_output_flags_t flags);
+ audio_io_handle_t *output,
+ audio_config_t *config,
+ audio_devices_t deviceType,
+ const String8& address,
+ audio_output_flags_t flags);
void closeOutputFinish(const sp<PlaybackThread>& thread);
void closeInputFinish(const sp<RecordThread>& thread);
@@ -749,7 +758,7 @@
// For emphasis, we could also make all pointers to them be "const *",
// but that would clutter the code unnecessarily.
- struct AudioStreamIn {
+ struct AudioStreamIn : public Source {
AudioHwDevice* const audioHwDev;
sp<StreamInHalInterface> stream;
audio_input_flags_t flags;
@@ -758,6 +767,13 @@
AudioStreamIn(AudioHwDevice *dev, sp<StreamInHalInterface> in, audio_input_flags_t flags) :
audioHwDev(dev), stream(in), flags(flags) {}
+ status_t read(void *buffer, size_t bytes, size_t *read) override {
+ return stream->read(buffer, bytes, read);
+ }
+ status_t getCapturePosition(int64_t *frames, int64_t *time) override {
+ return stream->getCapturePosition(frames, time);
+ }
+ status_t standby() override { return stream->standby(); }
};
struct TeePatch {
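The new Source interface lets RecordThread read PCM data without caring whether it comes from the HAL input stream or from a passthrough patch record. A minimal sketch of the same seam follows, with a trivial in-memory implementation standing in for the HAL stream; the names are illustrative and getCapturePosition() is omitted for brevity.

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Mirrors the shape of the Source abstraction added above.
    struct AudioSource {
        virtual ~AudioSource() = default;
        virtual int read(void* buffer, size_t bytes, size_t* bytesRead) = 0;
        virtual int standby() = 0;
    };

    // Trivial in-memory source: hands out a captured buffer in chunks,
    // the way a HAL stream or a PassthruPatchRecord would feed RecordThread.
    class MemorySource : public AudioSource {
    public:
        explicit MemorySource(std::vector<uint8_t> data) : mData(std::move(data)) {}

        int read(void* buffer, size_t bytes, size_t* bytesRead) override {
            size_t n = std::min(bytes, mData.size() - mPos);
            std::memcpy(buffer, mData.data() + mPos, n);
            mPos += n;
            *bytesRead = n;
            return 0;  // NO_ERROR
        }

        int standby() override { mPos = 0; return 0; }

    private:
        std::vector<uint8_t> mData;
        size_t mPos = 0;
    };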
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
index b109d06..dda164c 100644
--- a/services/audioflinger/AudioHwDevice.cpp
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -34,7 +34,7 @@
status_t AudioHwDevice::openOutputStream(
AudioStreamOut **ppStreamOut,
audio_io_handle_t handle,
- audio_devices_t devices,
+ audio_devices_t deviceType,
audio_output_flags_t flags,
struct audio_config *config,
const char *address)
@@ -50,7 +50,7 @@
config->sample_rate,
config->format,
config->channel_mask);
- status_t status = outputStream->open(handle, devices, config, address);
+ status_t status = outputStream->open(handle, deviceType, config, address);
if (status != NO_ERROR) {
delete outputStream;
@@ -75,7 +75,7 @@
if (wrapperNeeded) {
if (SPDIFEncoder::isFormatSupported(originalConfig.format)) {
outputStream = new SpdifStreamOut(this, flags, originalConfig.format);
- status = outputStream->open(handle, devices, &originalConfig, address);
+ status = outputStream->open(handle, deviceType, &originalConfig, address);
if (status != NO_ERROR) {
ALOGE("ERROR - openOutputStream(), SPDIF open returned %d",
status);
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index d4299b0..6709d17 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -76,7 +76,7 @@
status_t openOutputStream(
AudioStreamOut **ppStreamOut,
audio_io_handle_t handle,
- audio_devices_t devices,
+ audio_devices_t deviceType,
audio_output_flags_t flags,
struct audio_config *config,
const char *address);
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
index a60a5f2..d13cb8f 100644
--- a/services/audioflinger/AudioStreamOut.cpp
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -118,7 +118,7 @@
status_t AudioStreamOut::open(
audio_io_handle_t handle,
- audio_devices_t devices,
+ audio_devices_t deviceType,
struct audio_config *config,
const char *address)
{
@@ -130,7 +130,7 @@
int status = hwDev()->openOutputStream(
handle,
- devices,
+ deviceType,
customFlags,
config,
address,
@@ -152,7 +152,7 @@
status = hwDev()->openOutputStream(
handle,
- devices,
+ deviceType,
customFlags,
&customConfig,
address,
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
index b16b1af..16fbcf2 100644
--- a/services/audioflinger/AudioStreamOut.h
+++ b/services/audioflinger/AudioStreamOut.h
@@ -47,7 +47,7 @@
virtual status_t open(
audio_io_handle_t handle,
- audio_devices_t devices,
+ audio_devices_t deviceType,
struct audio_config *config,
const char *address);
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 13152d0..d54ab42 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1588,7 +1588,7 @@
int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
if (mCblkMemory == 0 ||
- (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) {
+ (mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
ALOGE("not enough memory for Effect size=%zu", EFFECT_PARAM_BUFFER_SIZE +
sizeof(effect_param_cblk_t));
mCblkMemory.clear();
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index edb331d..18cb53b 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -483,19 +483,6 @@
// Fast mode is not available in this case.
inputFlags = (audio_input_flags_t) (inputFlags & ~AUDIO_INPUT_FLAG_FAST);
}
- sp<RecordThread::PatchRecord> tempRecordTrack = new (std::nothrow) RecordThread::PatchRecord(
- mRecord.thread().get(),
- sampleRate,
- inChannelMask,
- format,
- frameCount,
- NULL,
- (size_t)0 /* bufferSize */,
- inputFlags);
- status = mRecord.checkTrack(tempRecordTrack.get());
- if (status != NO_ERROR) {
- return status;
- }
audio_output_flags_t outputFlags = mAudioPatch.sinks[0].config_mask & AUDIO_PORT_CONFIG_FLAGS ?
mAudioPatch.sinks[0].flags.output : AUDIO_OUTPUT_FLAG_NONE;
@@ -512,9 +499,34 @@
outputFlags = (audio_output_flags_t) (outputFlags & ~AUDIO_OUTPUT_FLAG_FAST);
}
+ sp<RecordThread::PatchRecord> tempRecordTrack;
+ if ((inputFlags & AUDIO_INPUT_FLAG_DIRECT) && (outputFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
+ tempRecordTrack = new RecordThread::PassthruPatchRecord(
+ mRecord.thread().get(),
+ sampleRate,
+ inChannelMask,
+ format,
+ frameCount,
+ inputFlags);
+ } else {
+ tempRecordTrack = new RecordThread::PatchRecord(
+ mRecord.thread().get(),
+ sampleRate,
+ inChannelMask,
+ format,
+ frameCount,
+ nullptr,
+ (size_t)0 /* bufferSize */,
+ inputFlags);
+ }
+ status = mRecord.checkTrack(tempRecordTrack.get());
+ if (status != NO_ERROR) {
+ return status;
+ }
+
// create a special playback track to render to playback thread.
// this track is given the same buffer as the PatchRecord buffer
- sp<PlaybackThread::PatchTrack> tempPatchTrack = new (std::nothrow) PlaybackThread::PatchTrack(
+ sp<PlaybackThread::PatchTrack> tempPatchTrack = new PlaybackThread::PatchTrack(
mPlayback.thread().get(),
streamType,
sampleRate,
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a093893..1ff03c4 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -74,7 +74,10 @@
uid_t uid,
audio_output_flags_t flags,
track_type type,
- audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE,
+ /** Default behaviour is to start when as many frames
+ * as possible are ready (i.e. the buffer is full). */
+ size_t frameCountToBeReady = SIZE_MAX);
virtual ~Track();
virtual status_t initCheck() const;
@@ -263,11 +266,11 @@
};
sp<AudioVibrationController> mAudioVibrationController;
sp<os::ExternalVibration> mExternalVibration;
+ /** How many frames should be in the buffer before the track is considered ready */
+ const size_t mFrameCountToBeReady;
private:
void interceptBuffer(const AudioBufferProvider::Buffer& buffer);
- /** Write the source data in the buffer provider. @return written frame count. */
- size_t writeFrames(AudioBufferProvider* dest, const void* src, size_t frameCount);
template <class F>
void forEachTeePatchTrack(F f) {
for (auto& tp : mTeePatches) { f(tp.patchTrack); }
@@ -384,9 +387,15 @@
void *buffer,
size_t bufferSize,
audio_output_flags_t flags,
- const Timeout& timeout = {});
+ const Timeout& timeout = {},
+ size_t frameCountToBeReady = 1 /** Default behaviour is to start
+ * as soon as possible to have
+ * the lowest possible latency
+ * even if it might glitch. */);
virtual ~PatchTrack();
+ size_t framesReady() const override;
+
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
audio_session_t triggerSession = AUDIO_SESSION_NONE);
@@ -402,5 +411,4 @@
private:
void restartIfDisabled();
-
}; // end of PatchTrack
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 08660dd..d87239d 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -19,6 +19,40 @@
#error This header file should only be included from AudioFlinger.h
#endif
+// Checks and monitors OP_RECORD_AUDIO
+class OpRecordAudioMonitor : public RefBase {
+public:
+ ~OpRecordAudioMonitor() override;
+ bool hasOpRecordAudio() const;
+
+ static sp<OpRecordAudioMonitor> createIfNeeded
+ (uid_t uid, const audio_attributes_t& attr, const String16& opPackageName);
+
+private:
+ OpRecordAudioMonitor(uid_t uid, const String16& opPackageName);
+ void onFirstRef() override;
+
+ AppOpsManager mAppOpsManager;
+
+ class RecordAudioOpCallback : public BnAppOpsCallback {
+ public:
+ explicit RecordAudioOpCallback(const wp<OpRecordAudioMonitor>& monitor);
+ void opChanged(int32_t op, const String16& packageName) override;
+
+ private:
+ const wp<OpRecordAudioMonitor> mMonitor;
+ };
+
+ sp<RecordAudioOpCallback> mOpCallback;
+ // called by RecordAudioOpCallback when OP_RECORD_AUDIO is updated in AppOp callback
+ // and in onFirstRef()
+ void checkRecordAudio();
+
+ std::atomic_bool mHasOpRecordAudio;
+ const uid_t mUid;
+ const String16 mPackage;
+};
+
// record track
class RecordTrack : public TrackBase {
public:
@@ -36,6 +70,7 @@
uid_t uid,
audio_input_flags_t flags,
track_type type,
+ const String16& opPackageName,
audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE);
virtual ~RecordTrack();
virtual status_t initCheck() const;
@@ -68,7 +103,7 @@
{ return (mFlags & AUDIO_INPUT_FLAG_DIRECT) != 0; }
void setSilenced(bool silenced) { if (!isPatchTrack()) mSilenced = silenced; }
- bool isSilenced() const { return mSilenced; }
+ bool isSilenced() const;
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
@@ -111,6 +146,11 @@
audio_input_flags_t mFlags;
bool mSilenced;
+
+ // used to enforce OP_RECORD_AUDIO
+ uid_t mUid;
+ String16 mOpPackageName;
+ sp<OpRecordAudioMonitor> mOpRecordAudioMonitor;
};
// playback track, used by PatchPanel
@@ -128,6 +168,8 @@
const Timeout& timeout = {});
virtual ~PatchRecord();
+ virtual Source* getSource() { return nullptr; }
+
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
@@ -136,4 +178,71 @@
virtual status_t obtainBuffer(Proxy::Buffer *buffer,
const struct timespec *timeOut = NULL);
virtual void releaseBuffer(Proxy::Buffer *buffer);
+
+ size_t writeFrames(const void* src, size_t frameCount, size_t frameSize) {
+ return writeFrames(this, src, frameCount, frameSize);
+ }
+
+protected:
+ /** Write the source data into the buffer provider. @return written frame count. */
+ static size_t writeFrames(AudioBufferProvider* dest, const void* src,
+ size_t frameCount, size_t frameSize);
+
}; // end of PatchRecord
+
+class PassthruPatchRecord : public PatchRecord, public Source {
+public:
+ PassthruPatchRecord(RecordThread *recordThread,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_format_t format,
+ size_t frameCount,
+ audio_input_flags_t flags);
+
+ Source* getSource() override { return static_cast<Source*>(this); }
+
+ // Source interface
+ status_t read(void *buffer, size_t bytes, size_t *read) override;
+ status_t getCapturePosition(int64_t *frames, int64_t *time) override;
+ status_t standby() override;
+
+ // AudioBufferProvider interface
+ // This interface is used by RecordThread to pass the data obtained
+ // from HAL or other source to the client. PassthruPatchRecord receives
+ // the data in 'obtainBuffer' so these calls are stubbed out.
+ status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) override;
+ void releaseBuffer(AudioBufferProvider::Buffer* buffer) override;
+
+ // PatchProxyBufferProvider interface
+ // This interface is used from DirectOutputThread to acquire data from HAL.
+ bool producesBufferOnDemand() const override { return true; }
+ status_t obtainBuffer(Proxy::Buffer *buffer, const struct timespec *timeOut = nullptr) override;
+ void releaseBuffer(Proxy::Buffer *buffer) override;
+
+private:
+ // This is to use with PatchRecord::writeFrames
+ struct PatchRecordAudioBufferProvider : public AudioBufferProvider {
+ explicit PatchRecordAudioBufferProvider(PassthruPatchRecord& passthru) :
+ mPassthru(passthru) {}
+ status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) override {
+ return mPassthru.PatchRecord::getNextBuffer(buffer);
+ }
+ void releaseBuffer(AudioBufferProvider::Buffer* buffer) override {
+ return mPassthru.PatchRecord::releaseBuffer(buffer);
+ }
+ private:
+ PassthruPatchRecord& mPassthru;
+ };
+
+ sp<StreamInHalInterface> obtainStream(sp<ThreadBase>* thread);
+
+ PatchRecordAudioBufferProvider mPatchRecordAudioBufferProvider;
+ std::unique_ptr<void, decltype(free)*> mSinkBuffer; // frame-size-aligned contiguous buffer
+ std::unique_ptr<void, decltype(free)*> mStubBuffer; // buffer used for AudioBufferProvider
+ size_t mUnconsumedFrames = 0;
+ std::mutex mReadLock;
+ std::condition_variable mReadCV;
+ size_t mReadBytes = 0; // GUARDED_BY(mReadLock)
+ status_t mReadError = NO_ERROR; // GUARDED_BY(mReadLock)
+ int64_t mLastReadFrames = 0; // accessed on RecordThread only
+};
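PassthruPatchRecord sits between two threads: the playback (sink) side pulls data from the HAL in obtainBuffer(), then wakes the record side by publishing the number of bytes read under mReadLock / mReadCV. A stripped-down sketch of that hand-off using standard-library primitives instead of the AudioFlinger ones (class and member names are illustrative):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    // Producer/consumer hand-off used by the passthrough patch: the side that
    // actually reads the HAL publishes how many bytes it got; the other side
    // waits for that count before copying the data out.
    class ReadHandoff {
    public:
        // Called on the reading (sink) side after a successful HAL read.
        void publish(size_t bytes) {
            {
                std::lock_guard<std::mutex> lock(mLock);
                mBytes += bytes;
            }
            mCv.notify_one();
        }

        // Called on the record side; blocks until bytes are available,
        // then consumes up to 'maxBytes' of the published count.
        size_t consume(size_t maxBytes) {
            std::unique_lock<std::mutex> lock(mLock);
            mCv.wait(lock, [&] { return mBytes > 0; });
            size_t n = mBytes < maxBytes ? mBytes : maxBytes;
            mBytes -= n;
            return n;
        }

    private:
        std::mutex mLock;
        std::condition_variable mCv;
        size_t mBytes = 0;  // guarded by mLock
    };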
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
index a44ab2a..c7aba79 100644
--- a/services/audioflinger/SpdifStreamOut.cpp
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -59,6 +59,7 @@
// TODO Move this into the audio_utils as a static method.
switch(config->format) {
case AUDIO_FORMAT_E_AC3:
+ case AUDIO_FORMAT_E_AC3_JOC:
mRateMultiplier = 4;
break;
case AUDIO_FORMAT_AC3:
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 48ba9d4..8704d16 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2079,9 +2079,9 @@
// More than 2 channels does not require stronger alignment than stereo
alignment <<= 1;
}
- if (((uintptr_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
+ if (((uintptr_t)sharedBuffer->unsecurePointer() & (alignment - 1)) != 0) {
ALOGE("Invalid buffer alignment: address %p, channel count %u",
- sharedBuffer->pointer(), channelCount);
+ sharedBuffer->unsecurePointer(), channelCount);
lStatus = BAD_VALUE;
goto Exit;
}
@@ -6679,6 +6679,7 @@
) :
ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
mInput(input),
+ mSource(mInput),
mActiveTracks(&this->mLocalLog),
mRsmpInBuffer(NULL),
// mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
@@ -6756,7 +6757,7 @@
sp<IMemory> pipeMemory;
if ((roHeap == 0) ||
(pipeMemory = roHeap->allocate(pipeSize)) == 0 ||
- (pipeBuffer = pipeMemory->pointer()) == nullptr) {
+ (pipeBuffer = pipeMemory->unsecurePointer()) == nullptr) {
ALOGE("not enough memory for pipe buffer size=%zu; "
"roHeap=%p, pipeMemory=%p, pipeBuffer=%p; roHeapSize: %lld",
pipeSize, roHeap.get(), pipeMemory.get(), pipeBuffer,
@@ -7131,7 +7132,7 @@
} else {
ATRACE_BEGIN("read");
size_t bytesRead;
- status_t result = mInput->stream->read(
+ status_t result = mSource->read(
(uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize, &bytesRead);
ATRACE_END();
if (result < 0) {
@@ -7153,7 +7154,7 @@
int64_t position, time;
if (mStandby) {
mTimestampVerifier.discontinuity();
- } else if (mInput->stream->getCapturePosition(&position, &time) == NO_ERROR
+ } else if (mSource->getCapturePosition(&position, &time) == NO_ERROR
&& time > mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]) {
mTimestampVerifier.add(position, time, mSampleRate);
@@ -7302,7 +7303,7 @@
// Sanitize before releasing if the track has no access to the source data
// An idle UID receives silence from non virtual devices until active
if (activeTrack->isSilenced()) {
- memset(activeTrack->mSink.raw, 0, framesOut * mFrameSize);
+ memset(activeTrack->mSink.raw, 0, framesOut * activeTrack->frameSize());
}
activeTrack->releaseBuffer(&activeTrack->mSink);
}
@@ -7434,7 +7435,7 @@
sq->end(false /*didModify*/);
}
}
- status_t result = mInput->stream->standby();
+ status_t result = mSource->standby();
ALOGE_IF(result != OK, "Error when putting input stream into standby: %d", result);
// If going into standby, flush the pipe source.
@@ -7463,7 +7464,8 @@
audio_input_flags_t *flags,
pid_t tid,
status_t *status,
- audio_port_handle_t portId)
+ audio_port_handle_t portId,
+ const String16& opPackageName)
{
size_t frameCount = *pFrameCount;
size_t notificationFrameCount = *pNotificationFrameCount;
@@ -7597,7 +7599,7 @@
track = new RecordTrack(this, client, attr, sampleRate,
format, channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid, uid,
- *flags, TrackBase::TYPE_DEFAULT, portId);
+ *flags, TrackBase::TYPE_DEFAULT, opPackageName, portId);
lStatus = track->initCheck();
if (lStatus != NO_ERROR) {
@@ -7933,12 +7935,12 @@
write(fd, result.string(), result.size());
}
-void AudioFlinger::RecordThread::setRecordSilenced(uid_t uid, bool silenced)
+void AudioFlinger::RecordThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
{
Mutex::Autolock _l(mLock);
for (size_t i = 0; i < mTracks.size() ; i++) {
sp<RecordTrack> track = mTracks[i];
- if (track != 0 && track->uid() == uid) {
+ if (track != 0 && track->portId() == portId) {
track->setSilenced(silenced);
}
}
@@ -8419,11 +8421,17 @@
{
Mutex::Autolock _l(mLock);
mTracks.add(record);
+ if (record->getSource()) {
+ mSource = record->getSource();
+ }
}
void AudioFlinger::RecordThread::deletePatchTrack(const sp<PatchRecord>& record)
{
Mutex::Autolock _l(mLock);
+ if (mSource == record->getSource()) {
+ mSource = mInput;
+ }
destroyTrack_l(record);
}
@@ -9476,11 +9484,11 @@
mInput->stream->updateSinkMetadata(metadata);
}
-void AudioFlinger::MmapCaptureThread::setRecordSilenced(uid_t uid, bool silenced)
+void AudioFlinger::MmapCaptureThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
{
Mutex::Autolock _l(mLock);
for (size_t i = 0; i < mActiveTracks.size() ; i++) {
- if (mActiveTracks[i]->uid() == uid) {
+ if (mActiveTracks[i]->portId() == portId) {
mActiveTracks[i]->setSilenced_l(silenced);
broadcast_l();
}
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index fc8aa13..34a3f34 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1553,7 +1553,8 @@
audio_input_flags_t *flags,
pid_t tid,
status_t *status /*non-NULL*/,
- audio_port_handle_t portId);
+ audio_port_handle_t portId,
+ const String16& opPackageName);
status_t start(RecordTrack* recordTrack,
AudioSystem::sync_event_t event,
@@ -1615,7 +1616,7 @@
void checkBtNrec();
// Sets the UID records silence
- void setRecordSilenced(uid_t uid, bool silenced);
+ void setRecordSilenced(audio_port_handle_t portId, bool silenced);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
@@ -1646,6 +1647,7 @@
void checkBtNrec_l();
AudioStreamIn *mInput;
+ Source *mSource;
SortedVector < sp<RecordTrack> > mTracks;
// mActiveTracks has dual roles: it indicates the current active track(s), and
// is used together with mStartStopCond to indicate start()/stop() progress
@@ -1784,7 +1786,8 @@
virtual void invalidateTracks(audio_stream_type_t streamType __unused) {}
// Sets the UID records silence
- virtual void setRecordSilenced(uid_t uid __unused, bool silenced __unused) {}
+ virtual void setRecordSilenced(audio_port_handle_t portId __unused,
+ bool silenced __unused) {}
protected:
void dumpInternals_l(int fd, const Vector<String16>& args) override;
@@ -1871,7 +1874,8 @@
void updateMetadata_l() override;
void processVolume_l() override;
- void setRecordSilenced(uid_t uid, bool silenced) override;
+ void setRecordSilenced(audio_port_handle_t portId,
+ bool silenced) override;
virtual void toAudioPortConfig(struct audio_port_config *config);
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 8f720b5..051f1e3 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -205,6 +205,16 @@
protected:
DISALLOW_COPY_AND_ASSIGN(TrackBase);
+ void releaseCblk() {
+ if (mCblk != nullptr) {
+ mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
+ if (mClient == 0) {
+ free(mCblk);
+ }
+ mCblk = nullptr;
+ }
+ }
+
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
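releaseCblk() pairs with the placement-new construction of the control block: the destructor is invoked explicitly, and the raw memory is freed only when the track allocated it locally rather than from a client heap. A generic sketch of that placement-new / explicit-destructor pattern (the ControlBlock type is a stand-in for audio_track_cblk_t):

    #include <cstdlib>
    #include <new>

    struct ControlBlock {
        int flags = 0;
        // ... shared state between client and server ...
    };

    int main() {
        // Memory may come from a shared heap or from malloc; construct in place.
        void* raw = std::malloc(sizeof(ControlBlock));
        ControlBlock* cblk = new (raw) ControlBlock();  // placement new

        cblk->flags = 1;

        // Tear-down mirrors releaseCblk(): run the destructor explicitly, then
        // free only if we own the memory (a client-owned heap is released by
        // clearing the IMemory reference instead).
        bool locallyAllocated = true;
        cblk->~ControlBlock();
        if (locallyAllocated) std::free(raw);
        return 0;
    }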
@@ -215,6 +225,8 @@
uint32_t channelCount() const { return mChannelCount; }
+ size_t frameSize() const { return mFrameSize; }
+
audio_channel_mask_t channelMask() const { return mChannelMask; }
virtual uint32_t sampleRate() const { return mSampleRate; }
@@ -325,6 +337,7 @@
virtual ~PatchProxyBufferProvider() {}
+ virtual bool producesBufferOnDemand() const = 0;
virtual status_t obtainBuffer(Proxy::Buffer* buffer,
const struct timespec *requested = NULL) = 0;
virtual void releaseBuffer(Proxy::Buffer* buffer) = 0;
@@ -347,6 +360,8 @@
mPeerProxy = nullptr;
}
+ bool producesBufferOnDemand() const override { return false; }
+
protected:
const sp<ClientProxy> mProxy;
sp<RefBase> mPeerReferenceHold; // keeps mPeerProxy alive during access.
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 78db80c..23c2209 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -18,12 +18,14 @@
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
+#define ATRACE_TAG ATRACE_TAG_AUDIO
#include "Configuration.h"
#include <linux/futex.h>
#include <math.h>
#include <sys/syscall.h>
#include <utils/Log.h>
+#include <utils/Trace.h>
#include <private/media/AudioTrackShared.h>
@@ -148,7 +150,7 @@
if (client != 0) {
mCblkMemory = client->heap()->allocate(size);
if (mCblkMemory == 0 ||
- (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
+ (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
client->heap()->dump("AudioTrack");
mCblkMemory.clear();
@@ -170,7 +172,7 @@
const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
if (roHeap == 0 ||
(mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
- (mBuffer = mBufferMemory->pointer()) == NULL) {
+ (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
__func__, mId, bufferSize);
if (roHeap != 0) {
@@ -185,7 +187,7 @@
case ALLOC_PIPE:
mBufferMemory = thread->pipeMemory();
// mBuffer is the virtual address as seen from current process (mediaserver),
- // and should normally be coming from mBufferMemory->pointer().
+ // and should normally be coming from mBufferMemory->unsecurePointer().
// However in this case the TrackBase does not reference the buffer directly.
// It should references the buffer via the pipe.
// Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
@@ -237,12 +239,7 @@
{
// delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
mServerProxy.clear();
- if (mCblk != NULL) {
- mCblk->~audio_track_cblk_t(); // destroy our shared-structure.
- if (mClient == 0) {
- free(mCblk);
- }
- }
+ releaseCblk();
mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
if (mClient != 0) {
// Client destructor must run with AudioFlinger client mutex locked
@@ -442,7 +439,7 @@
return mHasOpPlayAudio.load();
}
-// Note this method is never called (and never to be) for audio server / root track
+// Note this method is never called (and never will be) for audio server / patch record tracks
// - not called from constructor due to check on UID,
// - not called from PlayAudioOpCallback because the callback is not installed in this case
void AudioFlinger::PlaybackThread::OpPlayAudioMonitor::checkPlayAudioForUsage()
@@ -511,9 +508,14 @@
uid_t uid,
audio_output_flags_t flags,
track_type type,
- audio_port_handle_t portId)
+ audio_port_handle_t portId,
+ size_t frameCountToBeReady)
: TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
- (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
(sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
sessionId, creatorPid, uid, true /*isOut*/,
(type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
@@ -530,6 +532,7 @@
mVolumeHandler(new media::VolumeHandler(sampleRate)),
mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(uid, attr, id(), streamType)),
// mSinkTimestamp
+ mFrameCountToBeReady(frameCountToBeReady),
mFastIndex(-1),
mCachedVolume(1.0),
/* The track might not play immediately after being active, similarly as if its volume was 0.
@@ -543,25 +546,27 @@
ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
- __func__, mId, sharedBuffer->pointer(), sharedBuffer->size());
+ __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
if (mCblk == NULL) {
return;
}
+ if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
+ ALOGE("%s(%d): no more tracks available", __func__, mId);
+ releaseCblk(); // this makes the track invalid.
+ return;
+ }
+
if (sharedBuffer == 0) {
mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, !isExternalTrack(), sampleRate);
} else {
mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
- mFrameSize);
+ mFrameSize, sampleRate);
}
mServerProxy = mAudioTrackServerProxy;
- if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
- ALOGE("%s(%d): no more tracks available", __func__, mId);
- return;
- }
// only allocate a fast track index if we were able to allocate a normal track name
if (flags & AUDIO_OUTPUT_FLAG_FAST) {
// FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
@@ -820,16 +825,9 @@
}
for (auto& teePatch : mTeePatches) {
RecordThread::PatchRecord* patchRecord = teePatch.patchRecord.get();
-
- size_t framesWritten = writeFrames(patchRecord, sourceBuffer.i8, frameCount);
- // On buffer wrap, the buffer frame count will be less than requested,
- // when this happens a second buffer needs to be used to write the leftover audio
- size_t framesLeft = frameCount - framesWritten;
- if (framesWritten != 0 && framesLeft != 0) {
- framesWritten +=
- writeFrames(patchRecord, sourceBuffer.i8 + framesWritten * mFrameSize, framesLeft);
- framesLeft = frameCount - framesWritten;
- }
+ const size_t framesWritten = patchRecord->writeFrames(
+ sourceBuffer.i8, frameCount, mFrameSize);
+ const size_t framesLeft = frameCount - framesWritten;
ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d can not provide big enough "
"buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->mId,
framesWritten, frameCount, framesLeft);
@@ -837,30 +835,10 @@
auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
using namespace std::chrono_literals;
// Average is ~20us per track, this should virtually never be logged (Logging takes >200us)
- ALOGD_IF(spent > 200us, "%s: took %lldus to intercept %zu tracks", __func__,
+ ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
spent.count(), mTeePatches.size());
}
-size_t AudioFlinger::PlaybackThread::Track::writeFrames(AudioBufferProvider* dest,
- const void* src,
- size_t frameCount) {
- AudioBufferProvider::Buffer patchBuffer;
- patchBuffer.frameCount = frameCount;
- auto status = dest->getNextBuffer(&patchBuffer);
- if (status != NO_ERROR) {
- ALOGW("%s PathRecord getNextBuffer failed with error %d: %s",
- __func__, status, strerror(-status));
- return 0;
- }
- ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
- memcpy(patchBuffer.raw, src, patchBuffer.frameCount * mFrameSize);
- auto framesWritten = patchBuffer.frameCount;
- dest->releaseBuffer(&patchBuffer);
- return framesWritten;
-}
-
-// releaseBuffer() is not overridden
-
// ExtendedAudioBufferProvider interface
// framesReady() may return an approximation of the number of frames if called
@@ -910,8 +888,12 @@
return true;
}
- if (framesReady() >= mServerProxy->getBufferSizeInFrames() ||
- (mCblk->mFlags & CBLK_FORCEREADY)) {
+ size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
+ size_t framesToBeReady = std::min(mFrameCountToBeReady, bufferSizeInFrames);
+
+ if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
+ ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
+ __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
mFillingUpStatus = FS_FILLED;
android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
return true;
@@ -1413,6 +1395,7 @@
void AudioFlinger::PlaybackThread::Track::disable()
{
+ // TODO(b/142394888): the filling status should also be reset to filling
signalClientFlag(CBLK_DISABLED);
}
@@ -1790,12 +1773,14 @@
void *buffer,
size_t bufferSize,
audio_output_flags_t flags,
- const Timeout& timeout)
+ const Timeout& timeout,
+ size_t frameCountToBeReady)
: Track(playbackThread, NULL, streamType,
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, nullptr /* sharedBuffer */,
- AUDIO_SESSION_NONE, getpid(), AID_AUDIOSERVER, flags, TYPE_PATCH),
+ AUDIO_SESSION_NONE, getpid(), AID_AUDIOSERVER, flags, TYPE_PATCH,
+ AUDIO_PORT_HANDLE_NONE, frameCountToBeReady),
PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true),
*playbackThread, timeout)
{
@@ -1810,6 +1795,15 @@
ALOGV("%s(%d)", __func__, mId);
}
+size_t AudioFlinger::PlaybackThread::PatchTrack::framesReady() const
+{
+ if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
+ return std::numeric_limits<size_t>::max();
+ } else {
+ return Track::framesReady();
+ }
+}
+
status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
audio_session_t triggerSession)
{
@@ -1828,9 +1822,19 @@
ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
Proxy::Buffer buf;
buf.mFrameCount = buffer->frameCount;
+ if (ATRACE_ENABLED()) {
+ std::string traceName("PTnReq");
+ traceName += std::to_string(id());
+ ATRACE_INT(traceName.c_str(), buf.mFrameCount);
+ }
status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
buffer->frameCount = buf.mFrameCount;
+ if (ATRACE_ENABLED()) {
+ std::string traceName("PTnObt");
+ traceName += std::to_string(id());
+ ATRACE_INT(traceName.c_str(), buf.mFrameCount);
+ }
if (buf.mFrameCount == 0) {
return WOULD_BLOCK;
}
@@ -1869,7 +1873,6 @@
{
mProxy->releaseBuffer(buffer);
restartIfDisabled();
- android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
}
void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
@@ -1883,6 +1886,112 @@
// ----------------------------------------------------------------------------
// Record
// ----------------------------------------------------------------------------
+
+
+// ----------------------------------------------------------------------------
+// AppOp for audio recording
+// -------------------------------
+
+#undef LOG_TAG
+#define LOG_TAG "AF::OpRecordAudioMonitor"
+
+// static
+sp<AudioFlinger::RecordThread::OpRecordAudioMonitor>
+AudioFlinger::RecordThread::OpRecordAudioMonitor::createIfNeeded(
+ uid_t uid, const audio_attributes_t& attr, const String16& opPackageName)
+{
+ if (isServiceUid(uid)) {
+ ALOGV("not silencing record for service uid:%d pack:%s",
+ uid, String8(opPackageName).string());
+ return nullptr;
+ }
+
+ // Capturing from FM TUNER output is not controlled by OP_RECORD_AUDIO
+ // because it does not affect users' privacy the way capturing from an actual microphone does.
+ if (attr.source == AUDIO_SOURCE_FM_TUNER) {
+ ALOGV("not muting FM TUNER capture for uid %d", uid);
+ return nullptr;
+ }
+
+ if (opPackageName.size() == 0) {
+ Vector<String16> packages;
+ // no package name, happens with SL ES clients
+ // query package manager to find one
+ PermissionController permissionController;
+ permissionController.getPackagesForUid(uid, packages);
+ if (packages.isEmpty()) {
+ return nullptr;
+ } else {
+ ALOGV("using pack:%s for uid:%d", String8(packages[0]).string(), uid);
+ return new OpRecordAudioMonitor(uid, packages[0]);
+ }
+ }
+
+ return new OpRecordAudioMonitor(uid, opPackageName);
+}
+
+AudioFlinger::RecordThread::OpRecordAudioMonitor::OpRecordAudioMonitor(
+ uid_t uid, const String16& opPackageName)
+ : mHasOpRecordAudio(true), mUid(uid), mPackage(opPackageName)
+{
+}
+
+AudioFlinger::RecordThread::OpRecordAudioMonitor::~OpRecordAudioMonitor()
+{
+ if (mOpCallback != 0) {
+ mAppOpsManager.stopWatchingMode(mOpCallback);
+ }
+ mOpCallback.clear();
+}
+
+void AudioFlinger::RecordThread::OpRecordAudioMonitor::onFirstRef()
+{
+ checkRecordAudio();
+ mOpCallback = new RecordAudioOpCallback(this);
+ ALOGV("start watching OP_RECORD_AUDIO for pack:%s", String8(mPackage).string());
+ mAppOpsManager.startWatchingMode(AppOpsManager::OP_RECORD_AUDIO, mPackage, mOpCallback);
+}
+
+bool AudioFlinger::RecordThread::OpRecordAudioMonitor::hasOpRecordAudio() const {
+ return mHasOpRecordAudio.load();
+}
+
+// Called by RecordAudioOpCallback when OP_RECORD_AUDIO is updated in the AppOp callback
+// and in onFirstRef().
+// Note this method is never called (and never will be) for audio server / root tracks
+// because of the UID check in createIfNeeded(). As a result, for those record tracks it is:
+// - not called from the constructor,
+// - not called from RecordAudioOpCallback, because the callback is not installed in this case
+void AudioFlinger::RecordThread::OpRecordAudioMonitor::checkRecordAudio()
+{
+ const int32_t mode = mAppOpsManager.checkOp(AppOpsManager::OP_RECORD_AUDIO,
+ mUid, mPackage);
+ const bool hasIt = (mode == AppOpsManager::MODE_ALLOWED);
+ // verbose logging: only log when the appOp changed
+ ALOGI_IF(hasIt != mHasOpRecordAudio.load(),
+ "OP_RECORD_AUDIO missing, %ssilencing record uid%d pack:%s",
+ hasIt ? "un" : "", mUid, String8(mPackage).string());
+ mHasOpRecordAudio.store(hasIt);
+}
+
+AudioFlinger::RecordThread::OpRecordAudioMonitor::RecordAudioOpCallback::RecordAudioOpCallback(
+ const wp<OpRecordAudioMonitor>& monitor) : mMonitor(monitor)
+{ }
+
+void AudioFlinger::RecordThread::OpRecordAudioMonitor::RecordAudioOpCallback::opChanged(int32_t op,
+ const String16& packageName) {
+ UNUSED(packageName);
+ if (op != AppOpsManager::OP_RECORD_AUDIO) {
+ return;
+ }
+ sp<OpRecordAudioMonitor> monitor = mMonitor.promote();
+ if (monitor != NULL) {
+ monitor->checkRecordAudio();
+ }
+}
+
#undef LOG_TAG
#define LOG_TAG "AF::RecordHandle"
@@ -1954,6 +2063,7 @@
uid_t uid,
audio_input_flags_t flags,
track_type type,
+ const String16& opPackageName,
audio_port_handle_t portId)
: TrackBase(thread, client, attr, sampleRate, format,
channelMask, frameCount, buffer, bufferSize, sessionId,
@@ -1967,7 +2077,8 @@
mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
mRecordBufferConverter(NULL),
mFlags(flags),
- mSilenced(false)
+ mSilenced(false),
+ mOpRecordAudioMonitor(OpRecordAudioMonitor::createIfNeeded(uid, attr, opPackageName))
{
if (mCblk == NULL) {
return;
@@ -2218,6 +2329,14 @@
mServerLatencyMs.store(latencyMs);
}
+bool AudioFlinger::RecordThread::RecordTrack::isSilenced() const {
+ if (mSilenced) {
+ return true;
+ }
+ // The monitor is only created for record tracks that can be silenced.
+ return mOpRecordAudioMonitor ? !mOpRecordAudioMonitor->hasOpRecordAudio() : false;
+}
+
status_t AudioFlinger::RecordThread::RecordTrack::getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones)
{
@@ -2268,7 +2387,7 @@
audio_attributes_t{} /* currently unused for patch track */,
sampleRate, format, channelMask, frameCount,
buffer, bufferSize, AUDIO_SESSION_NONE, getpid(), AID_AUDIOSERVER,
- flags, TYPE_PATCH),
+ flags, TYPE_PATCH, String16()),
PatchTrackBase(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true),
*recordThread, timeout)
{
@@ -2283,6 +2402,39 @@
ALOGV("%s(%d)", __func__, mId);
}
+static size_t writeFramesHelper(
+ AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
+{
+ AudioBufferProvider::Buffer patchBuffer;
+ patchBuffer.frameCount = frameCount;
+ auto status = dest->getNextBuffer(&patchBuffer);
+ if (status != NO_ERROR) {
+ ALOGW("%s PathRecord getNextBuffer failed with error %d: %s",
+ __func__, status, strerror(-status));
+ return 0;
+ }
+ ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
+ memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
+ size_t framesWritten = patchBuffer.frameCount;
+ dest->releaseBuffer(&patchBuffer);
+ return framesWritten;
+}
+
+// static
+size_t AudioFlinger::RecordThread::PatchRecord::writeFrames(
+ AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
+{
+ size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
+ // On buffer wrap, the buffer frame count will be less than requested,
+ // when this happens a second buffer needs to be used to write the leftover audio
+ const size_t framesLeft = frameCount - framesWritten;
+ if (framesWritten != 0 && framesLeft != 0) {
+ framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
+ framesLeft, frameSize);
+ }
+ return framesWritten;
+}
+
// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
AudioBufferProvider::Buffer* buffer)
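writeFrames() above copes with the destination ring buffer wrapping: the first getNextBuffer() call may hand back fewer frames than requested, so a second pass writes the remainder. A self-contained sketch of the same two-pass copy into a plain ring buffer follows (illustrative only, not the AudioBufferProvider API; free-space accounting is omitted):

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Copies 'frames' frames of 'frameSize' bytes into a ring buffer, splitting
    // the copy in two when the write position wraps. Requires a non-empty ring
    // and writePos < ring.size().
    size_t writeToRing(std::vector<uint8_t>& ring, size_t& writePos,
                       const uint8_t* src, size_t frames, size_t frameSize) {
        size_t bytes = std::min(frames * frameSize, ring.size());  // never overrun the ring
        size_t firstChunk = std::min(bytes, ring.size() - writePos);
        std::memcpy(ring.data() + writePos, src, firstChunk);
        std::memcpy(ring.data(), src + firstChunk, bytes - firstChunk);  // wrapped remainder
        writePos = (writePos + bytes) % ring.size();
        return bytes / frameSize;  // frames actually written
    }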
@@ -2294,6 +2446,11 @@
ALOGV_IF(status != NO_ERROR,
"%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
buffer->frameCount = buf.mFrameCount;
+ if (ATRACE_ENABLED()) {
+ std::string traceName("PRnObt");
+ traceName += std::to_string(id());
+ ATRACE_INT(traceName.c_str(), buf.mFrameCount);
+ }
if (buf.mFrameCount == 0) {
return WOULD_BLOCK;
}
@@ -2322,6 +2479,180 @@
mProxy->releaseBuffer(buffer);
}
+#undef LOG_TAG
+#define LOG_TAG "AF::PthrPatchRecord"
+
+static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
+{
+ void *ptr = nullptr;
+ (void)posix_memalign(&ptr, alignment, size);
+ return std::unique_ptr<void, decltype(free)*>(ptr, free);
+}
+
+AudioFlinger::RecordThread::PassthruPatchRecord::PassthruPatchRecord(
+ RecordThread *recordThread,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_format_t format,
+ size_t frameCount,
+ audio_input_flags_t flags)
+ : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
+ nullptr /*buffer*/, 0 /*bufferSize*/, flags),
+ mPatchRecordAudioBufferProvider(*this),
+ mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
+ mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
+{
+ memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
+}
+
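+// Returns the HAL input stream currently attached to the owning RecordThread, or null if the
+// thread or its input has gone away.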
+sp<StreamInHalInterface> AudioFlinger::RecordThread::PassthruPatchRecord::obtainStream(
+ sp<ThreadBase>* thread)
+{
+ *thread = mThread.promote();
+ if (!*thread) return nullptr;
+ RecordThread *recordThread = static_cast<RecordThread*>((*thread).get());
+ Mutex::Autolock _l(recordThread->mLock);
+ return recordThread->mInput ? recordThread->mInput->stream : nullptr;
+}
+
+// PatchProxyBufferProvider methods are called on DirectOutputThread
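+// obtainBuffer either hands out frames left over from the previous HAL read, or performs a new
+// read from the HAL into mSinkBuffer, wakes up the RecordThread side, copies the data into the
+// patch buffer via writeFrames, and then delegates to PatchRecord::obtainBuffer.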
+status_t AudioFlinger::RecordThread::PassthruPatchRecord::obtainBuffer(
+ Proxy::Buffer* buffer, const struct timespec* timeOut)
+{
+ if (mUnconsumedFrames) {
+ buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
+        // mUnconsumedFrames is decreased in releaseBuffer to reflect the actual frame consumption.
+ return PatchRecord::obtainBuffer(buffer, timeOut);
+ }
+
+ // Otherwise, execute a read from HAL and write into the buffer.
+ nsecs_t startTimeNs = 0;
+ if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
+ // Will need to correct timeOut by elapsed time.
+ startTimeNs = systemTime();
+ }
+ const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
+ buffer->mFrameCount = 0;
+ buffer->mRaw = nullptr;
+ sp<ThreadBase> thread;
+ sp<StreamInHalInterface> stream = obtainStream(&thread);
+ if (!stream) return NO_INIT; // If there is no stream, RecordThread is not reading.
+
+ status_t result = NO_ERROR;
+ size_t bytesRead = 0;
+ {
+ ATRACE_NAME("read");
+ result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
+ if (result != NO_ERROR) goto stream_error;
+ if (bytesRead == 0) return NO_ERROR;
+ }
+
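+    // Publish the read result and wake up the RecordThread side blocked in 'read'.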
+ {
+ std::lock_guard<std::mutex> lock(mReadLock);
+ mReadBytes += bytesRead;
+ mReadError = NO_ERROR;
+ }
+ mReadCV.notify_one();
+    // writeFrames handles wraparound and should write all the provided frames.
+    // If it does not, something is wrong with the client/server buffer of the software patch.
+ buffer->mFrameCount = writeFrames(
+ &mPatchRecordAudioBufferProvider,
+ mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
+ ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
+ "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
+ mUnconsumedFrames = buffer->mFrameCount;
+ struct timespec newTimeOut;
+ if (startTimeNs) {
+ // Correct the timeout by elapsed time.
+ nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
+ if (newTimeOutNs < 0) newTimeOutNs = 0;
+ newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
+ newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
+ timeOut = &newTimeOut;
+ }
+ return PatchRecord::obtainBuffer(buffer, timeOut);
+
+stream_error:
+ stream->standby();
+ {
+ std::lock_guard<std::mutex> lock(mReadLock);
+ mReadError = result;
+ }
+ mReadCV.notify_one();
+ return result;
+}
+
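+// The write side reports consumed frames here; any remainder is served by the next call to
+// obtainBuffer before another HAL read is issued.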
+void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
+{
+ if (buffer->mFrameCount <= mUnconsumedFrames) {
+ mUnconsumedFrames -= buffer->mFrameCount;
+ } else {
+ ALOGW("Write side has consumed more frames than we had: %zu > %zu",
+ buffer->mFrameCount, mUnconsumedFrames);
+ mUnconsumedFrames = 0;
+ }
+ PatchRecord::releaseBuffer(buffer);
+}
+
+// AudioBufferProvider and Source methods are called on RecordThread
+// 'read' emulates actual audio data with zeros. This is OK as 'getNextBuffer'
+// and 'releaseBuffer' are stubbed out and ignore their input.
+// It's not possible to retrieve actual data here without blocking 'obtainBuffer'
+// until we copy it.
+status_t AudioFlinger::RecordThread::PassthruPatchRecord::read(
+ void* buffer, size_t bytes, size_t* read)
+{
+ bytes = std::min(bytes, mFrameCount * mFrameSize);
+ {
+ std::unique_lock<std::mutex> lock(mReadLock);
+ mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
+ if (mReadError != NO_ERROR) {
+ mLastReadFrames = 0;
+ return mReadError;
+ }
+ *read = std::min(bytes, mReadBytes);
+ mReadBytes -= *read;
+ }
+ mLastReadFrames = *read / mFrameSize;
+ memset(buffer, 0, *read);
+ return 0;
+}
+
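+// The capture position is queried directly from the HAL stream used by the RecordThread.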
+status_t AudioFlinger::RecordThread::PassthruPatchRecord::getCapturePosition(
+ int64_t* frames, int64_t* time)
+{
+ sp<ThreadBase> thread;
+ sp<StreamInHalInterface> stream = obtainStream(&thread);
+ return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
+}
+
+status_t AudioFlinger::RecordThread::PassthruPatchRecord::standby()
+{
+ // RecordThread issues 'standby' command in two major cases:
+ // 1. Error on read--this case is handled in 'obtainBuffer'.
+ // 2. Track is stopping--as PassthruPatchRecord assumes continuous
+ // output, this can only happen when the software patch
+ // is being torn down. In this case, the RecordThread
+ // will terminate and close the HAL stream.
+ return 0;
+}
+
+// Since the buffer is filled in obtainBuffer, here we only simulate data consumption.
+status_t AudioFlinger::RecordThread::PassthruPatchRecord::getNextBuffer(
+ AudioBufferProvider::Buffer* buffer)
+{
+ buffer->frameCount = mLastReadFrames;
+ buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
+ return NO_ERROR;
+}
+
+void AudioFlinger::RecordThread::PassthruPatchRecord::releaseBuffer(
+ AudioBufferProvider::Buffer* buffer)
+{
+ buffer->frameCount = 0;
+ buffer->raw = nullptr;
+}
+
// ----------------------------------------------------------------------------
#undef LOG_TAG
#define LOG_TAG "AF::MmapTrack"
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 30f29d6..0d3e614 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -19,6 +19,7 @@
#include <media/AudioSystem.h>
#include <media/AudioPolicy.h>
+#include <media/DeviceDescriptorBase.h>
#include <utils/String8.h>
namespace android {
@@ -258,7 +259,7 @@
virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
std::vector<audio_format_t> *formats) = 0;
- virtual void setAppState(uid_t uid, app_state_t state) = 0;
+ virtual void setAppState(audio_port_handle_t portId, app_state_t state) = 0;
virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
@@ -296,8 +297,7 @@
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
+ const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags) = 0;
// creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
diff --git a/services/audiopolicy/TEST_MAPPING b/services/audiopolicy/TEST_MAPPING
index a94fd87..eb6c19e 100644
--- a/services/audiopolicy/TEST_MAPPING
+++ b/services/audiopolicy/TEST_MAPPING
@@ -2,9 +2,6 @@
"presubmit": [
{
"name": "audiopolicy_tests"
- },
- {
- "name": "systemaudio_tests"
}
]
}
diff --git a/services/audiopolicy/common/Android.bp b/services/audiopolicy/common/Android.bp
index a925b9a..6e0d2f6 100644
--- a/services/audiopolicy/common/Android.bp
+++ b/services/audiopolicy/common/Android.bp
@@ -1,4 +1,7 @@
cc_library_headers {
name: "libaudiopolicycommon",
+ header_libs: [
+ "libaudiofoundation_headers",
+ ],
export_include_dirs: ["include"],
}
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 1dbd1eb..7c8ce83 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -17,10 +17,13 @@
#pragma once
#include <media/AudioCommonTypes.h>
+#include <media/AudioContainers.h>
#include <system/audio.h>
#include <utils/Log.h>
#include <math.h>
+#include "policy.h"
+
namespace android {
/**
@@ -82,43 +85,26 @@
*
* @return subset of device required to limit the number of volume category per device
*/
- static audio_devices_t getDeviceForVolume(audio_devices_t device)
+ static audio_devices_t getDeviceForVolume(const android::DeviceTypeSet& deviceTypes)
{
- if (device == AUDIO_DEVICE_NONE) {
+ if (deviceTypes.empty()) {
// this happens when forcing a route update and no track is active on an output.
// In this case the returned category is not important.
- device = AUDIO_DEVICE_OUT_SPEAKER;
- } else if (popcount(device) > 1) {
- // Multiple device selection is either:
- // - speaker + one other device: give priority to speaker in this case.
- // - one A2DP device + another device: happens with duplicated output. In this case
- // retain the device on the A2DP output as the other must not correspond to an active
- // selection if not the speaker.
- // - HDMI-CEC system audio mode only output: give priority to available item in order.
- if (device & AUDIO_DEVICE_OUT_SPEAKER) {
- device = AUDIO_DEVICE_OUT_SPEAKER;
- } else if (device & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
- device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
- } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
- device = AUDIO_DEVICE_OUT_HDMI_ARC;
- } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
- device = AUDIO_DEVICE_OUT_AUX_LINE;
- } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
- device = AUDIO_DEVICE_OUT_SPDIF;
- } else {
- device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
- }
+ return AUDIO_DEVICE_OUT_SPEAKER;
}
+ audio_devices_t deviceType = apm_extract_one_audio_device(deviceTypes);
+
/*SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control*/
- if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
- device = AUDIO_DEVICE_OUT_SPEAKER;
+ if (deviceType == AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
+ deviceType = AUDIO_DEVICE_OUT_SPEAKER;
+ }
- ALOGW_IF(popcount(device) != 1,
- "getDeviceForVolume() invalid device combination: %08x",
- device);
+ ALOGW_IF(deviceType == AUDIO_DEVICE_NONE,
+ "getDeviceForVolume() invalid device combination: %s, returning AUDIO_DEVICE_NONE",
+ android::dumpDeviceTypes(deviceTypes).c_str());
- return device;
+ return deviceType;
}
/**
@@ -128,9 +114,9 @@
*
* @return device category.
*/
- static device_category getDeviceCategory(audio_devices_t device)
+ static device_category getDeviceCategory(const android::DeviceTypeSet& deviceTypes)
{
- switch(getDeviceForVolume(device)) {
+ switch(getDeviceForVolume(deviceTypes)) {
case AUDIO_DEVICE_OUT_EARPIECE:
return DEVICE_CATEGORY_EARPIECE;
case AUDIO_DEVICE_OUT_WIRED_HEADSET:
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index 605fc1c..0537365 100644
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -19,6 +19,8 @@
#include <system/audio.h>
#include <vector>
+#include <media/AudioContainers.h>
+
namespace android {
using StreamTypeVector = std::vector<audio_stream_type_t>;
@@ -43,14 +45,6 @@
#define MAX_MIXER_CHANNEL_COUNT FCC_8
/**
- * A device mask for all audio input and output devices where matching inputs/outputs on device
- * type alone is not enough: the address must match too
- */
-#define APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL (AUDIO_DEVICE_OUT_REMOTE_SUBMIX|AUDIO_DEVICE_OUT_BUS)
-
-#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_BUS)
-
-/**
* Alias to AUDIO_DEVICE_OUT_DEFAULT defined for clarification when this value is used by volume
* control APIs (e.g setStreamVolumeIndex().
*/
@@ -71,6 +65,34 @@
}
/**
+ * Check whether the output device type is one
+ * where addresses are used to distinguish between one connected device and another
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device type needs to be distinguished by address, false otherwise.
+ */
+static inline bool apm_audio_out_device_distinguishes_on_address(audio_devices_t device)
+{
+ return device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ||
+ device == AUDIO_DEVICE_OUT_BUS;
+}
+
+/**
+ * Check whether the input device type is one
+ * where addresses are used to distinguish between one connected device and another
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device type needs to be distinguished by address, false otherwise.
+ */
+static inline bool apm_audio_in_device_distinguishes_on_address(audio_devices_t device)
+{
+ return device == AUDIO_DEVICE_IN_REMOTE_SUBMIX ||
+ device == AUDIO_DEVICE_IN_BUS;
+}
+
+/**
* Check whether the device type is one
* where addresses are used to distinguish between one connected device and another
*
@@ -80,10 +102,8 @@
*/
static inline bool device_distinguishes_on_address(audio_devices_t device)
{
- return (((device & AUDIO_DEVICE_BIT_IN) != 0) &&
- ((~AUDIO_DEVICE_BIT_IN & device & APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL) != 0)) ||
- (((device & AUDIO_DEVICE_BIT_IN) == 0) &&
- ((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
+ return apm_audio_in_device_distinguishes_on_address(device) ||
+ apm_audio_out_device_distinguishes_on_address(device);
}
/**
@@ -95,10 +115,7 @@
*/
static inline bool device_has_encoding_capability(audio_devices_t device)
{
- if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
- return true;
- }
- return false;
+ return audio_is_a2dp_out_device(device);
}
/**
@@ -184,3 +201,43 @@
{
return hasStream(streams, AUDIO_STREAM_VOICE_CALL);
}
+
+/**
+ * @brief extract the single most relevant device from a multiple-device selection
+ * @param deviceTypes collection of audio device types
+ * @return the device type that is selected
+ */
+static inline audio_devices_t apm_extract_one_audio_device(
+ const android::DeviceTypeSet& deviceTypes) {
+ if (deviceTypes.empty()) {
+ return AUDIO_DEVICE_NONE;
+ } else if (deviceTypes.size() == 1) {
+ return *(deviceTypes.begin());
+ } else {
+ // Multiple device selection is either:
+ // - speaker + one other device: give priority to speaker in this case.
+ // - one A2DP device + another device: happens with duplicated output. In this case
+ // retain the device on the A2DP output as the other must not correspond to an active
+ // selection if not the speaker.
+ // - HDMI-CEC system audio mode only output: give priority to available item in order.
+ if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) != 0) {
+ return AUDIO_DEVICE_OUT_SPEAKER;
+ } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER_SAFE) != 0) {
+ return AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+ } else if (deviceTypes.count(AUDIO_DEVICE_OUT_HDMI_ARC) != 0) {
+ return AUDIO_DEVICE_OUT_HDMI_ARC;
+ } else if (deviceTypes.count(AUDIO_DEVICE_OUT_AUX_LINE) != 0) {
+ return AUDIO_DEVICE_OUT_AUX_LINE;
+ } else if (deviceTypes.count(AUDIO_DEVICE_OUT_SPDIF) != 0) {
+ return AUDIO_DEVICE_OUT_SPDIF;
+ } else {
+ std::vector<audio_devices_t> a2dpDevices = android::Intersection(
+ deviceTypes, android::getAudioDeviceOutAllA2dpSet());
+ if (a2dpDevices.empty() || a2dpDevices.size() > 1) {
+ ALOGW("%s invalid device combination: %s",
+ __func__, android::dumpDeviceTypes(deviceTypes).c_str());
+ }
+ return a2dpDevices.empty() ? AUDIO_DEVICE_NONE : a2dpDevices[0];
+ }
+ }
+}
\ No newline at end of file
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index ebfba83..fad3c5b 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -7,14 +7,14 @@
"src/AudioOutputDescriptor.cpp",
"src/AudioPatch.cpp",
"src/AudioPolicyMix.cpp",
- "src/AudioPort.cpp",
- "src/AudioProfile.cpp",
+ "src/AudioProfileVectorHelper.cpp",
"src/AudioRoute.cpp",
"src/ClientDescriptor.cpp",
"src/DeviceDescriptor.cpp",
"src/EffectDescriptor.cpp",
"src/HwModule.cpp",
"src/IOProfile.cpp",
+ "src/PolicyAudioPort.cpp",
"src/Serializer.cpp",
"src/SoundTriggerSession.cpp",
"src/TypeConverter.cpp",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
index a948ea9..b692592 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioCollections.h
@@ -25,20 +25,15 @@
namespace android {
-class AudioPort;
+class PolicyAudioPort;
class AudioRoute;
-class AudioPortVector : public Vector<sp<AudioPort> >
-{
-public:
- sp<AudioPort> findByTagName(const String8 &tagName) const;
-};
+using PolicyAudioPortVector = Vector<sp<PolicyAudioPort>>;
+using AudioRouteVector = Vector<sp<AudioRoute>>;
+sp<PolicyAudioPort> findByTagName(const PolicyAudioPortVector& policyAudioPortVector,
+ const std::string &tagName);
-class AudioRouteVector : public Vector<sp<AudioRoute> >
-{
-public:
- void dump(String8 *dst, int spaces) const;
-};
+void dumpAudioRouteVector(const AudioRouteVector& audioRouteVector, String8 *dst, int spaces);
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 37f9d14..c67a006 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -21,11 +21,11 @@
#include <utils/SortedVector.h>
#include <utils/KeyedVector.h>
#include "AudioIODescriptorInterface.h"
-#include "AudioPort.h"
#include "ClientDescriptor.h"
#include "DeviceDescriptor.h"
#include "EffectDescriptor.h"
#include "IOProfile.h"
+#include "PolicyAudioPort.h"
namespace android {
@@ -34,13 +34,17 @@
// descriptor for audio inputs. Used to maintain current configuration of each opened audio input
// and keep track of the usage of this input.
-class AudioInputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
- , public ClientMapHandler<RecordClientDescriptor>
+class AudioInputDescriptor: public AudioPortConfig,
+ public PolicyAudioPortConfig,
+ public AudioIODescriptorInterface,
+ public ClientMapHandler<RecordClientDescriptor>
{
public:
- explicit AudioInputDescriptor(const sp<IOProfile>& profile,
- AudioPolicyClientInterface *clientInterface);
- audio_port_handle_t getId() const;
+ AudioInputDescriptor(const sp<IOProfile>& profile,
+ AudioPolicyClientInterface *clientInterface);
+
+ virtual ~AudioInputDescriptor() = default;
+
audio_module_handle_t getModuleHandle() const;
audio_devices_t getDeviceType() const { return (mDevice != nullptr) ?
@@ -56,9 +60,18 @@
wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
const sp<IOProfile> mProfile; // I/O profile this output derives from
+ // PolicyAudioPortConfig
+ virtual sp<PolicyAudioPort> getPolicyAudioPort() const {
+ return mProfile;
+ }
+
+ // AudioPortConfig
+ virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+ struct audio_port_config *backupConfig = NULL);
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const;
virtual sp<AudioPort> getAudioPort() const { return mProfile; }
+
void toAudioPort(struct audio_port *port) const;
void setPreemptedSessions(const SortedVector<audio_session_t>& sessions);
SortedVector<audio_session_t> getPreemptedSessions() const;
@@ -97,7 +110,7 @@
RecordClientVector clientsList(bool activeOnly = false,
audio_source_t source = AUDIO_SOURCE_DEFAULT, bool preferredDeviceOnly = false) const;
- void setAppState(uid_t uid, app_state_t state);
+ void setAppState(audio_port_handle_t portId, app_state_t state);
// implementation of ClientMapHandler<RecordClientDescriptor>
void addClient(const sp<RecordClientDescriptor> &client) override;
@@ -111,7 +124,6 @@
void updateClientRecordingConfiguration(int event, const sp<RecordClientDescriptor>& client);
audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
sp<DeviceDescriptor> mDevice = nullptr; /**< current device this input is routed to */
// Because a preemptible capture session can preempt another one, we end up in an endless loop
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index cd54085..41f7dfc 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -21,14 +21,15 @@
#include <sys/types.h>
+#include <media/AudioContainers.h>
#include <utils/Errors.h>
#include <utils/Timers.h>
#include <utils/KeyedVector.h>
#include <system/audio.h>
#include "AudioIODescriptorInterface.h"
-#include "AudioPort.h"
#include "ClientDescriptor.h"
#include "DeviceDescriptor.h"
+#include "PolicyAudioPort.h"
#include <vector>
namespace android {
@@ -138,27 +139,28 @@
// descriptor for audio outputs. Used to maintain current configuration of each opened audio output
// and keep track of the usage of this output by each audio stream type.
-class AudioOutputDescriptor: public AudioPortConfig, public AudioIODescriptorInterface
- , public ClientMapHandler<TrackClientDescriptor>
+class AudioOutputDescriptor: public AudioPortConfig,
+ public PolicyAudioPortConfig,
+ public AudioIODescriptorInterface,
+ public ClientMapHandler<TrackClientDescriptor>
{
public:
- AudioOutputDescriptor(const sp<AudioPort>& port,
+ AudioOutputDescriptor(const sp<PolicyAudioPort>& policyAudioPort,
AudioPolicyClientInterface *clientInterface);
virtual ~AudioOutputDescriptor() {}
void dump(String8 *dst) const override;
void log(const char* indent);
- audio_port_handle_t getId() const;
virtual DeviceVector devices() const { return mDevices; }
bool sharesHwModuleWith(const sp<AudioOutputDescriptor>& outputDesc);
virtual DeviceVector supportedDevices() const { return mDevices; }
virtual bool isDuplicated() const { return false; }
virtual uint32_t latency() { return 0; }
- virtual bool isFixedVolume(audio_devices_t device);
+ virtual bool isFixedVolume(const DeviceTypeSet& deviceTypes);
virtual bool setVolume(float volumeDb,
VolumeSource volumeSource, const StreamTypeVector &streams,
- audio_devices_t device,
+ const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
bool force);
@@ -245,9 +247,19 @@
mRoutingActivities[ps].setMutedByDevice(isMuted);
}
+ // PolicyAudioPortConfig
+ virtual sp<PolicyAudioPort> getPolicyAudioPort() const
+ {
+ return mPolicyAudioPort;
+ }
+
+ // AudioPortConfig
+ virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+ struct audio_port_config *backupConfig = NULL);
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const;
- virtual sp<AudioPort> getAudioPort() const { return mPort; }
+ virtual sp<AudioPort> getAudioPort() const { return mPolicyAudioPort->asAudioPort(); }
+
virtual void toAudioPort(struct audio_port *port) const;
audio_module_handle_t getModuleHandle() const;
@@ -289,11 +301,10 @@
wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
protected:
- const sp<AudioPort> mPort;
+ const sp<PolicyAudioPort> mPolicyAudioPort;
AudioPolicyClientInterface * const mClientInterface;
uint32_t mGlobalActiveCount = 0; // non-client-specific active count
audio_patch_handle_t mPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
// The ActiveClients shows the clients that contribute to the @VolumeSource counts
// and may include upstream clients from a duplicating thread.
@@ -319,10 +330,10 @@
void setDevices(const DeviceVector &devices) { mDevices = devices; }
bool sharesHwModuleWith(const sp<SwAudioOutputDescriptor>& outputDesc);
virtual DeviceVector supportedDevices() const;
- virtual bool deviceSupportsEncodedFormats(audio_devices_t device);
+ virtual bool devicesSupportEncodedFormats(const DeviceTypeSet& deviceTypes);
virtual uint32_t latency();
virtual bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
- virtual bool isFixedVolume(audio_devices_t device);
+ virtual bool isFixedVolume(const DeviceTypeSet& deviceTypes);
sp<SwAudioOutputDescriptor> subOutput1() { return mOutput1; }
sp<SwAudioOutputDescriptor> subOutput2() { return mOutput2; }
void setClientActive(const sp<TrackClientDescriptor>& client, bool active) override;
@@ -334,7 +345,7 @@
}
virtual bool setVolume(float volumeDb,
VolumeSource volumeSource, const StreamTypeVector &streams,
- audio_devices_t device,
+ const DeviceTypeSet& device,
uint32_t delayMs,
bool force);
@@ -408,7 +419,7 @@
virtual bool setVolume(float volumeDb,
VolumeSource volumeSource, const StreamTypeVector &streams,
- audio_devices_t device,
+ const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
bool force);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index 31c5041..56596f5 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -19,16 +19,17 @@
#include <unordered_map>
#include <unordered_set>
-#include <AudioPort.h>
#include <AudioPatch.h>
#include <DeviceDescriptor.h>
#include <IOProfile.h>
#include <HwModule.h>
+#include <PolicyAudioPort.h>
#include <AudioInputDescriptor.h>
#include <AudioOutputDescriptor.h>
#include <AudioPolicyMix.h>
#include <EffectDescriptor.h>
#include <SoundTriggerSession.h>
+#include <media/AudioProfile.h>
namespace android {
@@ -118,9 +119,9 @@
mSource = "AudioPolicyConfig::setDefault";
mEngineLibraryNameSuffix = kDefaultEngineLibraryNameSuffix;
mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
- mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic());
+ mDefaultOutputDevice->addAudioProfile(AudioProfile::createFullDynamic(gDynamicFormat));
sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
- defaultInputDevice->addAudioProfile(AudioProfile::createFullDynamic());
+ defaultInputDevice->addAudioProfile(AudioProfile::createFullDynamic(gDynamicFormat));
sp<AudioProfile> micProfile = new AudioProfile(
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_MONO, 8000);
defaultInputDevice->addAudioProfile(micProfile);
@@ -132,14 +133,14 @@
mDefaultOutputDevice->attach(module);
defaultInputDevice->attach(module);
- sp<OutputProfile> outProfile = new OutputProfile(String8("primary"));
+ sp<OutputProfile> outProfile = new OutputProfile("primary");
outProfile->addAudioProfile(
new AudioProfile(AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 44100));
outProfile->addSupportedDevice(mDefaultOutputDevice);
outProfile->setFlags(AUDIO_OUTPUT_FLAG_PRIMARY);
module->addOutputProfile(outProfile);
- sp<InputProfile> inProfile = new InputProfile(String8("primary"));
+ sp<InputProfile> inProfile = new InputProfile("primary");
inProfile->addAudioProfile(micProfile);
inProfile->addSupportedDevice(defaultInputDevice);
module->addInputProfile(inProfile);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
deleted file mode 100644
index 641cebf..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "AudioCollections.h"
-#include "AudioProfile.h"
-#include "HandleGenerator.h"
-#include <media/AudioGain.h>
-#include <utils/String8.h>
-#include <utils/Vector.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <system/audio.h>
-#include <cutils/config_utils.h>
-
-namespace android {
-
-class HwModule;
-class AudioRoute;
-
-class AudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
-{
-public:
- AudioPort(const String8& name, audio_port_type_t type, audio_port_role_t role) :
- mName(name), mType(type), mRole(role), mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
-
- virtual ~AudioPort() {}
-
- void setName(const String8 &name) { mName = name; }
- const String8 &getName() const { return mName; }
-
- audio_port_type_t getType() const { return mType; }
- audio_port_role_t getRole() const { return mRole; }
-
- virtual const String8 getTagName() const = 0;
-
- void setGains(const AudioGains &gains) { mGains = gains; }
- const AudioGains &getGains() const { return mGains; }
-
- virtual void setFlags(uint32_t flags)
- {
- //force direct flag if offload flag is set: offloading implies a direct output stream
- // and all common behaviors are driven by checking only the direct flag
- // this should normally be set appropriately in the policy configuration file
- if (mRole == AUDIO_PORT_ROLE_SOURCE && (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
- flags |= AUDIO_OUTPUT_FLAG_DIRECT;
- }
- mFlags = flags;
- }
- uint32_t getFlags() const { return mFlags; }
-
- virtual void attach(const sp<HwModule>& module);
- virtual void detach();
- bool isAttached() { return mModule != 0; }
-
- // Audio port IDs are in a different namespace than AudioFlinger unique IDs
- static audio_port_handle_t getNextUniqueId();
-
- virtual void toAudioPort(struct audio_port *port) const;
-
- virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
-
- void addAudioProfile(const sp<AudioProfile> &profile) { mProfiles.add(profile); }
-
- void setAudioProfiles(const AudioProfileVector &profiles) { mProfiles = profiles; }
- AudioProfileVector &getAudioProfiles() { return mProfiles; }
-
- bool hasValidAudioProfile() const { return mProfiles.hasValidProfile(); }
-
- bool hasDynamicAudioProfile() const { return mProfiles.hasDynamicProfile(); }
-
- // searches for an exact match
- virtual status_t checkExactAudioProfile(const struct audio_port_config *config) const;
-
- // searches for a compatible match, currently implemented for input
- // parameters are input|output, returned value is the best match.
- status_t checkCompatibleAudioProfile(uint32_t &samplingRate,
- audio_channel_mask_t &channelMask,
- audio_format_t &format) const
- {
- return mProfiles.checkCompatibleProfile(samplingRate, channelMask, format, mType, mRole);
- }
-
- void clearAudioProfiles() { return mProfiles.clearProfiles(); }
-
- status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
-
- void pickAudioProfile(uint32_t &samplingRate,
- audio_channel_mask_t &channelMask,
- audio_format_t &format) const;
-
- static const audio_format_t sPcmFormatCompareTable[];
-
- static int compareFormats(audio_format_t format1, audio_format_t format2);
-
- // Used to select an audio HAL output stream with a sample format providing the
- // less degradation for a given AudioTrack sample format.
- static bool isBetterFormatMatch(audio_format_t newFormat,
- audio_format_t currentFormat,
- audio_format_t targetFormat);
- static uint32_t formatDistance(audio_format_t format1,
- audio_format_t format2);
- static const uint32_t kFormatDistanceMax = 4;
-
- audio_module_handle_t getModuleHandle() const;
- uint32_t getModuleVersionMajor() const;
- const char *getModuleName() const;
- sp<HwModule> getModule() const { return mModule; }
-
- bool useInputChannelMask() const
- {
- return ((mType == AUDIO_PORT_TYPE_DEVICE) && (mRole == AUDIO_PORT_ROLE_SOURCE)) ||
- ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SINK));
- }
-
- inline bool isDirectOutput() const
- {
- return (mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
- (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
- }
-
- void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
- const AudioRouteVector &getRoutes() const { return mRoutes; }
-
- void dump(String8 *dst, int spaces, bool verbose = true) const;
-
- void log(const char* indent) const;
-
- AudioGains mGains; // gain controllers
-
-private:
- void pickChannelMask(audio_channel_mask_t &channelMask,
- const ChannelMaskSet &channelMasks) const;
- void pickSamplingRate(uint32_t &rate, const SampleRateSet &samplingRates) const;
-
- sp<HwModule> mModule; // audio HW module exposing this I/O stream
- String8 mName;
- audio_port_type_t mType;
- audio_port_role_t mRole;
- uint32_t mFlags; // attribute flags mask (e.g primary output, direct output...).
- AudioProfileVector mProfiles; // AudioProfiles supported by this port (format, Rates, Channels)
- AudioRouteVector mRoutes; // Routes involving this port
-};
-
-class AudioPortConfig : public virtual RefBase
-{
-public:
- status_t applyAudioPortConfig(const struct audio_port_config *config,
- struct audio_port_config *backupConfig = NULL);
- virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
- const struct audio_port_config *srcConfig = NULL) const = 0;
- virtual sp<AudioPort> getAudioPort() const = 0;
- virtual bool hasSameHwModuleAs(const sp<AudioPortConfig>& other) const {
- return (other != 0) && (other->getAudioPort() != 0) && (getAudioPort() != 0) &&
- (other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
- }
- bool hasGainController(bool canUseForVolume = false) const;
-
- unsigned int mSamplingRate = 0u;
- audio_format_t mFormat = AUDIO_FORMAT_INVALID;
- audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
- struct audio_gain_config mGain = { .index = -1 };
- union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
deleted file mode 100644
index ea56729..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/AudioProfile.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <media/AudioContainers.h>
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-
-#include "policy.h"
-
-namespace android {
-
-class AudioProfile : public virtual RefBase
-{
-public:
- static sp<AudioProfile> createFullDynamic();
-
- AudioProfile(audio_format_t format, audio_channel_mask_t channelMasks, uint32_t samplingRate);
- AudioProfile(audio_format_t format,
- const ChannelMaskSet &channelMasks,
- const SampleRateSet &samplingRateCollection);
-
- audio_format_t getFormat() const { return mFormat; }
- const ChannelMaskSet &getChannels() const { return mChannelMasks; }
- const SampleRateSet &getSampleRates() const { return mSamplingRates; }
- void setChannels(const ChannelMaskSet &channelMasks);
- void setSampleRates(const SampleRateSet &sampleRates);
-
- void clear();
- bool isValid() const { return hasValidFormat() && hasValidRates() && hasValidChannels(); }
- bool supportsChannels(audio_channel_mask_t channels) const
- {
- return mChannelMasks.count(channels) != 0;
- }
- bool supportsRate(uint32_t rate) const { return mSamplingRates.count(rate) != 0; }
-
- status_t checkExact(uint32_t rate, audio_channel_mask_t channels, audio_format_t format) const;
- status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
- audio_channel_mask_t &updatedChannelMask,
- audio_port_type_t portType,
- audio_port_role_t portRole) const;
- status_t checkCompatibleSamplingRate(uint32_t samplingRate,
- uint32_t &updatedSamplingRate) const;
-
- bool hasValidFormat() const { return mFormat != AUDIO_FORMAT_DEFAULT; }
- bool hasValidRates() const { return !mSamplingRates.empty(); }
- bool hasValidChannels() const { return !mChannelMasks.empty(); }
-
- void setDynamicChannels(bool dynamic) { mIsDynamicChannels = dynamic; }
- bool isDynamicChannels() const { return mIsDynamicChannels; }
-
- void setDynamicRate(bool dynamic) { mIsDynamicRate = dynamic; }
- bool isDynamicRate() const { return mIsDynamicRate; }
-
- void setDynamicFormat(bool dynamic) { mIsDynamicFormat = dynamic; }
- bool isDynamicFormat() const { return mIsDynamicFormat; }
-
- bool isDynamic() { return mIsDynamicFormat || mIsDynamicChannels || mIsDynamicRate; }
-
- void dump(String8 *dst, int spaces) const;
-
-private:
- String8 mName;
- audio_format_t mFormat;
- ChannelMaskSet mChannelMasks;
- SampleRateSet mSamplingRates;
-
- bool mIsDynamicFormat = false;
- bool mIsDynamicChannels = false;
- bool mIsDynamicRate = false;
-};
-
-
-class AudioProfileVector : public std::vector<sp<AudioProfile> >
-{
-public:
- ssize_t add(const sp<AudioProfile> &profile);
- // This API is intended to be used by the policy manager once retrieving capabilities
- // for a profile with dynamic format, rate and channels attributes
- ssize_t addProfileFromHal(const sp<AudioProfile> &profileToAdd);
- void appendProfiles(const AudioProfileVector& audioProfiles) {
- insert(end(), audioProfiles.begin(), audioProfiles.end());
- }
-
- status_t checkExactProfile(uint32_t samplingRate, audio_channel_mask_t channelMask,
- audio_format_t format) const;
- status_t checkCompatibleProfile(uint32_t &samplingRate, audio_channel_mask_t &channelMask,
- audio_format_t &format,
- audio_port_type_t portType,
- audio_port_role_t portRole) const;
- void clearProfiles();
- // Assuming that this profile vector contains input profiles,
- // find the best matching config from 'outputProfiles', according to
- // the given preferences for audio formats and channel masks.
- // Note: std::vectors are used because specialized containers for formats
- // and channels can be sorted and use their own ordering.
- status_t findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
- const std::vector<audio_format_t>& preferredFormats, // order: most pref -> least pref
- const std::vector<audio_channel_mask_t>& preferredOutputChannels,
- bool preferHigherSamplingRates,
- audio_config_base *bestOutputConfig) const;
-
- sp<AudioProfile> getFirstValidProfile() const;
- sp<AudioProfile> getFirstValidProfileFor(audio_format_t format) const;
- bool hasValidProfile() const { return getFirstValidProfile() != 0; }
-
- FormatVector getSupportedFormats() const;
- bool hasDynamicChannelsFor(audio_format_t format) const;
- bool hasDynamicFormat() const { return getProfileFor(gDynamicFormat) != 0; }
- bool hasDynamicProfile() const;
- bool hasDynamicRateFor(audio_format_t format) const;
-
- // One audio profile will be added for each format supported by Audio HAL
- void setFormats(const FormatVector &formats);
-
- void dump(String8 *dst, int spaces) const;
-
-private:
- sp<AudioProfile> getProfileFor(audio_format_t format) const;
- void setSampleRatesFor(const SampleRateSet &sampleRates, audio_format_t format);
- void setChannelsFor(const ChannelMaskSet &channelMasks, audio_format_t format);
-};
-
-bool operator == (const AudioProfile &left, const AudioProfile &right);
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioProfileVectorHelper.h b/services/audiopolicy/common/managerdefinitions/include/AudioProfileVectorHelper.h
new file mode 100644
index 0000000..f84bda7
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioProfileVectorHelper.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <media/AudioProfile.h>
+#include <system/audio.h>
+
+namespace android {
+
+void sortAudioProfiles(AudioProfileVector &audioProfileVector);
+
+ssize_t addAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+ const sp<AudioProfile> &profile);
+
+// One audio profile will be added for each format supported by Audio HAL
+void addProfilesForFormats(AudioProfileVector &audioProfileVector,
+ const FormatVector &formatVector);
+
+// This API is intended to be used by the policy manager after retrieving capabilities
+// for a profile with dynamic format, rate and channel attributes.
+void addDynamicAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+ const sp<AudioProfile> &profileToAdd);
+
+void appendAudioProfiles(AudioProfileVector &audioProfileVector,
+ const AudioProfileVector &audioProfileVectorToAppend);
+
+status_t checkExactProfile(const AudioProfileVector &audioProfileVector,
+ const uint32_t samplingRate,
+ audio_channel_mask_t channelMask,
+ audio_format_t format);
+
+status_t checkCompatibleProfile(const AudioProfileVector &audioProfileVector,
+ uint32_t &samplingRate,
+ audio_channel_mask_t &channelMask,
+ audio_format_t &format,
+ audio_port_type_t portType,
+ audio_port_role_t portRole);
+
+// Assuming that this profile vector contains input profiles,
+// find the best matching config from 'outputProfileVector', according to
+// the given preferences for audio formats and channel masks.
+// Note: std::vectors are used because specialized containers for formats
+// and channels can be sorted and use their own ordering.
+status_t findBestMatchingOutputConfig(
+ const AudioProfileVector &audioProfileVector,
+ const AudioProfileVector &outputProfileVector,
+ const std::vector<audio_format_t> &preferredFormatVector, // order: most pref -> least pref
+ const std::vector<audio_channel_mask_t> &preferredOutputChannelVector,
+ bool preferHigherSamplingRates,
+ audio_config_base &bestOutputConfig);
+
+
+} // namespace android
\ No newline at end of file
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
index 0357ff4..a7def3e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioRoute.h
@@ -25,7 +25,7 @@
namespace android
{
-class AudioPort;
+class PolicyAudioPort;
class DeviceDescriptor;
typedef enum {
@@ -38,11 +38,11 @@
public:
explicit AudioRoute(audio_route_type_t type) : mType(type) {}
- void setSources(const AudioPortVector &sources) { mSources = sources; }
- const AudioPortVector &getSources() const { return mSources; }
+ void setSources(const PolicyAudioPortVector &sources) { mSources = sources; }
+ const PolicyAudioPortVector &getSources() const { return mSources; }
- void setSink(const sp<AudioPort> &sink) { mSink = sink; }
- const sp<AudioPort> &getSink() const { return mSink; }
+ void setSink(const sp<PolicyAudioPort> &sink) { mSink = sink; }
+ const sp<PolicyAudioPort> &getSink() const { return mSink; }
audio_route_type_t getType() const { return mType; }
@@ -57,13 +57,14 @@
* @return true if the audio route supports the connection between the sink and the source,
* false otherwise
*/
- bool supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const;
+ bool supportsPatch(const sp<PolicyAudioPort> &srcPort,
+ const sp<PolicyAudioPort> &dstPort) const;
void dump(String8 *dst, int spaces) const;
private:
- AudioPortVector mSources;
- sp<AudioPort> mSink;
+ PolicyAudioPortVector mSources;
+ sp<PolicyAudioPort> mSink;
audio_route_type_t mType;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index c7c1fee..7faf90e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -16,7 +16,9 @@
#pragma once
-#include "AudioPort.h"
+#include "PolicyAudioPort.h"
+#include <media/AudioContainers.h>
+#include <media/DeviceDescriptorBase.h>
#include <utils/Errors.h>
#include <utils/String8.h>
#include <utils/SortedVector.h>
@@ -26,21 +28,22 @@
namespace android {
-class DeviceDescriptor : public AudioPort, public AudioPortConfig
+class DeviceDescriptor : public DeviceDescriptorBase,
+ public PolicyAudioPort, public PolicyAudioPortConfig
{
public:
// Note that empty name refers by convention to a generic device.
- explicit DeviceDescriptor(audio_devices_t type, const String8 &tagName = String8(""));
+ explicit DeviceDescriptor(audio_devices_t type, const std::string &tagName = "");
DeviceDescriptor(audio_devices_t type, const FormatVector &encodedFormats,
- const String8 &tagName = String8(""));
+ const std::string &tagName = "");
virtual ~DeviceDescriptor() {}
- virtual const String8 getTagName() const { return mTagName; }
+ virtual void addAudioProfile(const sp<AudioProfile> &profile) {
+ addAudioProfileAndSort(mProfiles, profile);
+ }
- audio_devices_t type() const { return mDeviceType; }
- String8 address() const { return mAddress; }
- void setAddress(const String8 &address) { mAddress = address; }
+ virtual const std::string getTagName() const { return mTagName; }
const FormatVector& encodedFormats() const { return mEncodedFormats; }
@@ -56,36 +59,42 @@
bool supportsFormat(audio_format_t format);
+ // PolicyAudioPortConfig
+ virtual sp<PolicyAudioPort> getPolicyAudioPort() const {
+ return static_cast<PolicyAudioPort*>(const_cast<DeviceDescriptor*>(this));
+ }
+
// AudioPortConfig
- virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
+ virtual status_t applyAudioPortConfig(const struct audio_port_config *config,
+ struct audio_port_config *backupConfig = NULL);
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig = NULL) const;
- // AudioPort
+ // PolicyAudioPort
+ virtual sp<AudioPort> asAudioPort() const {
+ return static_cast<AudioPort*>(const_cast<DeviceDescriptor*>(this));
+ }
virtual void attach(const sp<HwModule>& module);
virtual void detach();
+ // AudioPort
virtual void toAudioPort(struct audio_port *port) const;
- virtual void importAudioPort(const sp<AudioPort>& port, bool force = false);
- audio_port_handle_t getId() const;
+ void importAudioPortAndPickAudioProfile(const sp<PolicyAudioPort>& policyPort,
+ bool force = false);
+
void dump(String8 *dst, int spaces, int index, bool verbose = true) const;
- void log() const;
- std::string toString() const;
private:
- String8 mAddress{""};
- String8 mTagName; // Unique human readable identifier for a device port found in conf file.
- audio_devices_t mDeviceType;
+ std::string mTagName; // Unique human readable identifier for a device port found in conf file.
FormatVector mEncodedFormats;
- audio_port_handle_t mId = AUDIO_PORT_HANDLE_NONE;
audio_format_t mCurrentEncodedFormat;
};
class DeviceVector : public SortedVector<sp<DeviceDescriptor> >
{
public:
- DeviceVector() : SortedVector(), mDeviceTypes(AUDIO_DEVICE_NONE) {}
+ DeviceVector() : SortedVector() {}
explicit DeviceVector(const sp<DeviceDescriptor>& item) : DeviceVector()
{
add(item);
@@ -97,13 +106,16 @@
void remove(const DeviceVector &devices);
ssize_t indexOf(const sp<DeviceDescriptor>& item) const;
- audio_devices_t types() const { return mDeviceTypes; }
+ DeviceTypeSet types() const { return mDeviceTypes; }
// If 'address' is empty and 'codec' is AUDIO_FORMAT_DEFAULT, a device with a non-empty
// address may be returned if there is no device with the specified 'type' and empty address.
sp<DeviceDescriptor> getDevice(audio_devices_t type, const String8 &address,
audio_format_t codec) const;
- DeviceVector getDevicesFromTypeMask(audio_devices_t types) const;
+ DeviceVector getDevicesFromTypes(const DeviceTypeSet& types) const;
+ DeviceVector getDevicesFromType(audio_devices_t type) const {
+ return getDevicesFromTypes({type});
+ }
/**
* @brief getDeviceFromId
@@ -112,17 +124,36 @@
* equal to AUDIO_PORT_HANDLE_NONE, it also returns a nullptr.
*/
sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
- sp<DeviceDescriptor> getDeviceFromTagName(const String8 &tagName) const;
+ sp<DeviceDescriptor> getDeviceFromTagName(const std::string &tagName) const;
DeviceVector getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
- audio_devices_t getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const;
DeviceVector getFirstDevicesFromTypes(std::vector<audio_devices_t> orderedTypes) const;
sp<DeviceDescriptor> getFirstExistingDevice(std::vector<audio_devices_t> orderedTypes) const;
+ // Return device descriptor that is used to open an input/output stream.
+ // Null pointer will be returned if
+ // 1) this collection is empty
+    //     2) the device descriptors are not all of the same category (input or output)
+    //     3) there is more than one device type in the input case
+ // 4) the combination of all devices is invalid for selection
+ sp<DeviceDescriptor> getDeviceForOpening() const;
+
// If there are devices with the given type and the devices to add is not empty,
// remove all the devices with the given type and add all the devices to add.
void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
+ bool containsDeviceAmongTypes(const DeviceTypeSet& deviceTypes) const {
+ return !Intersection(mDeviceTypes, deviceTypes).empty();
+ }
+
+ bool containsDeviceWithType(audio_devices_t deviceType) const {
+ return containsDeviceAmongTypes({deviceType});
+ }
+
+ bool onlyContainsDevicesWithType(audio_devices_t deviceType) const {
+ return isSingleDeviceType(mDeviceTypes, deviceType);
+ }
+
bool contains(const sp<DeviceDescriptor>& item) const { return indexOf(item) >= 0; }
/**
@@ -203,7 +234,7 @@
{
for (const auto &device : *this) {
if (device->address() != "") {
- return device->address();
+ return String8(device->address().c_str());
}
}
return String8("");
@@ -215,7 +246,7 @@
private:
void refreshTypes();
- audio_devices_t mDeviceTypes;
+ DeviceTypeSet mDeviceTypes;
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
index eb34da4..23f0c9a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/HwModule.h
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -82,19 +82,19 @@
status_t addInputProfile(const sp<IOProfile> &profile);
status_t addProfile(const sp<IOProfile> &profile);
- status_t addOutputProfile(const String8& name, const audio_config_t *config,
+ status_t addOutputProfile(const std::string& name, const audio_config_t *config,
audio_devices_t device, const String8& address);
- status_t removeOutputProfile(const String8& name);
- status_t addInputProfile(const String8& name, const audio_config_t *config,
+ status_t removeOutputProfile(const std::string& name);
+ status_t addInputProfile(const std::string& name, const audio_config_t *config,
audio_devices_t device, const String8& address);
- status_t removeInputProfile(const String8& name);
+ status_t removeInputProfile(const std::string& name);
audio_module_handle_t getHandle() const { return mHandle; }
void setHandle(audio_module_handle_t handle);
- sp<AudioPort> findPortByTagName(const String8 &tagName) const
+ sp<PolicyAudioPort> findPortByTagName(const std::string &tagName) const
{
- return mPorts.findByTagName(tagName);
+ return findByTagName(mPorts, tagName);
}
/**
@@ -106,7 +106,8 @@
* @return true if the HwModule supports the connection between the sink and the source,
* false otherwise
*/
- bool supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const;
+ bool supportsPatch(const sp<PolicyAudioPort> &srcPort,
+ const sp<PolicyAudioPort> &dstPort) const;
// TODO remove from here (split serialization)
void dump(String8 *dst) const;
@@ -122,7 +123,7 @@
DeviceVector mDeclaredDevices; // devices declared in audio_policy configuration file.
DeviceVector mDynamicDevices; /**< devices that can be added/removed at runtime (e.g. rsbumix)*/
AudioRouteVector mRoutes;
- AudioPortVector mPorts;
+ PolicyAudioPortVector mPorts;
};
class HwModuleCollection : public Vector<sp<HwModule> >
@@ -130,8 +131,8 @@
public:
sp<HwModule> getModuleFromName(const char *name) const;
- sp<HwModule> getModuleForDeviceTypes(audio_devices_t device,
- audio_format_t encodedFormat) const;
+ sp<HwModule> getModuleForDeviceType(audio_devices_t device,
+ audio_format_t encodedFormat) const;
sp<HwModule> getModuleForDevice(const sp<DeviceDescriptor> &device,
audio_format_t encodedFormat) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index e0b56d4..2044863 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -16,8 +16,10 @@
#pragma once
-#include "AudioPort.h"
#include "DeviceDescriptor.h"
+#include "PolicyAudioPort.h"
+#include "policy.h"
+#include <media/AudioContainers.h>
#include <utils/String8.h>
#include <system/audio.h>
@@ -30,18 +32,28 @@
// It is used by the policy manager to determine if an output or input is suitable for
// a given use case, open/close it accordingly and connect/disconnect audio tracks
// to/from it.
-class IOProfile : public AudioPort
+class IOProfile : public AudioPort, public PolicyAudioPort
{
public:
- IOProfile(const String8 &name, audio_port_role_t role)
+ IOProfile(const std::string &name, audio_port_role_t role)
: AudioPort(name, AUDIO_PORT_TYPE_MIX, role),
maxOpenCount(1),
curOpenCount(0),
maxActiveCount(1),
curActiveCount(0) {}
+ virtual ~IOProfile() = default;
+
// For a Profile aka MixPort, tag name and name are equivalent.
- virtual const String8 getTagName() const { return getName(); }
+ virtual const std::string getTagName() const { return getName(); }
+
+ virtual void addAudioProfile(const sp<AudioProfile> &profile) {
+ addAudioProfileAndSort(mProfiles, profile);
+ }
+
+ virtual sp<AudioPort> asAudioPort() const {
+ return static_cast<AudioPort*>(const_cast<IOProfile*>(this));
+ }
// FIXME: this is needed because shared MMAP stream clients use the same audio session.
// Once capture clients are tracked individually and not per session this can be removed
@@ -51,7 +63,7 @@
// flags are parsed before maxActiveCount by the serializer.
void setFlags(uint32_t flags) override
{
- AudioPort::setFlags(flags);
+ PolicyAudioPort::setFlags(flags);
if (getRole() == AUDIO_PORT_ROLE_SINK && (flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
maxActiveCount = 0;
}
@@ -91,15 +103,12 @@
bool hasSupportedDevices() const { return !mSupportedDevices.isEmpty(); }
- bool supportsDeviceTypes(audio_devices_t device) const
+ bool supportsDeviceTypes(const DeviceTypeSet& deviceTypes) const
{
- if (audio_is_output_devices(device)) {
- if (deviceSupportsEncodedFormats(device)) {
- return mSupportedDevices.types() & device;
- }
- return false;
- }
- return mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN);
+ const bool areOutputDevices = Intersection(deviceTypes, getAudioDeviceInAllSet()).empty();
+ const bool devicesSupported = !mSupportedDevices.getDevicesFromTypes(deviceTypes).empty();
+ return devicesSupported &&
+ (!areOutputDevices || devicesSupportEncodedFormats(deviceTypes));
}
/**
@@ -114,18 +123,18 @@
bool supportsDevice(const sp<DeviceDescriptor> &device, bool forceCheckOnAddress = false) const
{
if (!device_distinguishes_on_address(device->type()) && !forceCheckOnAddress) {
- return supportsDeviceTypes(device->type());
+ return supportsDeviceTypes(DeviceTypeSet({device->type()}));
}
return mSupportedDevices.contains(device);
}
- bool deviceSupportsEncodedFormats(audio_devices_t device) const
+ bool devicesSupportEncodedFormats(DeviceTypeSet deviceTypes) const
{
- if (device == AUDIO_DEVICE_NONE) {
+ if (deviceTypes.empty()) {
return true; // required for isOffloadSupported() check
}
DeviceVector deviceList =
- mSupportedDevices.getDevicesFromTypeMask(device);
+ mSupportedDevices.getDevicesFromTypes(deviceTypes);
if (!deviceList.empty()) {
return deviceList.itemAt(0)->hasCurrentEncodedFormat();
}
@@ -183,13 +192,13 @@
class InputProfile : public IOProfile
{
public:
- explicit InputProfile(const String8 &name) : IOProfile(name, AUDIO_PORT_ROLE_SINK) {}
+ explicit InputProfile(const std::string &name) : IOProfile(name, AUDIO_PORT_ROLE_SINK) {}
};
class OutputProfile : public IOProfile
{
public:
- explicit OutputProfile(const String8 &name) : IOProfile(name, AUDIO_PORT_ROLE_SOURCE) {}
+ explicit OutputProfile(const std::string &name) : IOProfile(name, AUDIO_PORT_ROLE_SOURCE) {}
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
index d408446..fd8b81a 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
@@ -16,8 +16,9 @@
#pragma once
-#include <system/audio.h>
#include <Volume.h>
+#include <media/AudioContainers.h>
+#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/String8.h>
#include <vector>
@@ -33,7 +34,7 @@
virtual void addCurrentVolumeIndex(audio_devices_t device, int index) = 0;
virtual bool canBeMuted() const = 0;
virtual int getVolumeIndexMin() const = 0;
- virtual int getVolumeIndex(audio_devices_t device) const = 0;
+ virtual int getVolumeIndex(const DeviceTypeSet& device) const = 0;
virtual int getVolumeIndexMax() const = 0;
virtual float volIndexToDb(device_category device, int indexInUi) const = 0;
virtual bool hasVolumeIndexForDevice(audio_devices_t device) const = 0;
diff --git a/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
new file mode 100644
index 0000000..99df3c0
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/PolicyAudioPort.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioCollections.h"
+#include "AudioProfileVectorHelper.h"
+#include "HandleGenerator.h"
+#include <media/AudioGain.h>
+#include <media/AudioPort.h>
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+class HwModule;
+class AudioRoute;
+
+class PolicyAudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
+{
+public:
+ PolicyAudioPort() : mFlags(AUDIO_OUTPUT_FLAG_NONE) {}
+
+ virtual ~PolicyAudioPort() = default;
+
+ virtual const std::string getTagName() const = 0;
+
+ virtual sp<AudioPort> asAudioPort() const = 0;
+
+ virtual void setFlags(uint32_t flags)
+ {
+ // Force the direct flag if the offload flag is set: offloading implies a direct
+ // output stream, and all common behaviors are driven by checking only the direct
+ // flag. This should normally be set appropriately in the policy configuration file.
+ if (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE &&
+ (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+ flags |= AUDIO_OUTPUT_FLAG_DIRECT;
+ }
+ mFlags = flags;
+ }
+ uint32_t getFlags() const { return mFlags; }
+
+ virtual void attach(const sp<HwModule>& module);
+ virtual void detach();
+ bool isAttached() { return mModule != 0; }
+
+ // Audio port IDs are in a different namespace than AudioFlinger unique IDs
+ static audio_port_handle_t getNextUniqueId();
+
+ // searches for an exact match
+ virtual status_t checkExactAudioProfile(const struct audio_port_config *config) const;
+
+ // Searches for a compatible match; currently implemented for input.
+ // Parameters are passed in/out; on success they hold the best match.
+ status_t checkCompatibleAudioProfile(uint32_t &samplingRate,
+ audio_channel_mask_t &channelMask,
+ audio_format_t &format) const
+ {
+ return checkCompatibleProfile(
+ asAudioPort()->getAudioProfiles(), samplingRate, channelMask, format,
+ asAudioPort()->getType(), asAudioPort()->getRole());
+ }
+
+ void pickAudioProfile(uint32_t &samplingRate,
+ audio_channel_mask_t &channelMask,
+ audio_format_t &format) const;
+
+ static const audio_format_t sPcmFormatCompareTable[];
+
+ static int compareFormats(audio_format_t format1, audio_format_t format2);
+
+ // Used to select an audio HAL output stream with a sample format providing the
+ // least degradation for a given AudioTrack sample format.
+ static bool isBetterFormatMatch(audio_format_t newFormat,
+ audio_format_t currentFormat,
+ audio_format_t targetFormat);
+ static uint32_t formatDistance(audio_format_t format1,
+ audio_format_t format2);
+ static const uint32_t kFormatDistanceMax = 4;
+
+ audio_module_handle_t getModuleHandle() const;
+ uint32_t getModuleVersionMajor() const;
+ const char *getModuleName() const;
+ sp<HwModule> getModule() const { return mModule; }
+
+ inline bool isDirectOutput() const
+ {
+ return (asAudioPort()->getType() == AUDIO_PORT_TYPE_MIX) &&
+ (asAudioPort()->getRole() == AUDIO_PORT_ROLE_SOURCE) &&
+ (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD));
+ }
+
+ void addRoute(const sp<AudioRoute> &route) { mRoutes.add(route); }
+ const AudioRouteVector &getRoutes() const { return mRoutes; }
+
+private:
+ void pickChannelMask(audio_channel_mask_t &channelMask,
+ const ChannelMaskSet &channelMasks) const;
+ void pickSamplingRate(uint32_t &rate, const SampleRateSet &samplingRates) const;
+
+ uint32_t mFlags; // attribute flags mask (e.g. primary output, direct output...).
+ sp<HwModule> mModule; // audio HW module exposing this I/O stream
+ AudioRouteVector mRoutes; // Routes involving this port
+};
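+
+// A small usage sketch of the flag handling above (the profile name is
+// hypothetical): requesting compress offload on a source mix port implicitly
+// adds the direct flag, so isDirectOutput() reports true for that port.
+//
+//   sp<PolicyAudioPort> port = new OutputProfile("compress_offload"); // see IOProfile.h
+//   port->setFlags(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+//   // port->getFlags() now also carries AUDIO_OUTPUT_FLAG_DIRECT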
+
+class PolicyAudioPortConfig : public virtual RefBase
+{
+public:
+ virtual ~PolicyAudioPortConfig() = default;
+
+ virtual sp<PolicyAudioPort> getPolicyAudioPort() const = 0;
+
+ status_t validationBeforeApplyConfig(const struct audio_port_config *config) const;
+
+ void applyPolicyAudioPortConfig(const struct audio_port_config *config) {
+ if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ mFlags = config->flags;
+ }
+ }
+
+ void toPolicyAudioPortConfig(
+ struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig = NULL) const;
+
+
+ virtual bool hasSameHwModuleAs(const sp<PolicyAudioPortConfig>& other) const {
+ return (other.get() != nullptr) && (other->getPolicyAudioPort().get() != nullptr) &&
+ (getPolicyAudioPort().get() != nullptr) &&
+ (other->getPolicyAudioPort()->getModuleHandle() ==
+ getPolicyAudioPort()->getModuleHandle());
+ }
+
+ union audio_io_flags mFlags = { AUDIO_INPUT_FLAG_NONE };
+};
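+
+// Sketch of the flag application above (values illustrative): mFlags is only
+// overwritten when the caller sets AUDIO_PORT_CONFIG_FLAGS in config_mask;
+// otherwise the existing flags are preserved.
+//
+//   struct audio_port_config cfg = {};
+//   cfg.config_mask = AUDIO_PORT_CONFIG_FLAGS;
+//   cfg.flags.output = AUDIO_OUTPUT_FLAG_FAST;       // union audio_io_flags
+//   portConfig->applyPolicyAudioPortConfig(&cfg);    // mFlags now holds the new value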
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
index e8cf485..cd10010 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioCollections.cpp
@@ -18,15 +18,16 @@
//#define LOG_NDEBUG 0
#include "AudioCollections.h"
-#include "AudioPort.h"
#include "AudioRoute.h"
#include "HwModule.h"
+#include "PolicyAudioPort.h"
namespace android {
-sp<AudioPort> AudioPortVector::findByTagName(const String8 &tagName) const
+sp<PolicyAudioPort> findByTagName(const PolicyAudioPortVector& policyAudioPortVector,
+ const std::string &tagName)
{
- for (const auto& port : *this) {
+ for (const auto& port : policyAudioPortVector) {
if (port->getTagName() == tagName) {
return port;
}
@@ -34,15 +35,15 @@
return nullptr;
}
-void AudioRouteVector::dump(String8 *dst, int spaces) const
+void dumpAudioRouteVector(const AudioRouteVector& audioRouteVector, String8 *dst, int spaces)
{
- if (isEmpty()) {
+ if (audioRouteVector.isEmpty()) {
return;
}
- dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", size());
- for (size_t i = 0; i < size(); i++) {
+ dst->appendFormat("\n%*sAudio Routes (%zu):\n", spaces, "", audioRouteVector.size());
+ for (size_t i = 0; i < audioRouteVector.size(); i++) {
dst->appendFormat("%*s- Route %zu:\n", spaces, "", i + 1);
- itemAt(i)->dump(dst, 4);
+ audioRouteVector.itemAt(i)->dump(dst, 4);
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index a9b87e3..b963121 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -34,8 +34,8 @@
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
- if (profile->mGains.size() > 0) {
- profile->mGains[0]->getDefaultConfig(&mGain);
+ if (profile->getGains().size() > 0) {
+ profile->getGains()[0]->getDefaultConfig(&mGain);
}
}
}
@@ -48,16 +48,29 @@
return mProfile->getModuleHandle();
}
-audio_port_handle_t AudioInputDescriptor::getId() const
-{
- return mId;
-}
-
audio_source_t AudioInputDescriptor::source() const
{
return getHighestPriorityAttributes().source;
}
+status_t AudioInputDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
+ audio_port_config *backupConfig)
+{
+ struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
+ status_t status = NO_ERROR;
+
+ toAudioPortConfig(&localBackupConfig);
+ if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
+ AudioPortConfig::applyAudioPortConfig(config, backupConfig);
+ applyPolicyAudioPortConfig(config);
+ }
+
+ if (backupConfig != NULL) {
+ *backupConfig = localBackupConfig;
+ }
+ return status;
+}
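+
+// The pattern above snapshots the current configuration before touching it:
+// validate first, let the base class apply the common fields, then apply the
+// policy flags; the caller can keep the previous state for rollback. A hedged
+// usage sketch (values illustrative):
+//
+//   struct audio_port_config requested = {}, previous = {};
+//   requested.config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE;
+//   requested.sample_rate = 48000;
+//   if (inputDesc->applyAudioPortConfig(&requested, &previous) != NO_ERROR) {
+//       // 'previous' still describes the untouched configuration
+//   }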
+
void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
const struct audio_port_config *srcConfig) const
{
@@ -70,8 +83,8 @@
}
AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+ toPolicyAudioPortConfig(dstConfig, srcConfig);
- dstConfig->id = mId;
dstConfig->role = AUDIO_PORT_ROLE_SINK;
dstConfig->type = AUDIO_PORT_TYPE_MIX;
dstConfig->ext.mix.hw_module = getModuleHandle();
@@ -212,7 +225,7 @@
mDevice = device;
ALOGV("opening input for device %s profile %p name %s",
- mDevice->toString().c_str(), mProfile.get(), mProfile->getName().string());
+ mDevice->toString().c_str(), mProfile.get(), mProfile->getName().c_str());
audio_devices_t deviceType = mDevice->type();
@@ -220,7 +233,7 @@
input,
&lConfig,
&deviceType,
- mDevice->address(),
+ String8(mDevice->address().c_str()),
source,
flags);
LOG_ALWAYS_FATAL_IF(mDevice->type() != deviceType,
@@ -234,7 +247,7 @@
mSamplingRate = lConfig.sample_rate;
mChannelMask = lConfig.channel_mask;
mFormat = lConfig.format;
- mId = AudioPort::getNextUniqueId();
+ mId = PolicyAudioPort::getNextUniqueId();
mIoHandle = *input;
mProfile->curOpenCount++;
}
@@ -450,13 +463,13 @@
return enabledEffects;
}
-void AudioInputDescriptor::setAppState(uid_t uid, app_state_t state)
+void AudioInputDescriptor::setAppState(audio_port_handle_t portId, app_state_t state)
{
RecordClientVector clients = clientsList(false /*activeOnly*/);
RecordClientVector updatedClients;
for (const auto& client : clients) {
- if (uid == client->uid()) {
+ if (portId == client->portId()) {
bool wasSilenced = client->isSilenced();
client->setAppState(state);
if (client->active() && wasSilenced != client->isSilenced()) {
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 49524b0..dd51658 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -30,18 +30,19 @@
// A device mask for all audio output devices that are considered "remote" when evaluating
// active output devices in isStreamActiveRemotely()
-#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL AUDIO_DEVICE_OUT_REMOTE_SUBMIX
namespace android {
-AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
+DeviceTypeSet APM_AUDIO_OUT_DEVICE_REMOTE_ALL = {AUDIO_DEVICE_OUT_REMOTE_SUBMIX};
+
+AudioOutputDescriptor::AudioOutputDescriptor(const sp<PolicyAudioPort>& policyAudioPort,
AudioPolicyClientInterface *clientInterface)
- : mPort(port), mClientInterface(clientInterface)
+ : mPolicyAudioPort(policyAudioPort), mClientInterface(clientInterface)
{
- if (mPort.get() != nullptr) {
- mPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
- if (mPort->mGains.size() > 0) {
- mPort->mGains[0]->getDefaultConfig(&mGain);
+ if (mPolicyAudioPort.get() != nullptr) {
+ mPolicyAudioPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
+ if (mPolicyAudioPort->asAudioPort()->getGains().size() > 0) {
+ mPolicyAudioPort->asAudioPort()->getGains()[0]->getDefaultConfig(&mGain);
}
}
}
@@ -55,7 +56,8 @@
audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
{
- return mPort.get() != nullptr ? mPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
+ return mPolicyAudioPort.get() != nullptr ?
+ mPolicyAudioPort->getModuleHandle() : AUDIO_MODULE_HANDLE_NONE;
}
audio_patch_handle_t AudioOutputDescriptor::getPatchHandle() const
@@ -68,11 +70,6 @@
mPatchHandle = handle;
}
-audio_port_handle_t AudioOutputDescriptor::getId() const
-{
- return mId;
-}
-
bool AudioOutputDescriptor::sharesHwModuleWith(
const sp<AudioOutputDescriptor>& outputDesc)
{
@@ -144,7 +141,7 @@
return false;
}
-bool AudioOutputDescriptor::isFixedVolume(audio_devices_t device __unused)
+bool AudioOutputDescriptor::isFixedVolume(const DeviceTypeSet& deviceTypes __unused)
{
return false;
}
@@ -152,7 +149,7 @@
bool AudioOutputDescriptor::setVolume(float volumeDb,
VolumeSource volumeSource,
const StreamTypeVector &/*streams*/,
- audio_devices_t /*device*/,
+ const DeviceTypeSet& /*deviceTypes*/,
uint32_t delayMs,
bool force)
{
@@ -167,9 +164,27 @@
return false;
}
-void AudioOutputDescriptor::toAudioPortConfig(
- struct audio_port_config *dstConfig,
- const struct audio_port_config *srcConfig) const
+status_t AudioOutputDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
+ audio_port_config *backupConfig)
+{
+ struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
+ status_t status = NO_ERROR;
+
+ toAudioPortConfig(&localBackupConfig);
+ if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
+ AudioPortConfig::applyAudioPortConfig(config, backupConfig);
+ applyPolicyAudioPortConfig(config);
+ }
+
+ if (backupConfig != NULL) {
+ *backupConfig = localBackupConfig;
+ }
+ return status;
+}
+
+
+void AudioOutputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig) const
{
dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
@@ -177,8 +192,8 @@
dstConfig->config_mask |= srcConfig->config_mask;
}
AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+ toPolicyAudioPortConfig(dstConfig, srcConfig);
- dstConfig->id = mId;
dstConfig->role = AUDIO_PORT_ROLE_SOURCE;
dstConfig->type = AUDIO_PORT_TYPE_MIX;
dstConfig->ext.mix.hw_module = getModuleHandle();
@@ -188,7 +203,7 @@
void AudioOutputDescriptor::toAudioPort(struct audio_port *port) const
{
// Should not be called for duplicated ports, see SwAudioOutputDescriptor::toAudioPortConfig.
- mPort->toAudioPort(port);
+ mPolicyAudioPort->asAudioPort()->toAudioPort(port);
port->id = mId;
port->ext.mix.hw_module = getModuleHandle();
}
@@ -320,13 +335,13 @@
return filteredDevices.filter(devices);
}
-bool SwAudioOutputDescriptor::deviceSupportsEncodedFormats(audio_devices_t device)
+bool SwAudioOutputDescriptor::devicesSupportEncodedFormats(const DeviceTypeSet& deviceTypes)
{
if (isDuplicated()) {
- return (mOutput1->deviceSupportsEncodedFormats(device)
- || mOutput2->deviceSupportsEncodedFormats(device));
+ return (mOutput1->devicesSupportEncodedFormats(deviceTypes)
+ || mOutput2->devicesSupportEncodedFormats(deviceTypes));
} else {
- return mProfile->deviceSupportsEncodedFormats(device);
+ return mProfile->devicesSupportEncodedFormats(deviceTypes);
}
}
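+
+    // A hedged sketch of how the check above composes for a duplicated output
+    // (outputDesc illustrative): the call recurses into mOutput1/mOutput2 and
+    // succeeds if either half supports an encoded format for the given types.
+    //
+    //   DeviceTypeSet a2dp = getAudioDeviceOutAllA2dpSet();
+    //   if (outputDesc->isDuplicated() && outputDesc->devicesSupportEncodedFormats(a2dp)) {
+    //       // at least one of the underlying outputs can take an encoded A2DP stream
+    //   }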
@@ -349,16 +364,16 @@
AudioOutputDescriptor::setClientActive(client, active);
}
-bool SwAudioOutputDescriptor::isFixedVolume(audio_devices_t device)
+bool SwAudioOutputDescriptor::isFixedVolume(const DeviceTypeSet& deviceTypes)
{
// unit gain if rerouting to external policy
- if (device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX) {
+ if (isSingleDeviceType(deviceTypes, AUDIO_DEVICE_OUT_REMOTE_SUBMIX)) {
if (mPolicyMix != NULL) {
ALOGV("max gain when rerouting for output=%d", mIoHandle);
return true;
}
}
- if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX) {
+ if (isSingleDeviceType(deviceTypes, AUDIO_DEVICE_OUT_TELEPHONY_TX)) {
ALOGV("max gain when output device is telephony tx");
return true;
}
@@ -391,12 +406,12 @@
bool SwAudioOutputDescriptor::setVolume(float volumeDb,
VolumeSource vs, const StreamTypeVector &streamTypes,
- audio_devices_t device,
+ const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
bool force)
{
StreamTypeVector streams = streamTypes;
- if (!AudioOutputDescriptor::setVolume(volumeDb, vs, streamTypes, device, delayMs, force)) {
+ if (!AudioOutputDescriptor::setVolume(volumeDb, vs, streamTypes, deviceTypes, delayMs, force)) {
return false;
}
if (streams.empty()) {
@@ -406,7 +421,7 @@
// APM loops on all groups, so filter on the active group to set the port gain,
// let the other groups set the stream volume as per legacy
// TODO: Pass in the device address and check against it.
- if (device == devicePort->type() &&
+ if (isSingleDeviceType(deviceTypes, devicePort->type()) &&
devicePort->hasGainController(true) && isActive(vs)) {
ALOGV("%s: device %s has gain controller", __func__, devicePort->toString().c_str());
// @todo: here we might be in trouble if the SwOutput has several active clients with
@@ -452,8 +467,11 @@
audio_io_handle_t *output)
{
mDevices = devices;
- const String8& address = devices.getFirstValidAddress();
- audio_devices_t device = devices.types();
+ sp<DeviceDescriptor> device = devices.getDeviceForOpening();
+ LOG_ALWAYS_FATAL_IF(device == nullptr,
+ "%s failed to get device descriptor for opening "
+ "with the requested devices, all device types: %s",
+ __func__, dumpDeviceTypes(devices.types()).c_str());
audio_config_t lConfig;
if (config == nullptr) {
@@ -483,27 +501,25 @@
mFlags = (audio_output_flags_t)(mFlags | flags);
ALOGV("opening output for device %s profile %p name %s",
- mDevices.toString().c_str(), mProfile.get(), mProfile->getName().string());
+ mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
output,
&lConfig,
- &device,
- address,
+ device,
&mLatency,
mFlags);
- LOG_ALWAYS_FATAL_IF(mDevices.types() != device,
- "%s openOutput returned device %08x when given device %08x",
- __FUNCTION__, mDevices.types(), device);
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
- "%s openOutput returned output handle %d for device %08x",
- __FUNCTION__, *output, device);
+ "%s openOutput returned output handle %d for device %s, "
+ "selected device %s for opening",
+ __FUNCTION__, *output, devices.toString().c_str(),
+ device->toString().c_str());
mSamplingRate = lConfig.sample_rate;
mChannelMask = lConfig.channel_mask;
mFormat = lConfig.format;
- mId = AudioPort::getNextUniqueId();
+ mId = PolicyAudioPort::getNextUniqueId();
mIoHandle = *output;
mProfile->curOpenCount++;
}
@@ -589,7 +605,7 @@
return INVALID_OPERATION;
}
- mId = AudioPort::getNextUniqueId();
+ mId = PolicyAudioPort::getNextUniqueId();
mIoHandle = *ioHandle;
mOutput1 = output1;
mOutput2 = output2;
@@ -632,12 +648,12 @@
bool HwAudioOutputDescriptor::setVolume(float volumeDb,
VolumeSource volumeSource, const StreamTypeVector &streams,
- audio_devices_t device,
+ const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
bool force)
{
- bool changed =
- AudioOutputDescriptor::setVolume(volumeDb, volumeSource, streams, device, delayMs, force);
+ bool changed = AudioOutputDescriptor::setVolume(
+ volumeDb, volumeSource, streams, deviceTypes, delayMs, force);
if (changed) {
// TODO: use gain controller on source device if any to adjust volume
@@ -664,7 +680,8 @@
for (size_t i = 0; i < this->size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
if (outputDesc->isActive(volumeSource, inPastMs, sysTime)
- && ((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) == 0)) {
+ && (!(outputDesc->devices()
+ .containsDeviceAmongTypes(APM_AUDIO_OUT_DEVICE_REMOTE_ALL)))) {
return true;
}
}
@@ -676,7 +693,7 @@
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < size(); i++) {
const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
- if (((outputDesc->devices().types() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
+ if (outputDesc->devices().containsDeviceAmongTypes(APM_AUDIO_OUT_DEVICE_REMOTE_ALL) &&
outputDesc->isActive(volumeSource, inPastMs, sysTime)) {
// do not consider re-routing (when the output is going to a dynamic policy)
// as "remote playback"
@@ -707,9 +724,8 @@
for (size_t i = 0; i < size(); i++) {
sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
if (!outputDesc->isDuplicated() &&
- outputDesc->devices().types() & AUDIO_DEVICE_OUT_ALL_A2DP &&
- outputDesc->deviceSupportsEncodedFormats(
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP)) {
+ outputDesc->devices().containsDeviceAmongTypes(getAudioDeviceOutAllA2dpSet()) &&
+ outputDesc->devicesSupportEncodedFormats(getAudioDeviceOutAllA2dpSet())) {
return this->keyAt(i);
}
}
@@ -725,7 +741,7 @@
sp<HwModule> primaryHwModule = primaryOutput->mProfile->getModule();
for (const auto &outputProfile : primaryHwModule->getOutputProfiles()) {
- if (outputProfile->supportsDeviceTypes(AUDIO_DEVICE_OUT_ALL_A2DP)) {
+ if (outputProfile->supportsDeviceTypes(getAudioDeviceOutAllA2dpSet())) {
return true;
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 0221348..6f8ea36 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -20,7 +20,7 @@
#include "AudioPolicyMix.h"
#include "TypeConverter.h"
#include "HwModule.h"
-#include "AudioPort.h"
+#include "PolicyAudioPort.h"
#include "IOProfile.h"
#include <AudioOutputDescriptor.h>
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
deleted file mode 100644
index ff32284..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "APM::AudioPort"
-//#define LOG_NDEBUG 0
-#include "TypeConverter.h"
-#include "AudioPort.h"
-#include "HwModule.h"
-#include <policy.h>
-
-#ifndef ARRAY_SIZE
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-#endif
-
-namespace android {
-
-// --- AudioPort class implementation
-void AudioPort::attach(const sp<HwModule>& module)
-{
- ALOGV("%s: attaching module %s to port %s", __FUNCTION__, getModuleName(), mName.string());
- mModule = module;
-}
-
-void AudioPort::detach()
-{
- mModule = nullptr;
-}
-
-// Note that this is a different namespace than AudioFlinger unique IDs
-audio_port_handle_t AudioPort::getNextUniqueId()
-{
- return getNextHandle();
-}
-
-audio_module_handle_t AudioPort::getModuleHandle() const
-{
- return mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
-}
-
-uint32_t AudioPort::getModuleVersionMajor() const
-{
- return mModule != 0 ? mModule->getHalVersionMajor() : 0;
-}
-
-const char *AudioPort::getModuleName() const
-{
- return mModule != 0 ? mModule->getName() : "invalid module";
-}
-
-void AudioPort::toAudioPort(struct audio_port *port) const
-{
- // TODO: update this function once audio_port structure reflects the new profile definition.
- // For compatibility reasons: flattening the AudioProfile into the audio_port structure.
- FormatSet flatenedFormats;
- SampleRateSet flatenedRates;
- ChannelMaskSet flatenedChannels;
- for (const auto& profile : mProfiles) {
- if (profile->isValid()) {
- audio_format_t formatToExport = profile->getFormat();
- const SampleRateSet &ratesToExport = profile->getSampleRates();
- const ChannelMaskSet &channelsToExport = profile->getChannels();
-
- flatenedFormats.insert(formatToExport);
- flatenedRates.insert(ratesToExport.begin(), ratesToExport.end());
- flatenedChannels.insert(channelsToExport.begin(), channelsToExport.end());
-
- if (flatenedRates.size() > AUDIO_PORT_MAX_SAMPLING_RATES ||
- flatenedChannels.size() > AUDIO_PORT_MAX_CHANNEL_MASKS ||
- flatenedFormats.size() > AUDIO_PORT_MAX_FORMATS) {
- ALOGE("%s: bailing out: cannot export profiles to port config", __FUNCTION__);
- return;
- }
- }
- }
- port->role = mRole;
- port->type = mType;
- strlcpy(port->name, mName, AUDIO_PORT_MAX_NAME_LEN);
- port->num_sample_rates = flatenedRates.size();
- port->num_channel_masks = flatenedChannels.size();
- port->num_formats = flatenedFormats.size();
- std::copy(flatenedRates.begin(), flatenedRates.end(), port->sample_rates);
- std::copy(flatenedChannels.begin(), flatenedChannels.end(), port->channel_masks);
- std::copy(flatenedFormats.begin(), flatenedFormats.end(), port->formats);
-
- ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
-
- port->num_gains = std::min(mGains.size(), (size_t) AUDIO_PORT_MAX_GAINS);
- for (size_t i = 0; i < port->num_gains; i++) {
- port->gains[i] = mGains[i]->getGain();
- }
-}
-
-void AudioPort::importAudioPort(const sp<AudioPort>& port, bool force __unused)
-{
- for (const auto& profileToImport : port->mProfiles) {
- if (profileToImport->isValid()) {
- // Import only valid profiles, i.e. valid format, non-empty rates and channel masks
- bool hasSameProfile = false;
- for (const auto& profile : mProfiles) {
- if (*profile == *profileToImport) {
- // never import a profile twice
- hasSameProfile = true;
- break;
- }
- }
- if (hasSameProfile) { // never import a same profile twice
- continue;
- }
- addAudioProfile(profileToImport);
- }
- }
-}
-
-status_t AudioPort::checkExactAudioProfile(const struct audio_port_config *config) const
-{
- status_t status = NO_ERROR;
- auto config_mask = config->config_mask;
- if (config_mask & AUDIO_PORT_CONFIG_GAIN) {
- config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
- status = checkGain(&config->gain, config->gain.index);
- if (status != NO_ERROR) {
- return status;
- }
- }
- if (config_mask != 0) {
- // TODO should we check sample_rate / channel_mask / format separately?
- status = mProfiles.checkExactProfile(config->sample_rate,
- config->channel_mask,
- config->format);
- }
- return status;
-}
-
-void AudioPort::pickSamplingRate(uint32_t &pickedRate,const SampleRateSet &samplingRates) const
-{
- pickedRate = 0;
- // For direct outputs, pick minimum sampling rate: this helps ensure that the
- // channel count / sampling rate combination chosen will be supported by the connected
- // sink
- if (isDirectOutput()) {
- uint32_t samplingRate = UINT_MAX;
- for (const auto rate : samplingRates) {
- if ((rate < samplingRate) && (rate > 0)) {
- samplingRate = rate;
- }
- }
- pickedRate = (samplingRate == UINT_MAX) ? 0 : samplingRate;
- } else {
- uint32_t maxRate = SAMPLE_RATE_HZ_MAX;
-
- // For mixed output and inputs, use max mixer sampling rates. Do not
- // limit sampling rate otherwise
- // For inputs, also see checkCompatibleSamplingRate().
- if (mType != AUDIO_PORT_TYPE_MIX) {
- maxRate = UINT_MAX;
- }
- // TODO: should mSamplingRates[] be ordered in terms of our preference
- // and we return the first (and hence most preferred) match? This is of concern if
- // we want to choose 96kHz over 192kHz for USB driver stability or resource constraints.
- for (const auto rate : samplingRates) {
- if ((rate > pickedRate) && (rate <= maxRate)) {
- pickedRate = rate;
- }
- }
- }
-}
-
-void AudioPort::pickChannelMask(audio_channel_mask_t &pickedChannelMask,
- const ChannelMaskSet &channelMasks) const
-{
- pickedChannelMask = AUDIO_CHANNEL_NONE;
- // For direct outputs, pick minimum channel count: this helps ensure that the
- // channel count / sampling rate combination chosen will be supported by the connected
- // sink
- if (isDirectOutput()) {
- uint32_t channelCount = UINT_MAX;
- for (const auto channelMask : channelMasks) {
- uint32_t cnlCount;
- if (useInputChannelMask()) {
- cnlCount = audio_channel_count_from_in_mask(channelMask);
- } else {
- cnlCount = audio_channel_count_from_out_mask(channelMask);
- }
- if ((cnlCount < channelCount) && (cnlCount > 0)) {
- pickedChannelMask = channelMask;
- channelCount = cnlCount;
- }
- }
- } else {
- uint32_t channelCount = 0;
- uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
-
- // For mixed output and inputs, use max mixer channel count. Do not
- // limit channel count otherwise
- if (mType != AUDIO_PORT_TYPE_MIX) {
- maxCount = UINT_MAX;
- }
- for (const auto channelMask : channelMasks) {
- uint32_t cnlCount;
- if (useInputChannelMask()) {
- cnlCount = audio_channel_count_from_in_mask(channelMask);
- } else {
- cnlCount = audio_channel_count_from_out_mask(channelMask);
- }
- if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
- pickedChannelMask = channelMask;
- channelCount = cnlCount;
- }
- }
- }
-}
-
-/* format in order of increasing preference */
-const audio_format_t AudioPort::sPcmFormatCompareTable[] = {
- AUDIO_FORMAT_DEFAULT,
- AUDIO_FORMAT_PCM_16_BIT,
- AUDIO_FORMAT_PCM_8_24_BIT,
- AUDIO_FORMAT_PCM_24_BIT_PACKED,
- AUDIO_FORMAT_PCM_32_BIT,
- AUDIO_FORMAT_PCM_FLOAT,
-};
-
-int AudioPort::compareFormats(audio_format_t format1, audio_format_t format2)
-{
- // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
- // compressed format and better than any PCM format. This is by design of pickFormat()
- if (!audio_is_linear_pcm(format1)) {
- if (!audio_is_linear_pcm(format2)) {
- return 0;
- }
- return 1;
- }
- if (!audio_is_linear_pcm(format2)) {
- return -1;
- }
-
- int index1 = -1, index2 = -1;
- for (size_t i = 0;
- (i < ARRAY_SIZE(sPcmFormatCompareTable)) && ((index1 == -1) || (index2 == -1));
- i ++) {
- if (sPcmFormatCompareTable[i] == format1) {
- index1 = i;
- }
- if (sPcmFormatCompareTable[i] == format2) {
- index2 = i;
- }
- }
- // format1 not found => index1 < 0 => format2 > format1
- // format2 not found => index2 < 0 => format2 < format1
- return index1 - index2;
-}
-
-uint32_t AudioPort::formatDistance(audio_format_t format1, audio_format_t format2)
-{
- if (format1 == format2) {
- return 0;
- }
- if (format1 == AUDIO_FORMAT_INVALID || format2 == AUDIO_FORMAT_INVALID) {
- return kFormatDistanceMax;
- }
- int diffBytes = (int)audio_bytes_per_sample(format1) -
- audio_bytes_per_sample(format2);
-
- return abs(diffBytes);
-}
-
-bool AudioPort::isBetterFormatMatch(audio_format_t newFormat,
- audio_format_t currentFormat,
- audio_format_t targetFormat)
-{
- return formatDistance(newFormat, targetFormat) < formatDistance(currentFormat, targetFormat);
-}
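
// Worked example for the two helpers above (byte widths follow audio_bytes_per_sample):
// formatDistance(AUDIO_FORMAT_PCM_16_BIT, AUDIO_FORMAT_PCM_FLOAT) == 2 (2 vs 4 bytes),
// formatDistance(AUDIO_FORMAT_PCM_24_BIT_PACKED, AUDIO_FORMAT_PCM_FLOAT) == 1 (3 vs 4 bytes),
// so isBetterFormatMatch(PCM_24_BIT_PACKED, PCM_16_BIT, PCM_FLOAT) returns true:
// the 24-bit candidate sits closer to the float target than the current 16-bit choice.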
-
-void AudioPort::pickAudioProfile(uint32_t &samplingRate,
- audio_channel_mask_t &channelMask,
- audio_format_t &format) const
-{
- format = AUDIO_FORMAT_DEFAULT;
- samplingRate = 0;
- channelMask = AUDIO_CHANNEL_NONE;
-
- // special case for uninitialized dynamic profile
- if (!mProfiles.hasValidProfile()) {
- return;
- }
- audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
- // For mixed output and inputs, use best mixer output format.
- // Do not limit format otherwise
- if ((mType != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
- bestFormat = AUDIO_FORMAT_INVALID;
- }
-
- for (size_t i = 0; i < mProfiles.size(); i ++) {
- if (!mProfiles[i]->isValid()) {
- continue;
- }
- audio_format_t formatToCompare = mProfiles[i]->getFormat();
- if ((compareFormats(formatToCompare, format) > 0) &&
- (compareFormats(formatToCompare, bestFormat) <= 0)) {
- uint32_t pickedSamplingRate = 0;
- audio_channel_mask_t pickedChannelMask = AUDIO_CHANNEL_NONE;
- pickChannelMask(pickedChannelMask, mProfiles[i]->getChannels());
- pickSamplingRate(pickedSamplingRate, mProfiles[i]->getSampleRates());
-
- if (formatToCompare != AUDIO_FORMAT_DEFAULT && pickedChannelMask != AUDIO_CHANNEL_NONE
- && pickedSamplingRate != 0) {
- format = formatToCompare;
- channelMask = pickedChannelMask;
- samplingRate = pickedSamplingRate;
- // TODO: shall we return on the first one or keep trying to pick a better profile?
- }
- }
- }
- ALOGV("%s Port[nm:%s] profile rate=%d, format=%d, channels=%d", __FUNCTION__, mName.string(),
- samplingRate, channelMask, format);
-}
-
-status_t AudioPort::checkGain(const struct audio_gain_config *gainConfig, int index) const
-{
- if (index < 0 || (size_t)index >= mGains.size()) {
- return BAD_VALUE;
- }
- return mGains[index]->checkConfig(gainConfig);
-}
-
-void AudioPort::dump(String8 *dst, int spaces, bool verbose) const
-{
- if (!mName.isEmpty()) {
- dst->appendFormat("%*s- name: %s\n", spaces, "", mName.string());
- }
- if (verbose) {
- mProfiles.dump(dst, spaces);
-
- if (mGains.size() != 0) {
- dst->appendFormat("%*s- gains:\n", spaces, "");
- for (size_t i = 0; i < mGains.size(); i++) {
- std::string gainStr;
- mGains[i]->dump(&gainStr, spaces + 2, i);
- dst->append(gainStr.c_str());
- }
- }
- }
-}
-
-void AudioPort::log(const char* indent) const
-{
- ALOGI("%s Port[nm:%s, type:%d, role:%d]", indent, mName.string(), mType, mRole);
-}
-
-// --- AudioPortConfig class implementation
-
-status_t AudioPortConfig::applyAudioPortConfig(const struct audio_port_config *config,
- struct audio_port_config *backupConfig)
-{
- struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
- status_t status = NO_ERROR;
-
- toAudioPortConfig(&localBackupConfig);
-
- sp<AudioPort> audioport = getAudioPort();
- if (audioport == 0) {
- status = NO_INIT;
- goto exit;
- }
- status = audioport->checkExactAudioProfile(config);
- if (status != NO_ERROR) {
- goto exit;
- }
- if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
- mSamplingRate = config->sample_rate;
- }
- if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- mChannelMask = config->channel_mask;
- }
- if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- mFormat = config->format;
- }
- if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
- mGain = config->gain;
- }
- if (config->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
- mFlags = config->flags;
- }
-
-exit:
- if (status != NO_ERROR) {
- applyAudioPortConfig(&localBackupConfig);
- }
- if (backupConfig != NULL) {
- *backupConfig = localBackupConfig;
- }
- return status;
-}
-
-namespace {
-
-template<typename T>
-void updateField(
- const T& portConfigField, T audio_port_config::*port_config_field,
- struct audio_port_config *dstConfig, const struct audio_port_config *srcConfig,
- unsigned int configMask, T defaultValue)
-{
- if (dstConfig->config_mask & configMask) {
- if ((srcConfig != nullptr) && (srcConfig->config_mask & configMask)) {
- dstConfig->*port_config_field = srcConfig->*port_config_field;
- } else {
- dstConfig->*port_config_field = portConfigField;
- }
- } else {
- dstConfig->*port_config_field = defaultValue;
- }
-}
-
-} // namespace
-
-void AudioPortConfig::toAudioPortConfig(struct audio_port_config *dstConfig,
- const struct audio_port_config *srcConfig) const
-{
- updateField(mSamplingRate, &audio_port_config::sample_rate,
- dstConfig, srcConfig, AUDIO_PORT_CONFIG_SAMPLE_RATE, 0u);
- updateField(mChannelMask, &audio_port_config::channel_mask,
- dstConfig, srcConfig, AUDIO_PORT_CONFIG_CHANNEL_MASK,
- (audio_channel_mask_t)AUDIO_CHANNEL_NONE);
- updateField(mFormat, &audio_port_config::format,
- dstConfig, srcConfig, AUDIO_PORT_CONFIG_FORMAT, AUDIO_FORMAT_INVALID);
-
- sp<AudioPort> audioport = getAudioPort();
- if ((dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) && audioport != NULL) {
- dstConfig->gain = mGain;
- if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)
- && audioport->checkGain(&srcConfig->gain, srcConfig->gain.index) == OK) {
- dstConfig->gain = srcConfig->gain;
- }
- } else {
- dstConfig->gain.index = -1;
- }
- if (dstConfig->gain.index != -1) {
- dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN;
- } else {
- dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
- }
-
- updateField(mFlags, &audio_port_config::flags,
- dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
-}
-
-bool AudioPortConfig::hasGainController(bool canUseForVolume) const
-{
- sp<AudioPort> audioport = getAudioPort();
- if (audioport == nullptr) {
- return false;
- }
- return canUseForVolume ? audioport->getGains().canUseForVolume()
- : audioport->getGains().size() > 0;
-}
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
deleted file mode 100644
index d1082e8..0000000
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <algorithm>
-#include <set>
-#include <string>
-
-#define LOG_TAG "APM::AudioProfile"
-//#define LOG_NDEBUG 0
-
-#include <media/AudioContainers.h>
-#include <media/AudioResamplerPublic.h>
-#include <utils/Errors.h>
-
-#include "AudioPort.h"
-#include "AudioProfile.h"
-#include "HwModule.h"
-#include "TypeConverter.h"
-
-namespace android {
-
-bool operator == (const AudioProfile &left, const AudioProfile &compareTo)
-{
- return (left.getFormat() == compareTo.getFormat()) &&
- (left.getChannels() == compareTo.getChannels()) &&
- (left.getSampleRates() == compareTo.getSampleRates());
-}
-
-static AudioProfile* createFullDynamicImpl()
-{
- AudioProfile* dynamicProfile = new AudioProfile(gDynamicFormat,
- ChannelMaskSet(), SampleRateSet());
- dynamicProfile->setDynamicFormat(true);
- dynamicProfile->setDynamicChannels(true);
- dynamicProfile->setDynamicRate(true);
- return dynamicProfile;
-}
-
-// static
-sp<AudioProfile> AudioProfile::createFullDynamic()
-{
- static sp<AudioProfile> dynamicProfile = createFullDynamicImpl();
- return dynamicProfile;
-}
-
-AudioProfile::AudioProfile(audio_format_t format,
- audio_channel_mask_t channelMasks,
- uint32_t samplingRate) :
- mName(String8("")),
- mFormat(format)
-{
- mChannelMasks.insert(channelMasks);
- mSamplingRates.insert(samplingRate);
-}
-
-AudioProfile::AudioProfile(audio_format_t format,
- const ChannelMaskSet &channelMasks,
- const SampleRateSet &samplingRateCollection) :
- mName(String8("")),
- mFormat(format),
- mChannelMasks(channelMasks),
- mSamplingRates(samplingRateCollection) {}
-
-void AudioProfile::setChannels(const ChannelMaskSet &channelMasks)
-{
- if (mIsDynamicChannels) {
- mChannelMasks = channelMasks;
- }
-}
-
-void AudioProfile::setSampleRates(const SampleRateSet &sampleRates)
-{
- if (mIsDynamicRate) {
- mSamplingRates = sampleRates;
- }
-}
-
-void AudioProfile::clear()
-{
- if (mIsDynamicChannels) {
- mChannelMasks.clear();
- }
- if (mIsDynamicRate) {
- mSamplingRates.clear();
- }
-}
-
-status_t AudioProfile::checkExact(uint32_t samplingRate, audio_channel_mask_t channelMask,
- audio_format_t format) const
-{
- if (audio_formats_match(format, mFormat) &&
- supportsChannels(channelMask) &&
- supportsRate(samplingRate)) {
- return NO_ERROR;
- }
- return BAD_VALUE;
-}
-
-status_t AudioProfile::checkCompatibleSamplingRate(uint32_t samplingRate,
- uint32_t &updatedSamplingRate) const
-{
- ALOG_ASSERT(samplingRate > 0);
-
- if (mSamplingRates.empty()) {
- updatedSamplingRate = samplingRate;
- return NO_ERROR;
- }
-
- // Search for the closest supported sampling rate that is above (preferred)
- // or below (acceptable) the desired sampling rate, within a permitted ratio.
- // The sampling rates are sorted in ascending order.
- auto desiredRate = mSamplingRates.lower_bound(samplingRate);
-
- // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
- if (desiredRate != mSamplingRates.end()) {
- if (*desiredRate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
- updatedSamplingRate = *desiredRate;
- return NO_ERROR;
- }
- }
- // But if we have to up-sample from a lower sampling rate, that's OK.
- if (desiredRate != mSamplingRates.begin()) {
- uint32_t candidate = *(--desiredRate);
- if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
- updatedSamplingRate = candidate;
- return NO_ERROR;
- }
- }
- // leave updatedSamplingRate unmodified
- return BAD_VALUE;
-}
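
// Worked example for the search above (rates illustrative): with supported rates
// {8000, 16000, 48000} and a request of 44100, lower_bound lands on 48000, which is
// accepted because 48000 / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= 44100. A request of
// 96000 walks past the end and falls back to the highest lower rate, 48000, which
// is accepted only if 48000 * AUDIO_RESAMPLER_UP_RATIO_MAX >= 96000.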
-
-status_t AudioProfile::checkCompatibleChannelMask(audio_channel_mask_t channelMask,
- audio_channel_mask_t &updatedChannelMask,
- audio_port_type_t portType,
- audio_port_role_t portRole) const
-{
- if (mChannelMasks.empty()) {
- updatedChannelMask = channelMask;
- return NO_ERROR;
- }
- const bool isRecordThread = portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK;
- const bool isIndex = audio_channel_mask_get_representation(channelMask)
- == AUDIO_CHANNEL_REPRESENTATION_INDEX;
- const uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
- int bestMatch = 0;
- for (const auto &supported : mChannelMasks) {
- if (supported == channelMask) {
- // Exact matches always taken.
- updatedChannelMask = channelMask;
- return NO_ERROR;
- }
-
- // AUDIO_CHANNEL_NONE (value: 0) is used for dynamic channel support
- if (isRecordThread && supported != AUDIO_CHANNEL_NONE) {
- // Approximate (best) match:
- // The match score measures how well the supported channel mask matches the
- // desired mask, where increasing-is-better.
- //
- // TODO: Some tweaks may be needed.
- // Should be a static function of the data processing library.
- //
- // In priority:
- // match score = 1000 if legacy channel conversion equivalent (always prefer this)
- // OR
- // match score += 100 if the channel mask representations match
- // match score += number of channels matched.
- // match score += 100 if the channel mask representations DO NOT match
- // but the profile has positional channel mask and less than 2 channels.
- // This is for audio HAL convention to not list index masks for less than 2 channels
- //
- // If there are no matched channels, the mask may still be accepted
- // but the playback or record will be silent.
- const bool isSupportedIndex = (audio_channel_mask_get_representation(supported)
- == AUDIO_CHANNEL_REPRESENTATION_INDEX);
- const uint32_t supportedChannelCount = audio_channel_count_from_in_mask(supported);
- int match;
- if (isIndex && isSupportedIndex) {
- // index equivalence
- match = 100 + __builtin_popcount(
- audio_channel_mask_get_bits(channelMask)
- & audio_channel_mask_get_bits(supported));
- } else if (isIndex && !isSupportedIndex) {
- const uint32_t equivalentBits = (1 << supportedChannelCount) - 1 ;
- match = __builtin_popcount(
- audio_channel_mask_get_bits(channelMask) & equivalentBits);
- if (supportedChannelCount <= FCC_2) {
- match += 100;
- }
- } else if (!isIndex && isSupportedIndex) {
- const uint32_t equivalentBits = (1 << channelCount) - 1;
- match = __builtin_popcount(
- equivalentBits & audio_channel_mask_get_bits(supported));
- } else {
- // positional equivalence
- match = 100 + __builtin_popcount(
- audio_channel_mask_get_bits(channelMask)
- & audio_channel_mask_get_bits(supported));
- switch (supported) {
- case AUDIO_CHANNEL_IN_FRONT_BACK:
- case AUDIO_CHANNEL_IN_STEREO:
- if (channelMask == AUDIO_CHANNEL_IN_MONO) {
- match = 1000;
- }
- break;
- case AUDIO_CHANNEL_IN_MONO:
- if (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
- || channelMask == AUDIO_CHANNEL_IN_STEREO) {
- match = 1000;
- }
- break;
- default:
- break;
- }
- }
- if (match > bestMatch) {
- bestMatch = match;
- updatedChannelMask = supported;
- }
- }
- }
- return bestMatch > 0 ? NO_ERROR : BAD_VALUE;
-}
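
// Worked example of the scoring above (record thread, masks illustrative): a request
// for AUDIO_CHANNEL_IN_MONO against a profile listing AUDIO_CHANNEL_IN_STEREO hits
// the legacy-conversion case and scores 1000, winning outright. A two-channel index
// mask against a positional stereo profile scores popcount(0x3) + 100 = 102, since
// the representation mismatch is forgiven for profiles with at most two channels.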
-
-void AudioProfile::dump(String8 *dst, int spaces) const
-{
- dst->appendFormat("%s%s%s\n", mIsDynamicFormat ? "[dynamic format]" : "",
- mIsDynamicChannels ? "[dynamic channels]" : "",
- mIsDynamicRate ? "[dynamic rates]" : "");
- if (mName.length() != 0) {
- dst->appendFormat("%*s- name: %s\n", spaces, "", mName.string());
- }
- std::string formatLiteral;
- if (FormatConverter::toString(mFormat, formatLiteral)) {
- dst->appendFormat("%*s- format: %s\n", spaces, "", formatLiteral.c_str());
- }
- if (!mSamplingRates.empty()) {
- dst->appendFormat("%*s- sampling rates:", spaces, "");
- for (auto it = mSamplingRates.begin(); it != mSamplingRates.end();) {
- dst->appendFormat("%d", *it);
- dst->append(++it == mSamplingRates.end() ? "" : ", ");
- }
- dst->append("\n");
- }
-
- if (!mChannelMasks.empty()) {
- dst->appendFormat("%*s- channel masks:", spaces, "");
- for (auto it = mChannelMasks.begin(); it != mChannelMasks.end();) {
- dst->appendFormat("0x%04x", *it);
- dst->append(++it == mChannelMasks.end() ? "" : ", ");
- }
- dst->append("\n");
- }
-}
-
-ssize_t AudioProfileVector::add(const sp<AudioProfile> &profile)
-{
- ssize_t index = size();
- push_back(profile);
- // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
- std::sort(begin(), end(),
- [](const sp<AudioProfile> & a, const sp<AudioProfile> & b)
- {
- return AudioPort::compareFormats(a->getFormat(), b->getFormat()) < 0;
- });
- return index;
-}
-
-ssize_t AudioProfileVector::addProfileFromHal(const sp<AudioProfile> &profileToAdd)
-{
- // Check valid profile to add:
- if (!profileToAdd->hasValidFormat()) {
- return -1;
- }
- if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
- FormatVector formats;
- formats.push_back(profileToAdd->getFormat());
- setFormats(FormatVector(formats));
- return 0;
- }
- if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
- setSampleRatesFor(profileToAdd->getSampleRates(), profileToAdd->getFormat());
- return 0;
- }
- if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
- setChannelsFor(profileToAdd->getChannels(), profileToAdd->getFormat());
- return 0;
- }
- // Go through the list of profiles to avoid duplicates
- for (size_t profileIndex = 0; profileIndex < size(); profileIndex++) {
- const sp<AudioProfile> &profile = at(profileIndex);
- if (profile->isValid() && profile == profileToAdd) {
- // Nothing to do
- return profileIndex;
- }
- }
- profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
- return add(profileToAdd);
-}
-
-status_t AudioProfileVector::checkExactProfile(uint32_t samplingRate,
- audio_channel_mask_t channelMask,
- audio_format_t format) const
-{
- if (empty()) {
- return NO_ERROR;
- }
-
- for (const auto& profile : *this) {
- if (profile->checkExact(samplingRate, channelMask, format) == NO_ERROR) {
- return NO_ERROR;
- }
- }
- return BAD_VALUE;
-}
-
-status_t AudioProfileVector::checkCompatibleProfile(uint32_t &samplingRate,
- audio_channel_mask_t &channelMask,
- audio_format_t &format,
- audio_port_type_t portType,
- audio_port_role_t portRole) const
-{
- if (empty()) {
- return NO_ERROR;
- }
-
- const bool checkInexact = // when port is input and format is linear pcm
- portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK
- && audio_is_linear_pcm(format);
-
- // iterate from best format to worst format (reverse order)
- for (ssize_t i = size() - 1; i >= 0 ; --i) {
- const sp<AudioProfile> profile = at(i);
- audio_format_t formatToCompare = profile->getFormat();
- if (formatToCompare == format ||
- (checkInexact
- && formatToCompare != AUDIO_FORMAT_DEFAULT
- && audio_is_linear_pcm(formatToCompare))) {
- // Compatible profile has been found; check if this profile has compatible
- // rate and channels as well
- audio_channel_mask_t updatedChannels;
- uint32_t updatedRate;
- if (profile->checkCompatibleChannelMask(channelMask, updatedChannels,
- portType, portRole) == NO_ERROR &&
- profile->checkCompatibleSamplingRate(samplingRate, updatedRate) == NO_ERROR) {
- // for inexact checks we take the first linear pcm format due to sorting.
- format = formatToCompare;
- channelMask = updatedChannels;
- samplingRate = updatedRate;
- return NO_ERROR;
- }
- }
- }
- return BAD_VALUE;
-}
-
-void AudioProfileVector::clearProfiles()
-{
- for (auto it = begin(); it != end();) {
- if ((*it)->isDynamicFormat() && (*it)->hasValidFormat()) {
- it = erase(it);
- } else {
- (*it)->clear();
- ++it;
- }
- }
-}
-
-// Returns an intersection between two possibly unsorted vectors and the contents of 'order'.
-// The result is ordered according to 'order'.
-template<typename T, typename Order>
-std::vector<typename T::value_type> intersectFilterAndOrder(
- const T& input1, const T& input2, const Order& order)
-{
- std::set<typename T::value_type> set1{input1.begin(), input1.end()};
- std::set<typename T::value_type> set2{input2.begin(), input2.end()};
- std::set<typename T::value_type> common;
- std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
- std::inserter(common, common.begin()));
- std::vector<typename T::value_type> result;
- for (const auto& e : order) {
- if (common.find(e) != common.end()) result.push_back(e);
- }
- return result;
-}
-
-// Intersect two possibly unsorted vectors, return common elements according to 'comp' ordering.
-// 'comp' is a comparator function.
-template<typename T, typename Compare>
-std::vector<typename T::value_type> intersectAndOrder(
- const T& input1, const T& input2, Compare comp)
-{
- std::set<typename T::value_type, Compare> set1{input1.begin(), input1.end(), comp};
- std::set<typename T::value_type, Compare> set2{input2.begin(), input2.end(), comp};
- std::vector<typename T::value_type> result;
- std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
- std::back_inserter(result), comp);
- return result;
-}
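
// A brief illustration of the two helpers above (inputs illustrative):
// intersectFilterAndOrder({A, B, C}, {C, A}, /*order=*/{C, B, A}) keeps only the
// common elements and returns them in the order given, i.e. {C, A};
// intersectAndOrder with std::greater<> returns the common elements sorted in
// descending order, which findBestMatchingOutputConfig below uses when higher
// sampling rates are preferred.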
-
-status_t AudioProfileVector::findBestMatchingOutputConfig(const AudioProfileVector& outputProfiles,
- const std::vector<audio_format_t>& preferredFormats,
- const std::vector<audio_channel_mask_t>& preferredOutputChannels,
- bool preferHigherSamplingRates,
- audio_config_base *bestOutputConfig) const
-{
- auto formats = intersectFilterAndOrder(getSupportedFormats(),
- outputProfiles.getSupportedFormats(), preferredFormats);
- // Pick the best compatible profile.
- for (const auto& f : formats) {
- sp<AudioProfile> inputProfile = getFirstValidProfileFor(f);
- sp<AudioProfile> outputProfile = outputProfiles.getFirstValidProfileFor(f);
- if (inputProfile == nullptr || outputProfile == nullptr) {
- continue;
- }
- auto channels = intersectFilterAndOrder(asOutMask(inputProfile->getChannels()),
- outputProfile->getChannels(), preferredOutputChannels);
- if (channels.empty()) {
- continue;
- }
- auto sampleRates = preferHigherSamplingRates ?
- intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
- std::greater<typename SampleRateSet::value_type>()) :
- intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
- std::less<typename SampleRateSet::value_type>());
- if (sampleRates.empty()) {
- continue;
- }
- ALOGD("%s() found channel mask %#x and sample rate %d for format %#x.",
- __func__, *channels.begin(), *sampleRates.begin(), f);
- bestOutputConfig->format = f;
- bestOutputConfig->sample_rate = *sampleRates.begin();
- bestOutputConfig->channel_mask = *channels.begin();
- return NO_ERROR;
- }
- return BAD_VALUE;
-}
-
-sp<AudioProfile> AudioProfileVector::getFirstValidProfile() const
-{
- for (const auto &profile : *this) {
- if (profile->isValid()) {
- return profile;
- }
- }
- return nullptr;
-}
-
-sp<AudioProfile> AudioProfileVector::getFirstValidProfileFor(audio_format_t format) const
-{
- for (const auto &profile : *this) {
- if (profile->isValid() && profile->getFormat() == format) {
- return profile;
- }
- }
- return nullptr;
-}
-
-FormatVector AudioProfileVector::getSupportedFormats() const
-{
- FormatVector supportedFormats;
- for (const auto &profile : *this) {
- if (profile->hasValidFormat()) {
- supportedFormats.push_back(profile->getFormat());
- }
- }
- return supportedFormats;
-}
-
-bool AudioProfileVector::hasDynamicChannelsFor(audio_format_t format) const
-{
- for (const auto &profile : *this) {
- if (profile->getFormat() == format && profile->isDynamicChannels()) {
- return true;
- }
- }
- return false;
-}
-
-bool AudioProfileVector::hasDynamicProfile() const
-{
- for (const auto &profile : *this) {
- if (profile->isDynamic()) {
- return true;
- }
- }
- return false;
-}
-
-bool AudioProfileVector::hasDynamicRateFor(audio_format_t format) const
-{
- for (const auto &profile : *this) {
- if (profile->getFormat() == format && profile->isDynamicRate()) {
- return true;
- }
- }
- return false;
-}
-
-void AudioProfileVector::setFormats(const FormatVector &formats)
-{
- // Only allow changing the format of a dynamic profile
- sp<AudioProfile> dynamicFormatProfile = getProfileFor(gDynamicFormat);
- if (dynamicFormatProfile == 0) {
- return;
- }
- for (const auto &format : formats) {
- sp<AudioProfile> profile = new AudioProfile(format,
- dynamicFormatProfile->getChannels(),
- dynamicFormatProfile->getSampleRates());
- profile->setDynamicFormat(true);
- profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
- profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
- add(profile);
- }
-}
-
-void AudioProfileVector::dump(String8 *dst, int spaces) const
-{
- dst->appendFormat("%*s- Profiles:\n", spaces, "");
- for (size_t i = 0; i < size(); i++) {
- dst->appendFormat("%*sProfile %zu:", spaces + 4, "", i);
- at(i)->dump(dst, spaces + 8);
- }
-}
-
-sp<AudioProfile> AudioProfileVector::getProfileFor(audio_format_t format) const
-{
- for (const auto &profile : *this) {
- if (profile->getFormat() == format) {
- return profile;
- }
- }
- return nullptr;
-}
-
-void AudioProfileVector::setSampleRatesFor(
- const SampleRateSet &sampleRates, audio_format_t format)
-{
- for (const auto &profile : *this) {
- if (profile->getFormat() == format && profile->isDynamicRate()) {
- if (profile->hasValidRates()) {
- // Need to create a new profile with same format
- sp<AudioProfile> profileToAdd = new AudioProfile(format, profile->getChannels(),
- sampleRates);
- profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
- add(profileToAdd);
- } else {
- profile->setSampleRates(sampleRates);
- }
- return;
- }
- }
-}
-
-void AudioProfileVector::setChannelsFor(const ChannelMaskSet &channelMasks, audio_format_t format)
-{
- for (const auto &profile : *this) {
- if (profile->getFormat() == format && profile->isDynamicChannels()) {
- if (profile->hasValidChannels()) {
- // Need to create a new profile with same format
- sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMasks,
- profile->getSampleRates());
- profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
- add(profileToAdd);
- } else {
- profile->setChannels(channelMasks);
- }
- return;
- }
- }
-}
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfileVectorHelper.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfileVectorHelper.cpp
new file mode 100644
index 0000000..8ccb8b9
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfileVectorHelper.cpp
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <set>
+#include <string>
+
+#define LOG_TAG "APM::AudioProfileVectorHelper"
+//#define LOG_NDEBUG 0
+
+#include <media/AudioContainers.h>
+#include <media/AudioResamplerPublic.h>
+#include <utils/Errors.h>
+
+#include "AudioProfileVectorHelper.h"
+#include "HwModule.h"
+#include "PolicyAudioPort.h"
+#include "policy.h"
+
+namespace android {
+
+void sortAudioProfiles(AudioProfileVector &audioProfileVector) {
+ std::sort(audioProfileVector.begin(), audioProfileVector.end(),
+ [](const sp<AudioProfile> & a, const sp<AudioProfile> & b)
+ {
+ return PolicyAudioPort::compareFormats(a->getFormat(), b->getFormat()) < 0;
+ });
+}
+
+ssize_t addAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+ const sp<AudioProfile> &profile)
+{
+ ssize_t ret = audioProfileVector.add(profile);
+ // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
+ sortAudioProfiles(audioProfileVector);
+ return ret;
+}
+
+sp<AudioProfile> getAudioProfileForFormat(const AudioProfileVector &audioProfileVector,
+ audio_format_t format)
+{
+ for (const auto &profile : audioProfileVector) {
+ if (profile->getFormat() == format) {
+ return profile;
+ }
+ }
+ return nullptr;
+}
+
+void setSampleRatesForAudioProfiles(AudioProfileVector &audioProfileVector,
+ const SampleRateSet &sampleRateSet,
+ audio_format_t format)
+{
+ for (const auto &profile : audioProfileVector) {
+ if (profile->getFormat() == format && profile->isDynamicRate()) {
+ if (profile->hasValidRates()) {
+ // Need to create a new profile with the same format
+ sp<AudioProfile> profileToAdd = new AudioProfile(
+ format, profile->getChannels(), sampleRateSet);
+ profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+ addAudioProfileAndSort(audioProfileVector, profileToAdd);
+ } else {
+ profile->setSampleRates(sampleRateSet);
+ }
+ return;
+ }
+ }
+}
+
+void setChannelsForAudioProfiles(AudioProfileVector &audioProfileVector,
+ const ChannelMaskSet &channelMaskSet,
+ audio_format_t format)
+{
+ for (const auto &profile : audioProfileVector) {
+ if (profile->getFormat() == format && profile->isDynamicChannels()) {
+ if (profile->hasValidChannels()) {
+ // Need to create a new profile with the same format
+ sp<AudioProfile> profileToAdd = new AudioProfile(format, channelMaskSet,
+ profile->getSampleRates());
+ profileToAdd->setDynamicFormat(true); // need to set to allow cleaning
+ addAudioProfileAndSort(audioProfileVector, profileToAdd);
+ } else {
+ profile->setChannels(channelMaskSet);
+ }
+ return;
+ }
+ }
+}
+
+void addProfilesForFormats(AudioProfileVector &audioProfileVector, const FormatVector &formatVector)
+{
+ // Only allow to change the format of dynamic profile
+ sp<AudioProfile> dynamicFormatProfile = getAudioProfileForFormat(
+ audioProfileVector, gDynamicFormat);
+ if (!dynamicFormatProfile) {
+ return;
+ }
+ for (const auto &format : formatVector) {
+ sp<AudioProfile> profile = new AudioProfile(format,
+ dynamicFormatProfile->getChannels(),
+ dynamicFormatProfile->getSampleRates());
+ profile->setDynamicFormat(true);
+ profile->setDynamicChannels(dynamicFormatProfile->isDynamicChannels());
+ profile->setDynamicRate(dynamicFormatProfile->isDynamicRate());
+ addAudioProfileAndSort(audioProfileVector, profile);
+ }
+}
+
+void addDynamicAudioProfileAndSort(AudioProfileVector &audioProfileVector,
+ const sp<AudioProfile> &profileToAdd)
+{
+ // Check that the profile to add is valid:
+ if (!profileToAdd->hasValidFormat()) {
+ ALOGW("Adding dynamic audio profile without valid format");
+ return;
+ }
+ if (!profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+ FormatVector formats;
+ formats.push_back(profileToAdd->getFormat());
+ addProfilesForFormats(audioProfileVector, FormatVector(formats));
+ return;
+ }
+ if (!profileToAdd->hasValidChannels() && profileToAdd->hasValidRates()) {
+ setSampleRatesForAudioProfiles(
+ audioProfileVector, profileToAdd->getSampleRates(), profileToAdd->getFormat());
+ return;
+ }
+ if (profileToAdd->hasValidChannels() && !profileToAdd->hasValidRates()) {
+ setChannelsForAudioProfiles(
+ audioProfileVector, profileToAdd->getChannels(), profileToAdd->getFormat());
+ return;
+ }
+ // Go through the list of profiles to avoid duplicates
+ for (size_t profileIndex = 0; profileIndex < audioProfileVector.size(); profileIndex++) {
+ const sp<AudioProfile> &profile = audioProfileVector.at(profileIndex);
+ if (profile->isValid() && profile == profileToAdd) {
+ // Nothing to do
+ return;
+ }
+ }
+ profileToAdd->setDynamicFormat(true); // set the format as dynamic to allow removal
+ addAudioProfileAndSort(audioProfileVector, profileToAdd);
+}
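+
+// For illustration only (not part of this change): a typical dynamic update reported by the
+// HAL carries a valid format plus either rates or channels; a hypothetical sketch:
+//
+//   sp<AudioProfile> reported = new AudioProfile(AUDIO_FORMAT_PCM_16_BIT,
+//           ChannelMaskSet(), SampleRateSet({44100, 48000}));
+//   addDynamicAudioProfileAndSort(profiles, reported);
+//   // No valid channels but valid rates -> setSampleRatesForAudioProfiles() updates (or
+//   // clones) the dynamic-rate PCM 16-bit profile, then the vector is re-sorted.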
+
+void appendAudioProfiles(AudioProfileVector &audioProfileVector,
+ const AudioProfileVector &audioProfileVectorToAppend)
+{
+ audioProfileVector.insert(audioProfileVector.end(),
+ audioProfileVectorToAppend.begin(),
+ audioProfileVectorToAppend.end());
+}
+
+status_t checkExact(const sp<AudioProfile> &audioProfile,
+ uint32_t samplingRate,
+ audio_channel_mask_t channelMask,
+ audio_format_t format)
+{
+ if (audio_formats_match(format, audioProfile->getFormat()) &&
+ audioProfile->supportsChannels(channelMask) &&
+ audioProfile->supportsRate(samplingRate)) {
+ return NO_ERROR;
+ }
+ return BAD_VALUE;
+}
+
+status_t checkCompatibleSamplingRate(const sp<AudioProfile> &audioProfile,
+ uint32_t samplingRate,
+ uint32_t &updatedSamplingRate)
+{
+ ALOG_ASSERT(samplingRate > 0);
+
+ const SampleRateSet sampleRates = audioProfile->getSampleRates();
+ if (sampleRates.empty()) {
+ updatedSamplingRate = samplingRate;
+ return NO_ERROR;
+ }
+
+ // Search for the closest supported sampling rate that is above (preferred)
+ // or below (acceptable) the desired sampling rate, within a permitted ratio.
+ // The sampling rates are sorted in ascending order.
+ auto desiredRate = sampleRates.lower_bound(samplingRate);
+
+ // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
+ if (desiredRate != sampleRates.end()) {
+ if (*desiredRate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
+ updatedSamplingRate = *desiredRate;
+ return NO_ERROR;
+ }
+ }
+ // But if we have to up-sample from a lower sampling rate, that's OK.
+ if (desiredRate != sampleRates.begin()) {
+ uint32_t candidate = *(--desiredRate);
+ if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
+ updatedSamplingRate = candidate;
+ return NO_ERROR;
+ }
+ }
+ // leave updatedSamplingRate unmodified
+ return BAD_VALUE;
+}
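+
+// For illustration only (not part of this change): with supported rates {8000, 16000, 48000},
+// a request for 44100 resolves to 48000 (down-sampling from the next rate at or above the
+// request is preferred, within AUDIO_RESAMPLER_DOWN_RATIO_MAX), while a request for 96000 is
+// only satisfied by up-sampling from 48000 if 48000 * AUDIO_RESAMPLER_UP_RATIO_MAX >= 96000;
+// otherwise BAD_VALUE is returned and updatedSamplingRate is left untouched.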
+
+status_t checkCompatibleChannelMask(const sp<AudioProfile> &audioProfile,
+ audio_channel_mask_t channelMask,
+ audio_channel_mask_t &updatedChannelMask,
+ audio_port_type_t portType,
+ audio_port_role_t portRole)
+{
+ const ChannelMaskSet channelMasks = audioProfile->getChannels();
+ if (channelMasks.empty()) {
+ updatedChannelMask = channelMask;
+ return NO_ERROR;
+ }
+ const bool isRecordThread = portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK;
+ const bool isIndex = audio_channel_mask_get_representation(channelMask)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX;
+ const uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
+ int bestMatch = 0;
+ for (const auto &supported : channelMasks) {
+ if (supported == channelMask) {
+ // Exact matches always taken.
+ updatedChannelMask = channelMask;
+ return NO_ERROR;
+ }
+
+ // AUDIO_CHANNEL_NONE (value: 0) is used for dynamic channel support
+ if (isRecordThread && supported != AUDIO_CHANNEL_NONE) {
+ // Approximate (best) match:
+ // The match score measures how well the supported channel mask matches the
+ // desired mask, where increasing-is-better.
+ //
+ // TODO: Some tweaks may be needed.
+ // Should be a static function of the data processing library.
+ //
+ // In priority:
+ // match score = 1000 if legacy channel conversion equivalent (always prefer this)
+ // OR
+ // match score += 100 if the channel mask representations match
+ // match score += number of channels matched.
+ // match score += 100 if the channel mask representations DO NOT match
+ // but the profile has a positional channel mask and fewer than 2 channels.
+ // This follows the audio HAL convention of not listing index masks for fewer than 2 channels.
+ //
+ // If there are no matched channels, the mask may still be accepted
+ // but the playback or record will be silent.
+ const bool isSupportedIndex = (audio_channel_mask_get_representation(supported)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX);
+ const uint32_t supportedChannelCount = audio_channel_count_from_in_mask(supported);
+ int match;
+ if (isIndex && isSupportedIndex) {
+ // index equivalence
+ match = 100 + __builtin_popcount(
+ audio_channel_mask_get_bits(channelMask)
+ & audio_channel_mask_get_bits(supported));
+ } else if (isIndex && !isSupportedIndex) {
+ const uint32_t equivalentBits = (1 << supportedChannelCount) - 1;
+ match = __builtin_popcount(
+ audio_channel_mask_get_bits(channelMask) & equivalentBits);
+ if (supportedChannelCount <= FCC_2) {
+ match += 100;
+ }
+ } else if (!isIndex && isSupportedIndex) {
+ const uint32_t equivalentBits = (1 << channelCount) - 1;
+ match = __builtin_popcount(
+ equivalentBits & audio_channel_mask_get_bits(supported));
+ } else {
+ // positional equivalence
+ match = 100 + __builtin_popcount(
+ audio_channel_mask_get_bits(channelMask)
+ & audio_channel_mask_get_bits(supported));
+ switch (supported) {
+ case AUDIO_CHANNEL_IN_FRONT_BACK:
+ case AUDIO_CHANNEL_IN_STEREO:
+ if (channelMask == AUDIO_CHANNEL_IN_MONO) {
+ match = 1000;
+ }
+ break;
+ case AUDIO_CHANNEL_IN_MONO:
+ if (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
+ || channelMask == AUDIO_CHANNEL_IN_STEREO) {
+ match = 1000;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ if (match > bestMatch) {
+ bestMatch = match;
+ updatedChannelMask = supported;
+ }
+ }
+ }
+ return bestMatch > 0 ? NO_ERROR : BAD_VALUE;
+}
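+
+// For illustration only (not part of this change): on a record thread, a mono request against
+// a profile that only lists stereo is still accepted via the legacy-conversion score of 1000:
+//
+//   audio_channel_mask_t updated;
+//   checkCompatibleChannelMask(profile, AUDIO_CHANNEL_IN_MONO, updated,
+//           AUDIO_PORT_TYPE_MIX, AUDIO_PORT_ROLE_SINK);
+//   // -> NO_ERROR with updated == AUDIO_CHANNEL_IN_STEREO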
+
+status_t checkExactProfile(const AudioProfileVector& audioProfileVector,
+ const uint32_t samplingRate,
+ audio_channel_mask_t channelMask,
+ audio_format_t format)
+{
+ if (audioProfileVector.empty()) {
+ return NO_ERROR;
+ }
+
+ for (const auto& profile : audioProfileVector) {
+ if (checkExact(profile, samplingRate, channelMask, format) == NO_ERROR) {
+ return NO_ERROR;
+ }
+ }
+ return BAD_VALUE;
+}
+
+status_t checkCompatibleProfile(const AudioProfileVector &audioProfileVector,
+ uint32_t &samplingRate,
+ audio_channel_mask_t &channelMask,
+ audio_format_t &format,
+ audio_port_type_t portType,
+ audio_port_role_t portRole)
+{
+ if (audioProfileVector.empty()) {
+ return NO_ERROR;
+ }
+
+ const bool checkInexact = // when port is input and format is linear pcm
+ portType == AUDIO_PORT_TYPE_MIX && portRole == AUDIO_PORT_ROLE_SINK
+ && audio_is_linear_pcm(format);
+
+ // iterate from best format to worst format (reverse order)
+ for (ssize_t i = audioProfileVector.size() - 1; i >= 0 ; --i) {
+ const sp<AudioProfile> profile = audioProfileVector.at(i);
+ audio_format_t formatToCompare = profile->getFormat();
+ if (formatToCompare == format ||
+ (checkInexact
+ && formatToCompare != AUDIO_FORMAT_DEFAULT
+ && audio_is_linear_pcm(formatToCompare))) {
+ // A compatible profile has been found; check whether it also has a compatible
+ // rate and channels.
+ audio_channel_mask_t updatedChannels;
+ uint32_t updatedRate;
+ if (checkCompatibleChannelMask(profile, channelMask, updatedChannels,
+ portType, portRole) == NO_ERROR &&
+ checkCompatibleSamplingRate(profile, samplingRate, updatedRate) == NO_ERROR) {
+ // for inexact checks we take the first linear pcm format due to sorting.
+ format = formatToCompare;
+ channelMask = updatedChannels;
+ samplingRate = updatedRate;
+ return NO_ERROR;
+ }
+ }
+ }
+ return BAD_VALUE;
+}
+
+// Returns the intersection of two possibly unsorted containers, keeping only elements that
+// also appear in 'order'. The result is ordered according to 'order'.
+template<typename T, typename Order>
+std::vector<typename T::value_type> intersectFilterAndOrder(
+ const T& input1, const T& input2, const Order& order)
+{
+ std::set<typename T::value_type> set1{input1.begin(), input1.end()};
+ std::set<typename T::value_type> set2{input2.begin(), input2.end()};
+ std::set<typename T::value_type> common;
+ std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+ std::inserter(common, common.begin()));
+ std::vector<typename T::value_type> result;
+ for (const auto& e : order) {
+ if (common.find(e) != common.end()) result.push_back(e);
+ }
+ return result;
+}
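+
+// For illustration only (not part of this change): the result follows the order of the third
+// argument, e.g. with std::vector<int> a{1, 2, 3}, b{3, 2, 4} and order{3, 1, 2},
+// intersectFilterAndOrder(a, b, order) yields {3, 2}.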
+
+// Intersect two possibly unsorted vectors, return common elements according to 'comp' ordering.
+// 'comp' is a comparator function.
+template<typename T, typename Compare>
+std::vector<typename T::value_type> intersectAndOrder(
+ const T& input1, const T& input2, Compare comp)
+{
+ std::set<typename T::value_type, Compare> set1{input1.begin(), input1.end(), comp};
+ std::set<typename T::value_type, Compare> set2{input2.begin(), input2.end(), comp};
+ std::vector<typename T::value_type> result;
+ std::set_intersection(set1.begin(), set1.end(), set2.begin(), set2.end(),
+ std::back_inserter(result), comp);
+ return result;
+}
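+
+// For illustration only (not part of this change): intersectAndOrder(SampleRateSet{44100, 48000},
+// SampleRateSet{48000, 96000}, std::greater<uint32_t>()) yields {48000}; with more than one
+// common rate, the comparator decides whether the highest or the lowest comes first.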
+
+status_t findBestMatchingOutputConfig(
+ const AudioProfileVector &audioProfileVector,
+ const AudioProfileVector &outputProfileVector,
+ const std::vector<audio_format_t> &preferredFormatVector, // order: most pref -> least pref
+ const std::vector<audio_channel_mask_t> &preferredOutputChannelVector,
+ bool preferHigherSamplingRates,
+ audio_config_base &bestOutputConfig)
+{
+ auto formats = intersectFilterAndOrder(audioProfileVector.getSupportedFormats(),
+ outputProfileVector.getSupportedFormats(), preferredFormatVector);
+ // Pick the best compatible profile.
+ for (const auto& f : formats) {
+ sp<AudioProfile> inputProfile = audioProfileVector.getFirstValidProfileFor(f);
+ sp<AudioProfile> outputProfile = outputProfileVector.getFirstValidProfileFor(f);
+ if (inputProfile == nullptr || outputProfile == nullptr) {
+ continue;
+ }
+ auto channels = intersectFilterAndOrder(asOutMask(inputProfile->getChannels()),
+ outputProfile->getChannels(), preferredOutputChannelVector);
+ if (channels.empty()) {
+ continue;
+ }
+ auto sampleRates = preferHigherSamplingRates ?
+ intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+ std::greater<typename SampleRateSet::value_type>()) :
+ intersectAndOrder(inputProfile->getSampleRates(), outputProfile->getSampleRates(),
+ std::less<typename SampleRateSet::value_type>());
+ if (sampleRates.empty()) {
+ continue;
+ }
+ ALOGD("%s() found channel mask %#x and sample rate %d for format %#x.",
+ __func__, *channels.begin(), *sampleRates.begin(), f);
+ bestOutputConfig.format = f;
+ bestOutputConfig.sample_rate = *sampleRates.begin();
+ bestOutputConfig.channel_mask = *channels.begin();
+ return NO_ERROR;
+ }
+ return BAD_VALUE;
+}
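+
+// For illustration only (not part of this change): a caller preferring float over 16-bit PCM
+// and stereo output at the highest common rate might use the helper roughly like this
+// (the profile vector names are hypothetical):
+//
+//   audio_config_base bestConfig;
+//   if (findBestMatchingOutputConfig(inputProfiles, outputProfiles,
+//           {AUDIO_FORMAT_PCM_FLOAT, AUDIO_FORMAT_PCM_16_BIT},
+//           {AUDIO_CHANNEL_OUT_STEREO},
+//           true /*preferHigherSamplingRates*/, bestConfig) == NO_ERROR) {
+//       // bestConfig.format / sample_rate / channel_mask hold the selection.
+//   }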
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
index 92cbe4e..2a18f19 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioRoute.cpp
@@ -26,25 +26,26 @@
void AudioRoute::dump(String8 *dst, int spaces) const
{
dst->appendFormat("%*s- Type: %s\n", spaces, "", mType == AUDIO_ROUTE_MUX ? "Mux" : "Mix");
- dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().string());
+ dst->appendFormat("%*s- Sink: %s\n", spaces, "", mSink->getTagName().c_str());
if (mSources.size() != 0) {
dst->appendFormat("%*s- Sources: \n", spaces, "");
for (size_t i = 0; i < mSources.size(); i++) {
- dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().string());
+ dst->appendFormat("%*s%s \n", spaces + 4, "", mSources[i]->getTagName().c_str());
}
}
dst->append("\n");
}
-bool AudioRoute::supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const
+bool AudioRoute::supportsPatch(const sp<PolicyAudioPort> &srcPort,
+ const sp<PolicyAudioPort> &dstPort) const
{
if (mSink == 0 || dstPort == 0 || dstPort != mSink) {
return false;
}
- ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().string());
+ ALOGV("%s: sinks %s matching", __FUNCTION__, mSink->getTagName().c_str());
for (const auto &sourcePort : mSources) {
if (sourcePort == srcPort) {
- ALOGV("%s: sources %s matching", __FUNCTION__, sourcePort->getTagName().string());
+ ALOGV("%s: sources %s matching", __FUNCTION__, sourcePort->getTagName().c_str());
return true;
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index e395caa..0587041 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -26,22 +26,16 @@
namespace android {
-DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const String8 &tagName) :
+DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const std::string &tagName) :
DeviceDescriptor(type, FormatVector{}, tagName)
{
}
DeviceDescriptor::DeviceDescriptor(audio_devices_t type, const FormatVector &encodedFormats,
- const String8 &tagName) :
- AudioPort(String8(""), AUDIO_PORT_TYPE_DEVICE,
- audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
- AUDIO_PORT_ROLE_SOURCE),
- mTagName(tagName), mDeviceType(type), mEncodedFormats(encodedFormats)
+ const std::string &tagName) :
+ DeviceDescriptorBase(type), mTagName(tagName), mEncodedFormats(encodedFormats)
{
mCurrentEncodedFormat = AUDIO_FORMAT_DEFAULT;
- if (audio_is_remote_submix_device(type)) {
- mAddress = String8("0");
- }
/* If framework runs against a pre 5.0 Audio HAL, encoded formats are absent from the config.
* FIXME: APM should know the version of the HAL and don't add the formats for V5.0.
* For now, the workaround to remove AC3 and IEC61937 support on HDMI is to declare
@@ -53,20 +47,15 @@
}
}
-audio_port_handle_t DeviceDescriptor::getId() const
-{
- return mId;
-}
-
void DeviceDescriptor::attach(const sp<HwModule>& module)
{
- AudioPort::attach(module);
+ PolicyAudioPort::attach(module);
mId = getNextUniqueId();
}
void DeviceDescriptor::detach() {
mId = AUDIO_PORT_HANDLE_NONE;
- AudioPort::detach();
+ PolicyAudioPort::detach();
}
template<typename T>
@@ -117,13 +106,69 @@
return false;
}
+status_t DeviceDescriptor::applyAudioPortConfig(const struct audio_port_config *config,
+ audio_port_config *backupConfig)
+{
+ struct audio_port_config localBackupConfig = { .config_mask = config->config_mask };
+ status_t status = NO_ERROR;
+
+ toAudioPortConfig(&localBackupConfig);
+ if ((status = validationBeforeApplyConfig(config)) == NO_ERROR) {
+ AudioPortConfig::applyAudioPortConfig(config, backupConfig);
+ applyPolicyAudioPortConfig(config);
+ }
+
+ if (backupConfig != NULL) {
+ *backupConfig = localBackupConfig;
+ }
+ return status;
+}
+
+void DeviceDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig) const
+{
+ DeviceDescriptorBase::toAudioPortConfig(dstConfig, srcConfig);
+ toPolicyAudioPortConfig(dstConfig, srcConfig);
+
+ dstConfig->ext.device.hw_module = getModuleHandle();
+}
+
+void DeviceDescriptor::toAudioPort(struct audio_port *port) const
+{
+ ALOGV("DeviceDescriptor::toAudioPort() handle %d type %08x", mId, mDeviceType);
+ DeviceDescriptorBase::toAudioPort(port);
+ port->ext.device.hw_module = getModuleHandle();
+}
+
+void DeviceDescriptor::importAudioPortAndPickAudioProfile(
+ const sp<PolicyAudioPort>& policyPort, bool force) {
+ if (!force && !policyPort->asAudioPort()->hasDynamicAudioProfile()) {
+ return;
+ }
+ AudioPort::importAudioPort(policyPort->asAudioPort());
+ policyPort->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
+}
+
+void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
+{
+ String8 extraInfo;
+ if (!mTagName.empty()) {
+ extraInfo.appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.c_str());
+ }
+
+ std::string descBaseDumpStr;
+ DeviceDescriptorBase::dump(&descBaseDumpStr, spaces, index, extraInfo.string(), verbose);
+ dst->append(descBaseDumpStr.c_str());
+}
+
+
void DeviceVector::refreshTypes()
{
- mDeviceTypes = AUDIO_DEVICE_NONE;
+ mDeviceTypes.clear();
for (size_t i = 0; i < size(); i++) {
- mDeviceTypes |= itemAt(i)->type();
+ mDeviceTypes.insert(itemAt(i)->type());
}
- ALOGV("DeviceVector::refreshTypes() mDeviceTypes %08x", mDeviceTypes);
+ ALOGV("DeviceVector::refreshTypes() mDeviceTypes %s", dumpDeviceTypes(mDeviceTypes).c_str());
}
ssize_t DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
@@ -198,17 +243,6 @@
return devices;
}
-audio_devices_t DeviceVector::getDeviceTypesFromHwModule(audio_module_handle_t moduleHandle) const
-{
- audio_devices_t deviceTypes = AUDIO_DEVICE_NONE;
- for (const auto& device : *this) {
- if (device->getModuleHandle() == moduleHandle) {
- deviceTypes |= device->type();
- }
- }
- return deviceTypes;
-}
-
sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, const String8& address,
audio_format_t format) const
{
@@ -218,11 +252,11 @@
// If format is specified, match it and ignore address
// Otherwise if address is specified match it
// Otherwise always match
- if (((address == "" || itemAt(i)->address() == address) &&
+ if (((address == "" || (itemAt(i)->address().compare(address.c_str()) == 0)) &&
format == AUDIO_FORMAT_DEFAULT) ||
(itemAt(i)->supportsFormat(format) && format != AUDIO_FORMAT_DEFAULT)) {
device = itemAt(i);
- if (itemAt(i)->address() == address) {
+ if (itemAt(i)->address().compare(address.c_str()) == 0) {
break;
}
}
@@ -245,15 +279,14 @@
return nullptr;
}
-DeviceVector DeviceVector::getDevicesFromTypeMask(audio_devices_t type) const
+DeviceVector DeviceVector::getDevicesFromTypes(const DeviceTypeSet& types) const
{
DeviceVector devices;
- bool isOutput = audio_is_output_devices(type);
- type &= ~AUDIO_DEVICE_BIT_IN;
- for (size_t i = 0; (i < size()) && (type != AUDIO_DEVICE_NONE); i++) {
- bool curIsOutput = audio_is_output_devices(itemAt(i)->type());
- audio_devices_t curType = itemAt(i)->type() & ~AUDIO_DEVICE_BIT_IN;
- if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
+ if (types.empty()) {
+ return devices;
+ }
+ for (size_t i = 0; i < size(); i++) {
+ if (types.count(itemAt(i)->type()) != 0) {
devices.add(itemAt(i));
ALOGV("DeviceVector::%s() for type %08x found %p",
__func__, itemAt(i)->type(), itemAt(i).get());
@@ -262,7 +295,7 @@
return devices;
}
-sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const String8 &tagName) const
+sp<DeviceDescriptor> DeviceVector::getDeviceFromTagName(const std::string &tagName) const
{
for (const auto& device : *this) {
if (device->getTagName() == tagName) {
@@ -277,7 +310,7 @@
{
DeviceVector devices;
for (auto deviceType : orderedTypes) {
- if (!(devices = getDevicesFromTypeMask(deviceType)).isEmpty()) {
+ if (!(devices = getDevicesFromType(deviceType)).isEmpty()) {
break;
}
}
@@ -295,9 +328,27 @@
return device;
}
+sp<DeviceDescriptor> DeviceVector::getDeviceForOpening() const
+{
+ if (isEmpty()) {
+ // Return nullptr if this collection is empty.
+ return nullptr;
+ } else if (areAllOfSameDeviceType(types(), audio_is_input_device)) {
+ // For the input case, return the device only when there is exactly one.
+ return size() > 1 ? nullptr : *begin();
+ } else if (areAllOfSameDeviceType(types(), audio_is_output_device)) {
+ // For the output case, return the device descriptor according to the APM strategy.
+ audio_devices_t deviceType = apm_extract_one_audio_device(types());
+ return deviceType == AUDIO_DEVICE_NONE ? nullptr :
+ getDevice(deviceType, String8(""), AUDIO_FORMAT_DEFAULT);
+ }
+ // Return nullptr if the devices are not all input devices or all output devices.
+ return nullptr;
+}
+
void DeviceVector::replaceDevicesByType(
audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
- DeviceVector devicesToRemove = getDevicesFromTypeMask(typeToRemove);
+ DeviceVector devicesToRemove = getDevicesFromType(typeToRemove);
if (!devicesToRemove.isEmpty() && !devicesToAdd.isEmpty()) {
remove(devicesToRemove);
add(devicesToAdd);
@@ -315,84 +366,6 @@
}
}
-void DeviceDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
- const struct audio_port_config *srcConfig) const
-{
- dstConfig->config_mask = AUDIO_PORT_CONFIG_GAIN;
- if (mSamplingRate != 0) {
- dstConfig->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
- }
- if (mChannelMask != AUDIO_CHANNEL_NONE) {
- dstConfig->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
- }
- if (mFormat != AUDIO_FORMAT_INVALID) {
- dstConfig->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
- }
-
- if (srcConfig != NULL) {
- dstConfig->config_mask |= srcConfig->config_mask;
- }
-
- AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-
- dstConfig->id = mId;
- dstConfig->role = audio_is_output_device(mDeviceType) ?
- AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
- dstConfig->type = AUDIO_PORT_TYPE_DEVICE;
- dstConfig->ext.device.type = mDeviceType;
-
- //TODO Understand why this test is necessary. i.e. why at boot time does it crash
- // without the test?
- // This has been demonstrated to NOT be true (at start up)
- // ALOG_ASSERT(mModule != NULL);
- dstConfig->ext.device.hw_module = getModuleHandle();
- (void)audio_utils_strlcpy_zerofill(dstConfig->ext.device.address, mAddress.string());
-}
-
-void DeviceDescriptor::toAudioPort(struct audio_port *port) const
-{
- ALOGV("DeviceDescriptor::toAudioPort() handle %d type %08x", mId, mDeviceType);
- AudioPort::toAudioPort(port);
- port->id = mId;
- toAudioPortConfig(&port->active_config);
- port->ext.device.type = mDeviceType;
- port->ext.device.hw_module = getModuleHandle();
- (void)audio_utils_strlcpy_zerofill(port->ext.device.address, mAddress.string());
-}
-
-void DeviceDescriptor::importAudioPort(const sp<AudioPort>& port, bool force) {
- if (!force && !port->hasDynamicAudioProfile()) {
- return;
- }
- AudioPort::importAudioPort(port);
- port->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
-}
-
-void DeviceDescriptor::dump(String8 *dst, int spaces, int index, bool verbose) const
-{
- dst->appendFormat("%*sDevice %d:\n", spaces, "", index + 1);
- if (mId != 0) {
- dst->appendFormat("%*s- id: %2d\n", spaces, "", mId);
- }
- if (!mTagName.isEmpty()) {
- dst->appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.string());
- }
-
- dst->appendFormat("%*s- type: %-48s\n", spaces, "", ::android::toString(mDeviceType).c_str());
-
- if (mAddress.size() != 0) {
- dst->appendFormat("%*s- address: %-32s\n", spaces, "", mAddress.string());
- }
- AudioPort::dump(dst, spaces, verbose);
-}
-
-std::string DeviceDescriptor::toString() const
-{
- std::stringstream sstream;
- sstream << "type:0x" << std::hex << type() << ",@:" << mAddress;
- return sstream.str();
-}
-
std::string DeviceVector::toString() const
{
if (isEmpty()) {
@@ -441,13 +414,4 @@
return filteredDevices;
}
-void DeviceDescriptor::log() const
-{
- ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType,
- ::android::toString(mDeviceType).c_str(),
- mAddress.string());
-
- AudioPort::log(" ");
-}
-
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index 99e282e..0b4d3d4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -41,7 +41,7 @@
}
}
-status_t HwModule::addOutputProfile(const String8& name, const audio_config_t *config,
+status_t HwModule::addOutputProfile(const std::string& name, const audio_config_t *config,
audio_devices_t device, const String8& address)
{
sp<IOProfile> profile = new OutputProfile(name);
@@ -50,7 +50,7 @@
config->sample_rate));
sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
- devDesc->setAddress(address);
+ devDesc->setAddress(address.string());
addDynamicDevice(devDesc);
// Reciprocally attach the device to the module
devDesc->attach(this);
@@ -95,7 +95,7 @@
}
}
-status_t HwModule::removeOutputProfile(const String8& name)
+status_t HwModule::removeOutputProfile(const std::string& name)
{
for (size_t i = 0; i < mOutputProfiles.size(); i++) {
if (mOutputProfiles[i]->getName() == name) {
@@ -110,7 +110,7 @@
return NO_ERROR;
}
-status_t HwModule::addInputProfile(const String8& name, const audio_config_t *config,
+status_t HwModule::addInputProfile(const std::string& name, const audio_config_t *config,
audio_devices_t device, const String8& address)
{
sp<IOProfile> profile = new InputProfile(name);
@@ -118,19 +118,19 @@
config->sample_rate));
sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
- devDesc->setAddress(address);
+ devDesc->setAddress(address.string());
addDynamicDevice(devDesc);
// Reciprocally attach the device to the module
devDesc->attach(this);
profile->addSupportedDevice(devDesc);
ALOGV("addInputProfile() name %s rate %d mask 0x%08x",
- name.string(), config->sample_rate, config->channel_mask);
+ name.c_str(), config->sample_rate, config->channel_mask);
return addInputProfile(profile);
}
-status_t HwModule::removeInputProfile(const String8& name)
+status_t HwModule::removeInputProfile(const std::string& name)
{
for (size_t i = 0; i < mInputProfiles.size(); i++) {
if (mInputProfiles[i]->getName() == name) {
@@ -156,7 +156,7 @@
sp<DeviceDescriptor> HwModule::getRouteSinkDevice(const sp<AudioRoute> &route) const
{
sp<DeviceDescriptor> sinkDevice = 0;
- if (route->getSink()->getType() == AUDIO_PORT_TYPE_DEVICE) {
+ if (route->getSink()->asAudioPort()->getType() == AUDIO_PORT_TYPE_DEVICE) {
sinkDevice = mDeclaredDevices.getDeviceFromTagName(route->getSink()->getTagName());
}
return sinkDevice;
@@ -166,7 +166,7 @@
{
DeviceVector sourceDevices;
for (const auto& source : route->getSources()) {
- if (source->getType() == AUDIO_PORT_TYPE_DEVICE) {
+ if (source->asAudioPort()->getType() == AUDIO_PORT_TYPE_DEVICE) {
sourceDevices.add(mDeclaredDevices.getDeviceFromTagName(source->getTagName()));
}
}
@@ -186,20 +186,20 @@
for (const auto& stream : mInputProfiles) {
DeviceVector sourceDevices;
for (const auto& route : stream->getRoutes()) {
- sp<AudioPort> sink = route->getSink();
+ sp<PolicyAudioPort> sink = route->getSink();
if (sink == 0 || stream != sink) {
ALOGE("%s: Invalid route attached to input stream", __FUNCTION__);
continue;
}
DeviceVector sourceDevicesForRoute = getRouteSourceDevices(route);
if (sourceDevicesForRoute.isEmpty()) {
- ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
+ ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().c_str());
continue;
}
sourceDevices.add(sourceDevicesForRoute);
}
if (sourceDevices.isEmpty()) {
- ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().string());
+ ALOGE("%s: invalid source devices for %s", __FUNCTION__, stream->getName().c_str());
continue;
}
stream->setSupportedDevices(sourceDevices);
@@ -207,14 +207,14 @@
for (const auto& stream : mOutputProfiles) {
DeviceVector sinkDevices;
for (const auto& route : stream->getRoutes()) {
- sp<AudioPort> source = route->getSources().findByTagName(stream->getTagName());
+ sp<PolicyAudioPort> source = findByTagName(route->getSources(), stream->getTagName());
if (source == 0 || stream != source) {
ALOGE("%s: Invalid route attached to output stream", __FUNCTION__);
continue;
}
sp<DeviceDescriptor> sinkDevice = getRouteSinkDevice(route);
if (sinkDevice == 0) {
- ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().string());
+ ALOGE("%s: invalid sink device for %s", __FUNCTION__, stream->getName().c_str());
continue;
}
sinkDevices.add(sinkDevice);
@@ -229,7 +229,8 @@
mHandle = handle;
}
-bool HwModule::supportsPatch(const sp<AudioPort> &srcPort, const sp<AudioPort> &dstPort) const {
+bool HwModule::supportsPatch(const sp<PolicyAudioPort> &srcPort,
+ const sp<PolicyAudioPort> &dstPort) const {
for (const auto &route : mRoutes) {
if (route->supportsPatch(srcPort, dstPort)) {
return true;
@@ -259,7 +260,7 @@
}
mDeclaredDevices.dump(dst, String8("Declared"), 2, true);
mDynamicDevices.dump(dst, String8("Dynamic"), 2, true);
- mRoutes.dump(dst, 2);
+ dumpAudioRouteVector(mRoutes, dst, 2);
}
sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
@@ -272,14 +273,14 @@
return nullptr;
}
-sp <HwModule> HwModuleCollection::getModuleForDeviceTypes(audio_devices_t type,
- audio_format_t encodedFormat) const
+sp <HwModule> HwModuleCollection::getModuleForDeviceType(audio_devices_t type,
+ audio_format_t encodedFormat) const
{
for (const auto& module : *this) {
const auto& profiles = audio_is_output_device(type) ?
module->getOutputProfiles() : module->getInputProfiles();
for (const auto& profile : profiles) {
- if (profile->supportsDeviceTypes(type)) {
+ if (profile->supportsDeviceTypes({type})) {
if (encodedFormat != AUDIO_FORMAT_DEFAULT) {
DeviceVector declaredDevices = module->getDeclaredDevices();
sp <DeviceDescriptor> deviceDesc =
@@ -299,7 +300,7 @@
sp<HwModule> HwModuleCollection::getModuleForDevice(const sp<DeviceDescriptor> &device,
audio_format_t encodedFormat) const
{
- return getModuleForDeviceTypes(device->type(), encodedFormat);
+ return getModuleForDeviceType(device->type(), encodedFormat);
}
DeviceVector HwModuleCollection::getAvailableDevicesFromModuleName(
@@ -334,8 +335,8 @@
}
if (allowToCreate) {
moduleDevice->attach(hwModule);
- moduleDevice->setAddress(devAddress);
- moduleDevice->setName(String8(name));
+ moduleDevice->setAddress(devAddress.string());
+ moduleDevice->setName(name);
}
return moduleDevice;
}
@@ -353,15 +354,15 @@
const char *name,
const audio_format_t encodedFormat) const
{
- sp<HwModule> hwModule = getModuleForDeviceTypes(type, encodedFormat);
+ sp<HwModule> hwModule = getModuleForDeviceType(type, encodedFormat);
if (hwModule == 0) {
ALOGE("%s: could not find HW module for device %04x address %s", __FUNCTION__, type,
address);
return nullptr;
}
- sp<DeviceDescriptor> device = new DeviceDescriptor(type, String8(name));
- device->setName(String8(name));
- device->setAddress(String8(address));
+ sp<DeviceDescriptor> device = new DeviceDescriptor(type, name);
+ device->setName(name);
+ device->setAddress(address);
device->setEncodedFormat(encodedFormat);
// Add the device to the list of dynamic devices
@@ -381,7 +382,7 @@
// @todo quid of audio profile? import the profile from device of the same type?
const auto &isoTypeDeviceForProfile =
profile->getSupportedDevices().getDevice(type, String8(), AUDIO_FORMAT_DEFAULT);
- device->importAudioPort(isoTypeDeviceForProfile, true /* force */);
+ device->importAudioPortAndPickAudioProfile(isoTypeDeviceForProfile, true /* force */);
ALOGV("%s: adding device %s to profile %s", __FUNCTION__,
device->toString().c_str(), profile->getTagName().c_str());
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 5662dcf..bf1a0f7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -78,7 +78,10 @@
}
}
- if (isPlaybackThread && (getFlags() & flags) != flags) {
+ const uint32_t mustMatchOutputFlags =
+ AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC|AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
+ if (isPlaybackThread && (((getFlags() ^ flags) & mustMatchOutputFlags)
+ || (getFlags() & flags) != flags)) {
return false;
}
// The only input flag that is allowed to be different is the fast flag.
@@ -104,7 +107,9 @@
void IOProfile::dump(String8 *dst) const
{
- AudioPort::dump(dst, 4);
+ std::string portStr;
+ AudioPort::dump(&portStr, 4);
+ dst->append(portStr.c_str());
dst->appendFormat(" - flags: 0x%04x", getFlags());
std::string flagsLiteral;
diff --git a/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
new file mode 100644
index 0000000..8c61b90
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/PolicyAudioPort.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::PolicyAudioPort"
+//#define LOG_NDEBUG 0
+#include "TypeConverter.h"
+#include "PolicyAudioPort.h"
+#include "HwModule.h"
+#include <policy.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+namespace android {
+
+// --- PolicyAudioPort class implementation
+void PolicyAudioPort::attach(const sp<HwModule>& module)
+{
+ ALOGV("%s: attaching module %s to port %s",
+ __FUNCTION__, getModuleName(), asAudioPort()->getName().c_str());
+ mModule = module;
+}
+
+void PolicyAudioPort::detach()
+{
+ mModule = nullptr;
+}
+
+// Note that this is a different namespace from AudioFlinger unique IDs.
+audio_port_handle_t PolicyAudioPort::getNextUniqueId()
+{
+ return getNextHandle();
+}
+
+audio_module_handle_t PolicyAudioPort::getModuleHandle() const
+{
+ return mModule != 0 ? mModule->getHandle() : AUDIO_MODULE_HANDLE_NONE;
+}
+
+uint32_t PolicyAudioPort::getModuleVersionMajor() const
+{
+ return mModule != 0 ? mModule->getHalVersionMajor() : 0;
+}
+
+const char *PolicyAudioPort::getModuleName() const
+{
+ return mModule != 0 ? mModule->getName() : "invalid module";
+}
+
+status_t PolicyAudioPort::checkExactAudioProfile(const struct audio_port_config *config) const
+{
+ status_t status = NO_ERROR;
+ auto config_mask = config->config_mask;
+ if (config_mask & AUDIO_PORT_CONFIG_GAIN) {
+ config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
+ status = asAudioPort()->checkGain(&config->gain, config->gain.index);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ }
+ if (config_mask != 0) {
+ // TODO should we check sample_rate / channel_mask / format separately?
+ status = checkExactProfile(asAudioPort()->getAudioProfiles(), config->sample_rate,
+ config->channel_mask, config->format);
+ }
+ return status;
+}
+
+void PolicyAudioPort::pickSamplingRate(uint32_t &pickedRate,
+ const SampleRateSet &samplingRates) const
+{
+ pickedRate = 0;
+ // For direct outputs, pick the minimum sampling rate: this helps ensure that the
+ // channel count / sampling rate combination chosen will be supported by the connected
+ // sink.
+ if (isDirectOutput()) {
+ uint32_t samplingRate = UINT_MAX;
+ for (const auto rate : samplingRates) {
+ if ((rate < samplingRate) && (rate > 0)) {
+ samplingRate = rate;
+ }
+ }
+ pickedRate = (samplingRate == UINT_MAX) ? 0 : samplingRate;
+ } else {
+ uint32_t maxRate = SAMPLE_RATE_HZ_MAX;
+
+ // For mixed outputs and inputs, cap at the max mixer sampling rate. Do not
+ // limit the sampling rate otherwise.
+ // For inputs, also see checkCompatibleSamplingRate().
+ if (asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) {
+ maxRate = UINT_MAX;
+ }
+ // TODO: should mSamplingRates[] be ordered in terms of our preference
+ // and we return the first (and hence most preferred) match? This is of concern if
+ // we want to choose 96kHz over 192kHz for USB driver stability or resource constraints.
+ for (const auto rate : samplingRates) {
+ if ((rate > pickedRate) && (rate <= maxRate)) {
+ pickedRate = rate;
+ }
+ }
+ }
+}
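+
+// For illustration only (not part of this change): for a direct output declaring rates
+// {44100, 48000, 96000}, pickSamplingRate() picks 44100 (the minimum); a mixed output picks
+// the highest rate not exceeding SAMPLE_RATE_HZ_MAX, here 96000.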
+
+void PolicyAudioPort::pickChannelMask(audio_channel_mask_t &pickedChannelMask,
+ const ChannelMaskSet &channelMasks) const
+{
+ pickedChannelMask = AUDIO_CHANNEL_NONE;
+ // For direct outputs, pick the minimum channel count: this helps ensure that the
+ // channel count / sampling rate combination chosen will be supported by the connected
+ // sink.
+ if (isDirectOutput()) {
+ uint32_t channelCount = UINT_MAX;
+ for (const auto channelMask : channelMasks) {
+ uint32_t cnlCount;
+ if (asAudioPort()->useInputChannelMask()) {
+ cnlCount = audio_channel_count_from_in_mask(channelMask);
+ } else {
+ cnlCount = audio_channel_count_from_out_mask(channelMask);
+ }
+ if ((cnlCount < channelCount) && (cnlCount > 0)) {
+ pickedChannelMask = channelMask;
+ channelCount = cnlCount;
+ }
+ }
+ } else {
+ uint32_t channelCount = 0;
+ uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
+
+ // For mixed outputs and inputs, cap at the max mixer channel count. Do not
+ // limit the channel count otherwise.
+ if (asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) {
+ maxCount = UINT_MAX;
+ }
+ for (const auto channelMask : channelMasks) {
+ uint32_t cnlCount;
+ if (asAudioPort()->useInputChannelMask()) {
+ cnlCount = audio_channel_count_from_in_mask(channelMask);
+ } else {
+ cnlCount = audio_channel_count_from_out_mask(channelMask);
+ }
+ if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
+ pickedChannelMask = channelMask;
+ channelCount = cnlCount;
+ }
+ }
+ }
+}
+
+/* formats in order of increasing preference */
+const audio_format_t PolicyAudioPort::sPcmFormatCompareTable[] = {
+ AUDIO_FORMAT_DEFAULT,
+ AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_FORMAT_PCM_8_24_BIT,
+ AUDIO_FORMAT_PCM_24_BIT_PACKED,
+ AUDIO_FORMAT_PCM_32_BIT,
+ AUDIO_FORMAT_PCM_FLOAT,
+};
+
+int PolicyAudioPort::compareFormats(audio_format_t format1, audio_format_t format2)
+{
+ // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
+ // compressed format and better than any PCM format. This is by design of pickFormat()
+ if (!audio_is_linear_pcm(format1)) {
+ if (!audio_is_linear_pcm(format2)) {
+ return 0;
+ }
+ return 1;
+ }
+ if (!audio_is_linear_pcm(format2)) {
+ return -1;
+ }
+
+ int index1 = -1, index2 = -1;
+ for (size_t i = 0;
+ (i < ARRAY_SIZE(sPcmFormatCompareTable)) && ((index1 == -1) || (index2 == -1));
+ i ++) {
+ if (sPcmFormatCompareTable[i] == format1) {
+ index1 = i;
+ }
+ if (sPcmFormatCompareTable[i] == format2) {
+ index2 = i;
+ }
+ }
+ // format1 not found => index1 < 0 => format2 > format1
+ // format2 not found => index2 < 0 => format2 < format1
+ return index1 - index2;
+}
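+
+// For illustration only (not part of this change): compareFormats() ranks PCM formats by the
+// table above and any non-PCM (or invalid) format above all PCM formats, e.g.
+// compareFormats(AUDIO_FORMAT_PCM_16_BIT, AUDIO_FORMAT_PCM_FLOAT) < 0 and
+// compareFormats(AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_FLOAT) > 0.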
+
+uint32_t PolicyAudioPort::formatDistance(audio_format_t format1, audio_format_t format2)
+{
+ if (format1 == format2) {
+ return 0;
+ }
+ if (format1 == AUDIO_FORMAT_INVALID || format2 == AUDIO_FORMAT_INVALID) {
+ return kFormatDistanceMax;
+ }
+ int diffBytes = (int)audio_bytes_per_sample(format1) -
+ audio_bytes_per_sample(format2);
+
+ return abs(diffBytes);
+}
+
+bool PolicyAudioPort::isBetterFormatMatch(audio_format_t newFormat,
+ audio_format_t currentFormat,
+ audio_format_t targetFormat)
+{
+ return formatDistance(newFormat, targetFormat) < formatDistance(currentFormat, targetFormat);
+}
+
+void PolicyAudioPort::pickAudioProfile(uint32_t &samplingRate,
+ audio_channel_mask_t &channelMask,
+ audio_format_t &format) const
+{
+ format = AUDIO_FORMAT_DEFAULT;
+ samplingRate = 0;
+ channelMask = AUDIO_CHANNEL_NONE;
+
+ // special case for uninitialized dynamic profile
+ if (!asAudioPort()->hasValidAudioProfile()) {
+ return;
+ }
+ audio_format_t bestFormat = sPcmFormatCompareTable[ARRAY_SIZE(sPcmFormatCompareTable) - 1];
+ // For mixed outputs and inputs, use the best mixer output format.
+ // Do not limit the format otherwise.
+ if ((asAudioPort()->getType() != AUDIO_PORT_TYPE_MIX) || isDirectOutput()) {
+ bestFormat = AUDIO_FORMAT_INVALID;
+ }
+
+ const AudioProfileVector& audioProfiles = asAudioPort()->getAudioProfiles();
+ for (size_t i = 0; i < audioProfiles.size(); i ++) {
+ if (!audioProfiles[i]->isValid()) {
+ continue;
+ }
+ audio_format_t formatToCompare = audioProfiles[i]->getFormat();
+ if ((compareFormats(formatToCompare, format) > 0) &&
+ (compareFormats(formatToCompare, bestFormat) <= 0)) {
+ uint32_t pickedSamplingRate = 0;
+ audio_channel_mask_t pickedChannelMask = AUDIO_CHANNEL_NONE;
+ pickChannelMask(pickedChannelMask, audioProfiles[i]->getChannels());
+ pickSamplingRate(pickedSamplingRate, audioProfiles[i]->getSampleRates());
+
+ if (formatToCompare != AUDIO_FORMAT_DEFAULT && pickedChannelMask != AUDIO_CHANNEL_NONE
+ && pickedSamplingRate != 0) {
+ format = formatToCompare;
+ channelMask = pickedChannelMask;
+ samplingRate = pickedSamplingRate;
+ // TODO: shall we return on the first match or keep trying to pick a better profile?
+ }
+ }
+ }
+ ALOGV("%s Port[nm:%s] profile rate=%d, format=%d, channels=%d", __FUNCTION__,
+ asAudioPort()->getName().c_str(), samplingRate, channelMask, format);
+}
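+
+// For illustration only (not part of this change): for a non-direct mix port exposing valid
+// 16-bit and float PCM profiles, the loop above settles on AUDIO_FORMAT_PCM_FLOAT (the best
+// PCM format not exceeding bestFormat), together with the channel mask and rate picked from
+// that profile.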
+
+// --- PolicyAudioPortConfig class implementation
+
+status_t PolicyAudioPortConfig::validationBeforeApplyConfig(
+ const struct audio_port_config *config) const
+{
+ sp<PolicyAudioPort> policyAudioPort = getPolicyAudioPort();
+ return policyAudioPort ? policyAudioPort->checkExactAudioProfile(config) : NO_INIT;
+}
+
+void PolicyAudioPortConfig::toPolicyAudioPortConfig(struct audio_port_config *dstConfig,
+ const struct audio_port_config *srcConfig) const
+{
+ if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ if ((srcConfig != nullptr) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FLAGS)) {
+ dstConfig->flags = srcConfig->flags;
+ } else {
+ dstConfig->flags = mFlags;
+ }
+ } else {
+ dstConfig->flags = { AUDIO_INPUT_FLAG_NONE };
+ }
+}
+
+
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index c699aa7..3b27cf6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -430,7 +430,7 @@
audio_port_role_t portRole = (role == Attributes::roleSource) ?
AUDIO_PORT_ROLE_SOURCE : AUDIO_PORT_ROLE_SINK;
- Element mixPort = new IOProfile(String8(name.c_str()), portRole);
+ Element mixPort = new IOProfile(name, portRole);
AudioProfileTraits::Collection profiles;
status_t status = deserializeCollection<AudioProfileTraits>(child, &profiles, NULL);
@@ -438,8 +438,11 @@
return Status::fromStatusT(status);
}
if (profiles.empty()) {
- profiles.add(AudioProfile::createFullDynamic());
+ profiles.add(AudioProfile::createFullDynamic(gDynamicFormat));
}
+ // The audio profiles are in the order listed in the audio policy configuration file.
+ // Sort the audio profiles according to format.
+ sortAudioProfiles(profiles);
mixPort->setAudioProfiles(profiles);
std::string flags = getXmlAttribute(child, Attributes::flags);
@@ -508,12 +511,12 @@
if (!encodedFormatsLiteral.empty()) {
encodedFormats = formatsFromString(encodedFormatsLiteral, " ");
}
- Element deviceDesc = new DeviceDescriptor(type, encodedFormats, String8(name.c_str()));
+ Element deviceDesc = new DeviceDescriptor(type, encodedFormats, name);
std::string address = getXmlAttribute(cur, Attributes::address);
if (!address.empty()) {
ALOGV("%s: address=%s for %s", __func__, address.c_str(), name.c_str());
- deviceDesc->setAddress(String8(address.c_str()));
+ deviceDesc->setAddress(address);
}
AudioProfileTraits::Collection profiles;
@@ -522,8 +525,11 @@
return Status::fromStatusT(status);
}
if (profiles.empty()) {
- profiles.add(AudioProfile::createFullDynamic());
+ profiles.add(AudioProfile::createFullDynamic(gDynamicFormat));
}
+ // The audio profiles are in the order listed in the audio policy configuration file.
+ // Sort the audio profiles according to format.
+ sortAudioProfiles(profiles);
deviceDesc->setAudioProfiles(profiles);
// Deserialize AudioGain children
@@ -532,7 +538,7 @@
return Status::fromStatusT(status);
}
ALOGV("%s: adding device tag %s type %08x address %s", __func__,
- deviceDesc->getName().string(), type, deviceDesc->address().string());
+ deviceDesc->getName().c_str(), type, deviceDesc->address().c_str());
return deviceDesc;
}
@@ -555,7 +561,7 @@
return Status::fromStatusT(BAD_VALUE);
}
// Convert Sink name to port pointer
- sp<AudioPort> sink = ctx->findPortByTagName(String8(sinkAttr.c_str()));
+ sp<PolicyAudioPort> sink = ctx->findPortByTagName(sinkAttr);
if (sink == NULL) {
ALOGE("%s: no sink found with name=%s", __func__, sinkAttr.c_str());
return Status::fromStatusT(BAD_VALUE);
@@ -568,13 +574,13 @@
return Status::fromStatusT(BAD_VALUE);
}
// Tokenize and Convert Sources name to port pointer
- AudioPortVector sources;
+ PolicyAudioPortVector sources;
std::unique_ptr<char[]> sourcesLiteral{strndup(
sourcesAttr.c_str(), strlen(sourcesAttr.c_str()))};
char *devTag = strtok(sourcesLiteral.get(), ",");
while (devTag != NULL) {
if (strlen(devTag) != 0) {
- sp<AudioPort> source = ctx->findPortByTagName(String8(devTag));
+ sp<PolicyAudioPort> source = ctx->findPortByTagName(devTag);
if (source == NULL) {
ALOGE("%s: no source found with name=%s", __func__, devTag);
return Status::fromStatusT(BAD_VALUE);
@@ -586,7 +592,7 @@
sink->addRoute(route);
for (size_t i = 0; i < sources.size(); i++) {
- sp<AudioPort> source = sources.itemAt(i);
+ sp<PolicyAudioPort> source = sources.itemAt(i);
source->addRoute(route);
}
route->setSources(sources);
@@ -648,7 +654,7 @@
ALOGV("%s: %s %s=%s", __func__, tag, childAttachedDeviceTag,
reinterpret_cast<const char*>(attachedDevice.get()));
sp<DeviceDescriptor> device = module->getDeclaredDevices().
- getDeviceFromTagName(String8(reinterpret_cast<const char*>(
+ getDeviceFromTagName(std::string(reinterpret_cast<const char*>(
attachedDevice.get())));
ctx->addAvailableDevice(device);
}
@@ -663,7 +669,7 @@
ALOGV("%s: %s %s=%s", __func__, tag, childDefaultOutputDeviceTag,
reinterpret_cast<const char*>(defaultOutputDevice.get()));
sp<DeviceDescriptor> device = module->getDeclaredDevices().getDeviceFromTagName(
- String8(reinterpret_cast<const char*>(defaultOutputDevice.get())));
+ std::string(reinterpret_cast<const char*>(defaultOutputDevice.get())));
if (device != 0 && ctx->getDefaultOutputDevice() == 0) {
ctx->setDefaultOutputDevice(device);
ALOGV("%s: default is %08x",
diff --git a/services/audiopolicy/config/Android.bp b/services/audiopolicy/config/Android.bp
new file mode 100644
index 0000000..4b5e788
--- /dev/null
+++ b/services/audiopolicy/config/Android.bp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+soong_namespace {
+}
+
+prebuilt_etc {
+ name: "a2dp_in_audio_policy_configuration.xml",
+ vendor: true,
+ src: ":a2dp_in_audio_policy_configuration",
+}
+prebuilt_etc {
+ name: "a2dp_audio_policy_configuration.xml",
+ vendor: true,
+ src: ":a2dp_audio_policy_configuration",
+}
+prebuilt_etc {
+ name: "audio_policy_configuration.xml",
+ vendor: true,
+ src: ":audio_policy_configuration_generic",
+}
+prebuilt_etc {
+ name: "r_submix_audio_policy_configuration.xml",
+ vendor: true,
+ src: ":r_submix_audio_policy_configuration",
+}
+prebuilt_etc {
+ name: "audio_policy_volumes.xml",
+ vendor: true,
+ src: ":audio_policy_volumes",
+}
+prebuilt_etc {
+ name: "default_volume_tables.xml",
+ vendor: true,
+ src: ":default_volume_tables",
+}
+prebuilt_etc {
+ name: "surround_sound_configuration_5_0.xml",
+ vendor: true,
+ src: ":surround_sound_configuration_5_0",
+}
+prebuilt_etc {
+ name: "usb_audio_policy_configuration.xml",
+ vendor: true,
+ src: ":usb_audio_policy_configuration",
+}
+prebuilt_etc {
+ name: "primary_audio_policy_configuration.xml",
+ src: ":primary_audio_policy_configuration",
+ vendor: true,
+}
+
+filegroup {
+ name: "a2dp_in_audio_policy_configuration",
+ srcs: ["a2dp_in_audio_policy_configuration.xml"],
+}
+filegroup {
+ name: "a2dp_audio_policy_configuration",
+ srcs: ["a2dp_audio_policy_configuration.xml"],
+}
+filegroup {
+ name: "primary_audio_policy_configuration",
+ srcs: ["primary_audio_policy_configuration.xml"],
+}
+filegroup {
+ name: "surround_sound_configuration_5_0",
+ srcs: ["surround_sound_configuration_5_0.xml"],
+}
+filegroup {
+ name: "default_volume_tables",
+ srcs: ["default_volume_tables.xml"],
+}
+filegroup {
+ name: "audio_policy_volumes",
+ srcs: ["audio_policy_volumes.xml"],
+}
+filegroup {
+ name: "audio_policy_configuration_generic",
+ srcs: ["audio_policy_configuration_generic.xml"],
+}
+filegroup {
+ name: "usb_audio_policy_configuration",
+ srcs: ["usb_audio_policy_configuration.xml"],
+}
+filegroup {
+ name: "r_submix_audio_policy_configuration",
+ srcs: ["r_submix_audio_policy_configuration.xml"],
+}
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
index d0775ad..b87c71d 100644
--- a/services/audiopolicy/engine/common/Android.bp
+++ b/services/audiopolicy/engine/common/Android.bp
@@ -44,4 +44,7 @@
"libaudiopolicycomponents",
"libaudiopolicyengine_config",
],
+ shared_libs: [
+ "libaudiofoundation",
+ ],
}
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index c538f52..ab8eff3 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -27,6 +27,7 @@
#include <utils/Errors.h>
#include <utils/String8.h>
#include <media/AudioAttributes.h>
+#include <media/AudioContainers.h>
namespace android {
@@ -77,12 +78,12 @@
std::string getDeviceAddress() const { return mDeviceAddress; }
- void setDeviceTypes(audio_devices_t devices)
+ void setDeviceTypes(const DeviceTypeSet& devices)
{
mApplicableDevices = devices;
}
- audio_devices_t getDeviceTypes() const { return mApplicableDevices; }
+ DeviceTypeSet getDeviceTypes() const { return mApplicableDevices; }
audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const;
audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const;
@@ -109,7 +110,7 @@
/**
* Applicable device(s) type mask for this strategy.
*/
- audio_devices_t mApplicableDevices = AUDIO_DEVICE_NONE;
+ DeviceTypeSet mApplicableDevices;
};
class ProductStrategyMap : public std::map<product_strategy_t, sp<ProductStrategy> >
@@ -144,7 +145,7 @@
*/
audio_attributes_t getAttributesForProductStrategy(product_strategy_t strategy) const;
- audio_devices_t getDeviceTypesForProductStrategy(product_strategy_t strategy) const;
+ DeviceTypeSet getDeviceTypesForProductStrategy(product_strategy_t strategy) const;
std::string getDeviceAddressForProductStrategy(product_strategy_t strategy) const;
diff --git a/services/audiopolicy/engine/common/include/VolumeCurve.h b/services/audiopolicy/engine/common/include/VolumeCurve.h
index d3d0904..2e75ff1 100644
--- a/services/audiopolicy/engine/common/include/VolumeCurve.h
+++ b/services/audiopolicy/engine/common/include/VolumeCurve.h
@@ -91,9 +91,9 @@
return valueFor(device);
}
- virtual int getVolumeIndex(audio_devices_t device) const
+ virtual int getVolumeIndex(const DeviceTypeSet& deviceTypes) const
{
- device = Volume::getDeviceForVolume(device);
+ audio_devices_t device = Volume::getDeviceForVolume(deviceTypes);
// there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
if (mIndexCur.find(device) == end(mIndexCur)) {
device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
@@ -114,7 +114,7 @@
bool hasVolumeIndexForDevice(audio_devices_t device) const
{
- device = Volume::getDeviceForVolume(device);
+ device = Volume::getDeviceForVolume({device});
return mIndexCur.find(device) != end(mIndexCur);
}
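
The new getVolumeIndex(const DeviceTypeSet&) reduces the set to one device for the lookup and falls back to the always-present default entry when no per-device index exists. A standalone sketch of that lookup pattern (the type aliases, the reduction rule, and the placeholder default value are assumptions, not this patch):

#include <cstdint>
#include <map>
#include <set>

using audio_devices_t = uint32_t;                 // assumption for the sketch
using DeviceTypeSet = std::set<audio_devices_t>;

constexpr audio_devices_t kDefaultForVolume = 0x40000000u;  // placeholder value

// Hypothetical reduction mirroring Volume::getDeviceForVolume(): pick one
// representative device out of the set (here simply the first element).
audio_devices_t deviceForVolume(const DeviceTypeSet& types) {
    return types.empty() ? kDefaultForVolume : *types.begin();
}

int volumeIndexFor(const std::map<audio_devices_t, int>& indexCur,
                   const DeviceTypeSet& types) {
    audio_devices_t device = deviceForVolume(types);
    if (indexCur.find(device) == indexCur.end()) {
        device = kDefaultForVolume;  // the curve always carries a default entry
    }
    return indexCur.at(device);
}
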
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 07a7e65..840eb34 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -39,7 +39,7 @@
{
ALOGV("setPhoneState() state %d", state);
- if (state < 0 || state >= AUDIO_MODE_CNT) {
+ if (state < 0 || uint32_t(state) >= AUDIO_MODE_CNT) {
ALOGW("setPhoneState() invalid state %d", state);
return BAD_VALUE;
}
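
The uint32_t(state) cast above keeps the range check well-defined when the comparison mixes a signed enum value with an unsigned count. A small self-contained illustration of the pattern (hypothetical enum and count; the values are placeholders):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins: a signed mode enum and an unsigned number of modes.
enum Mode : int32_t { MODE_NORMAL = 0, MODE_RINGTONE, MODE_IN_CALL };
constexpr uint32_t kModeCount = 3;

bool isValidMode(Mode state) {
    // Reject negatives in the signed domain first, then compare in the
    // unsigned domain to avoid a signed/unsigned comparison warning.
    return state >= 0 && uint32_t(state) < kModeCount;
}

int main() {
    std::printf("%d %d\n", isValidMode(MODE_IN_CALL),
                isValidMode(static_cast<Mode>(-1)));
    return 0;
}
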
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index ac3e462..14c9dd1 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -143,8 +143,9 @@
{
dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
std::string deviceLiteral;
- if (!OutputDeviceConverter::toString(mApplicableDevices, deviceLiteral)) {
- ALOGE("%s: failed to convert device %d", __FUNCTION__, mApplicableDevices);
+ if (!deviceTypesToString(mApplicableDevices, deviceLiteral)) {
+ ALOGE("%s: failed to convert device %s",
+ __FUNCTION__, dumpDeviceTypes(mApplicableDevices).c_str());
}
dst->appendFormat("%*sSelected Device: {type:%s, @:%s}\n", spaces + 2, "",
deviceLiteral.c_str(), mDeviceAddress.c_str());
@@ -236,14 +237,14 @@
}
-audio_devices_t ProductStrategyMap::getDeviceTypesForProductStrategy(
+DeviceTypeSet ProductStrategyMap::getDeviceTypesForProductStrategy(
product_strategy_t strategy) const
{
if (find(strategy) == end()) {
ALOGE("Invalid %d strategy requested, returning device for default strategy", strategy);
product_strategy_t defaultStrategy = getDefault();
if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
- return AUDIO_DEVICE_NONE;
+ return {AUDIO_DEVICE_NONE};
}
return at(getDefault())->getDeviceTypes();
}
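
deviceTypesToString()/dumpDeviceTypes() used in the dump hunk come from AudioContainers; conceptually they render the set as a readable list instead of formatting a raw bitmask with %d. A hedged stand-in for that formatting (name and output format assumed):

#include <cstdint>
#include <set>
#include <sstream>
#include <string>

using audio_devices_t = uint32_t;                 // assumption for the sketch
using DeviceTypeSet = std::set<audio_devices_t>;

// Illustrative stand-in for dumpDeviceTypes(): join every device type in the
// set into one comma-separated hex list suitable for log messages.
std::string dumpDeviceTypesSketch(const DeviceTypeSet& types) {
    std::ostringstream out;
    const char* sep = "";
    for (audio_devices_t type : types) {
        out << sep << "0x" << std::hex << type;
        sep = ", ";
    }
    return out.str();
}
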
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index ae3fc79..349f969 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,7 +16,7 @@
#pragma once
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
#include <AudioPatch.h>
#include <IOProfile.h>
#include <DeviceDescriptor.h>
diff --git a/services/audiopolicy/engineconfigurable/config/Android.bp b/services/audiopolicy/engineconfigurable/config/Android.bp
new file mode 100644
index 0000000..fe3eae0
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/Android.bp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Root soong_namespace for common components
+
+prebuilt_etc {
+ name: "audio_policy_engine_criteria.xml",
+ vendor: true,
+ src: ":audio_policy_engine_criteria",
+}
+filegroup {
+ name: "audio_policy_engine_criterion_types_template",
+ srcs: ["example/common/audio_policy_engine_criterion_types.xml.in"],
+}
+filegroup {
+ name: "audio_policy_engine_criteria",
+ srcs: ["example/common/audio_policy_engine_criteria.xml"],
+}
diff --git a/services/audiopolicy/engineconfigurable/config/example/Android.mk b/services/audiopolicy/engineconfigurable/config/example/Android.mk
deleted file mode 100644
index a0f1a90..0000000
--- a/services/audiopolicy/engineconfigurable/config/example/Android.mk
+++ /dev/null
@@ -1,151 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-ifdef BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION
-
-TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
-PROVISION_CRITERION_TYPES := $(TOOLS)/provision_criterion_types_from_android_headers.mk
-
-##################################################################
-# CONFIGURATION TOP FILE
-##################################################################
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_configuration.xml
-
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-
-LOCAL_REQUIRED_MODULES := \
- audio_policy_engine_product_strategies.xml \
- audio_policy_engine_stream_volumes.xml \
- audio_policy_engine_default_stream_volumes.xml \
- audio_policy_engine_criteria.xml \
- audio_policy_engine_criterion_types.xml
-
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_product_strategies.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
-
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),automotive_configurable caremu_configurable))
-
-##################################################################
-# AUTOMOTIVE CONFIGURATION TOP FILE
-##################################################################
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_configuration.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE)
-
-LOCAL_REQUIRED_MODULES := \
- audio_policy_engine_product_strategies.xml \
- audio_policy_engine_criteria.xml \
- audio_policy_engine_criterion_types.xml \
- audio_policy_engine_volumes.xml
-
-include $(BUILD_PREBUILT)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),automotive_configurable caremu_configurable))
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
-
-##################################################################
-# CONFIGURATION FILES
-##################################################################
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_product_strategies.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_volumes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), caremu_configurable)
-
-##################################################################
-# CONFIGURATION FILES
-##################################################################
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_product_strategies.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := caremu/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_volumes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := caremu/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), caremu_configurable)
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable caremu_configurable))
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_criteria.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := common/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_criterion_types.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_ADDITIONAL_DEPENDENCIES := $(TARGET_OUT_VENDOR_ETC)/primary_audio_policy_configuration.xml
-ANDROID_AUDIO_BASE_HEADER_FILE := system/media/audio/include/system/audio-base.h
-AUDIO_POLICY_CONFIGURATION_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_configuration.xml
-CRITERION_TYPES_FILE := $(LOCAL_PATH)/common/$(LOCAL_MODULE).in
-
-include $(PROVISION_CRITERION_TYPES)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable caremu_configurable))
-
-endif #ifdef BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp b/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
new file mode 100644
index 0000000..f913a14
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/Android.bp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use the AOSP Automotive configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+prebuilt_etc {
+ name: "audio_policy_engine_configuration.xml",
+ vendor: true,
+ src: ":audio_policy_engine_configuration",
+ required: [
+ ":audio_policy_engine_criterion_types.xml",
+ ":audio_policy_engine_criteria.xml",
+ ":audio_policy_engine_product_strategies.xml",
+ ":audio_policy_engine_volumes.xml",
+ ],
+}
+prebuilt_etc {
+ name: "audio_policy_engine_product_strategies.xml",
+ vendor: true,
+ src: "audio_policy_engine_product_strategies.xml",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_volumes.xml",
+ vendor: true,
+ src: ":audio_policy_engine_volumes",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_criterion_types.xml",
+ vendor: true,
+ src: ":audio_policy_engine_criterion_types",
+}
+
+//
+// Generate the audio_policy_engine criterion types file; it provides the device address criterion types
+//
+genrule {
+ name: "audio_policy_engine_criterion_types",
+ defaults: ["buildpolicycriteriontypesrule"],
+ srcs: [
+ ":audio_policy_configuration_top_file",
+ ":audio_policy_configuration_files",
+ ],
+}
+filegroup {
+ name: "audio_policy_configuration_files",
+ srcs: [
+ ":r_submix_audio_policy_configuration",
+ ":default_volume_tables",
+ ":audio_policy_volumes",
+ ":surround_sound_configuration_5_0",
+ ":primary_audio_policy_configuration",
+ ],
+}
+filegroup {
+ name: "audio_policy_configuration_top_file",
+ srcs: [":audio_policy_configuration_generic"],
+}
+filegroup {
+ name: "audio_policy_engine_configuration",
+ srcs: ["audio_policy_engine_configuration.xml"],
+}
+filegroup {
+ name: "audio_policy_engine_volumes",
+ srcs: ["audio_policy_engine_volumes.xml"],
+}
+filegroup {
+ name: "audio_policy_engine_configuration_files",
+ srcs: [
+ ":audio_policy_engine_configuration",
+ "audio_policy_engine_product_strategies.xml",
+ ":audio_policy_engine_volumes",
+ ":audio_policy_engine_criterion_types",
+ ":audio_policy_engine_criteria",
+ ],
+}
diff --git a/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp b/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
new file mode 100644
index 0000000..fae6b7b
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/caremu/Android.bp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use the AOSP Car Emulator configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/engineconfigurable/config/example/automotive",
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+prebuilt_etc {
+ name: "audio_policy_engine_configuration.xml",
+ vendor: true,
+ src: ":audio_policy_engine_configuration",
+ required: [
+ "audio_policy_engine_criterion_types.xml",
+ "audio_policy_engine_criteria.xml",
+ "audio_policy_engine_product_strategies.xml",
+ ":audio_policy_engine_volumes.xml",
+ ],
+}
+prebuilt_etc {
+ name: "audio_policy_engine_product_strategies.xml",
+ vendor: true,
+ src: "audio_policy_engine_product_strategies.xml",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_criterion_types.xml",
+ vendor: true,
+ src: ":audio_policy_engine_criterion_types",
+}
+
+//
+// Generate the audio_policy_engine criterion types file; it provides the device address criterion types
+//
+genrule {
+ name: "audio_policy_engine_criterion_types",
+ defaults: ["buildpolicycriteriontypesrule"],
+ srcs: [
+ ":audio_policy_configuration_top_file",
+ ":audio_policy_configuration_files",
+ ],
+}
+filegroup {
+ name: "audio_policy_configuration_files",
+ srcs: [
+ ":r_submix_audio_policy_configuration",
+ ":default_volume_tables",
+ ":audio_policy_volumes",
+ ":surround_sound_configuration_5_0",
+ ":primary_audio_policy_configuration",
+ ],
+}
+filegroup {
+ name: "audio_policy_configuration_top_file",
+ srcs: [":audio_policy_configuration_generic"],
+}
+filegroup {
+ name: "audio_policy_engine_configuration_files",
+ srcs: [
+ ":audio_policy_engine_configuration",
+ "audio_policy_engine_product_strategies.xml",
+ ":audio_policy_engine_volumes",
+ ":audio_policy_engine_criterion_types",
+ ":audio_policy_engine_criteria",
+ ],
+}
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp b/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
new file mode 100644
index 0000000..94d33bd
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/Android.bp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use the AOSP Phone configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+prebuilt_etc {
+ name: "audio_policy_engine_configuration.xml",
+ vendor: true,
+ src: ":audio_policy_engine_configuration",
+ required: [
+ ":audio_policy_engine_criterion_types.xml",
+ ":audio_policy_engine_criteria.xml",
+ ":audio_policy_engine_product_strategies.xml",
+ ":audio_policy_engine_volumes.xml",
+ ],
+}
+prebuilt_etc {
+ name: "audio_policy_engine_product_strategies.xml",
+ vendor: true,
+ src: "audio_policy_engine_product_strategies.xml",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_stream_volumes.xml",
+ vendor: true,
+ src: ":audio_policy_engine_stream_volumes",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_default_stream_volumes.xml",
+ vendor: true,
+ src: ":audio_policy_engine_default_stream_volumes",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_criterion_types.xml",
+ vendor: true,
+ src: ":audio_policy_engine_criterion_types",
+}
+
+//
+// Generate the audio_policy_engine criterion types file; it provides the device address criterion types
+//
+genrule {
+ name: "audio_policy_engine_criterion_types",
+ defaults: ["buildpolicycriteriontypesrule"],
+ srcs: [
+ ":audio_policy_configuration_top_file",
+ ":audio_policy_configuration_files",
+ ],
+}
+filegroup {
+ name: "audio_policy_configuration_files",
+ srcs: [
+ ":r_submix_audio_policy_configuration",
+ ":default_volume_tables",
+ ":audio_policy_volumes",
+ ":surround_sound_configuration_5_0",
+ ":primary_audio_policy_configuration",
+ ],
+}
+filegroup {
+ name: "audio_policy_configuration_top_file",
+ srcs: [":audio_policy_configuration_generic"],
+}
+filegroup {
+ name: "audio_policy_engine_configuration",
+ srcs: ["audio_policy_engine_configuration.xml"],
+}
+filegroup {
+ name: "audio_policy_engine_stream_volumes",
+ srcs: ["audio_policy_engine_stream_volumes.xml"],
+}
+filegroup {
+ name: "audio_policy_engine_default_stream_volumes",
+ srcs: ["audio_policy_engine_default_stream_volumes.xml"],
+}
+filegroup {
+ name: "audio_policy_engine_configuration_files",
+ srcs: [
+ ":audio_policy_engine_configuration",
+ "audio_policy_engine_product_strategies.xml",
+ ":audio_policy_engine_stream_volumes",
+ ":audio_policy_engine_default_stream_volumes",
+ ":audio_policy_engine_criterion_types",
+ ":audio_policy_engine_criteria",
+ ],
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
new file mode 100644
index 0000000..a0b874a
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/Android.bp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Root soong_namespace for common components
+
+prebuilt_etc {
+ name: "PolicyClass.xml",
+ vendor: true,
+ src: ":PolicyClass",
+ sub_dir: "parameter-framework/Structure/Policy",
+}
+prebuilt_etc {
+ name: "PolicySubsystem.xml",
+ vendor: true,
+ src: ":PolicySubsystem",
+ sub_dir: "parameter-framework/Structure/Policy",
+}
+prebuilt_etc {
+ name: "PolicySubsystem-CommonTypes.xml",
+ vendor: true,
+ src: ":PolicySubsystem-CommonTypes",
+ sub_dir: "parameter-framework/Structure/Policy",
+}
+
+filegroup {
+ name: "product_strategies_structure_template",
+ srcs: ["examples/common/Structure/ProductStrategies.xml.in"],
+}
+filegroup {
+ name: "PolicySubsystem",
+ srcs: ["examples/common/Structure/PolicySubsystem.xml"],
+}
+filegroup {
+ name: "PolicySubsystem-no-strategy",
+ srcs: ["examples/common/Structure/PolicySubsystem-no-strategy.xml"],
+}
+filegroup {
+ name: "PolicySubsystem-CommonTypes",
+ srcs: ["examples/common/Structure/PolicySubsystem-CommonTypes.xml"],
+}
+filegroup {
+ name: "PolicyClass",
+ srcs: ["examples/common/Structure/PolicyClass.xml"],
+}
+filegroup {
+ name: "volumes.pfw",
+ srcs: ["examples/Settings/volumes.pfw"],
+}
+filegroup {
+ name: "device_for_input_source.pfw",
+ srcs: ["examples/Settings/device_for_input_source.pfw"],
+}
+filegroup {
+ name: "ParameterFrameworkConfigurationPolicy.userdebug.xml",
+ srcs: ["examples/ParameterFrameworkConfigurationPolicy.userdebug.xml"],
+}
+filegroup {
+ name: "ParameterFrameworkConfigurationPolicy.user.xml",
+ srcs: ["examples/ParameterFrameworkConfigurationPolicy.user.xml"],
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
deleted file mode 100644
index 19f93b3..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Android.mk
+++ /dev/null
@@ -1,187 +0,0 @@
-################################################################################################
-#
-# @NOTE:
-# Audio Policy Engine configurable example for generic device build
-#
-# Any vendor shall have its own configuration within the corresponding device folder
-#
-################################################################################################
-
-LOCAL_PATH := $(call my-dir)
-
-ifdef BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable caremu_configurable no-output_configurable no-input_configurable))
-
-PFW_CORE := external/parameter-framework
-#@TODO: upstream new domain generator
-#BUILD_PFW_SETTINGS := $(PFW_CORE)/support/android/build_pfw_settings.mk
-PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
-BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
-
-PROVISION_STRATEGIES_STRUCTURE := $(TOOLS)/provision_strategies_structure.mk
-
-endif
-
-##################################################################
-# CONFIGURATION FILES
-##################################################################
-######### Policy PFW top level file #########
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable caremu_configurable))
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := ParameterFrameworkConfigurationPolicy.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework
-LOCAL_SRC_FILES := $(LOCAL_MODULE).in
-LOCAL_REQUIRED_MODULES := \
- PolicySubsystem.xml \
- PolicyClass.xml
-
-# external/parameter-framework prevents from using debug interface
-AUDIO_PATTERN = @TUNING_ALLOWED@
-ifeq ($(TARGET_BUILD_VARIANT),user)
-AUDIO_VALUE = false
-else
-AUDIO_VALUE = true
-endif
-
-LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(TARGET_OUT_VENDOR_ETC)/$(LOCAL_MODULE_RELATIVE_PATH)/$(LOCAL_MODULE)
-
-include $(BUILD_PREBUILT)
-
-########## Policy PFW Common Structures #########
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_REQUIRED_MODULES := \
- PolicySubsystem-CommonTypes.xml \
- ProductStrategies.xml
-
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := common/Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem-CommonTypes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := common/Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicyClass.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := common/Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := ProductStrategies.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
-
-AUDIO_POLICY_ENGINE_CONFIGURATION_FILE := \
- $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_configuration.xml
-STRATEGIES_STRUCTURE_FILE := $(LOCAL_PATH)/common/Structure/$(LOCAL_MODULE).in
-
-include $(PROVISION_STRATEGIES_STRUCTURE)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable caremu_configurable))
-
-########## Policy PFW Example Structures #########
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable no-input_configurable))
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := PolicySubsystem.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_REQUIRED_MODULES := PolicySubsystem-CommonTypes.xml
-
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Structure/Policy
-LOCAL_SRC_FILES := common/Structure/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := ParameterFrameworkConfigurationPolicy-no-strategy.xml
-LOCAL_MODULE_STEM := ParameterFrameworkConfigurationPolicy.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework
-LOCAL_SRC_FILES := $(LOCAL_MODULE).in
-LOCAL_REQUIRED_MODULES := \
- PolicySubsystem.xml \
- PolicyClass.xml
-AUDIO_VALUE = false
-LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(TARGET_OUT_VENDOR_ETC)/$(LOCAL_MODULE_RELATIVE_PATH)/$(LOCAL_MODULE)
-
-include $(BUILD_PREBUILT)
-
-endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable no-input_configurable))
-
-######### Policy PFW Settings - No Output #########
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains-NoOutputDevice.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
-PFW_EDD_FILES := \
- $(LOCAL_PATH)/SettingsNoOutput/device_for_strategies.pfw \
- $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
- $(LOCAL_PATH)/Settings/volumes.pfw
-LOCAL_REQUIRED_MODULES := libpolicy-subsystem
-include $(BUILD_PFW_SETTINGS)
-
-endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-output_configurable)
-######### Policy PFW Settings - No Input #########
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-input_configurable)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains-NoInputDevice.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
-PFW_EDD_FILES := \
- $(LOCAL_PATH)/SettingsNoInput/device_for_input_source.pfw \
- $(LOCAL_PATH)/Settings/volumes.pfw
-LOCAL_REQUIRED_MODULES := libpolicy-subsystem
-include $(BUILD_PFW_SETTINGS)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),no-input_configurable)
-#######################################################################
-# Recursive call sub-folder Android.mk
-#######################################################################
-
-include $(call all-makefiles-under,$(LOCAL_PATH))
-
-endif #ifdef BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION
-
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
new file mode 100644
index 0000000..5078268
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.bp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use the AOSP Automotive configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/engineconfigurable/config/example/automotive",
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+//
+// Generate Audio Policy Parameter Framework Product Strategies Structure file from template
+//
+prebuilt_etc {
+ name: "ProductStrategies.xml",
+ vendor: true,
+ src: ":buildstrategiesstructure_gen",
+ sub_dir: "parameter-framework/Structure/Policy",
+ required: ["libpolicy-subsystem"],
+}
+genrule {
+ name: "buildstrategiesstructure_gen",
+ defaults: ["buildstrategiesstructurerule"],
+ srcs: [
+ ":audio_policy_engine_configuration_files",
+ ],
+}
+
+//
+// Generate Audio Policy Parameter Framework Configurable Domains
+//
+prebuilt_etc {
+ name: "parameter-framework.policy",
+ filename_from_src: true,
+ vendor: true,
+ src: ":domaingeneratorpolicyrule_gen",
+ sub_dir: "parameter-framework/Settings/Policy",
+ required: [
+ "ProductStrategies.xml",
+ "PolicyClass.xml",
+ "PolicySubsystem.xml",
+ "PolicySubsystem-CommonTypes.xml",
+ ],
+}
+genrule {
+ name: "domaingeneratorpolicyrule_gen",
+ defaults: ["domaingeneratorpolicyrule"],
+ srcs: [
+ ":audio_policy_pfw_toplevel",
+ ":audio_policy_pfw_structure_files",
+ ":audio_policy_engine_criterion_types",
+ ":edd_files",
+ ],
+}
+filegroup {
+ name: "edd_files",
+ srcs: [
+ ":device_for_input_source.pfw",
+ ":volumes.pfw",
+ "Settings/device_for_product_strategies.pfw",
+ ],
+}
+// This is for Settings generation; it must use the socket port, so the userdebug version is required
+filegroup {
+ name: "audio_policy_pfw_toplevel",
+ srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
+}
+filegroup {
+ name: "audio_policy_pfw_structure_files",
+ srcs: [
+ ":PolicyClass",
+ ":PolicySubsystem",
+ ":PolicySubsystem-CommonTypes",
+ ":buildstrategiesstructure_gen",
+ ],
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk
deleted file mode 100644
index 7304ec2..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car/Android.mk
+++ /dev/null
@@ -1,47 +0,0 @@
-################################################################################################
-#
-# @NOTE:
-# Audio Policy Engine configurable example for generic device build
-#
-# Any vendor shall have its own configuration within the corresponding device folder
-#
-################################################################################################
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
-LOCAL_PATH := $(call my-dir)
-
-PFW_CORE := external/parameter-framework
-PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
-BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
-
-
-##################################################################
-# CONFIGURATION FILES
-##################################################################
-
-########## Policy PFW Structures #########
-######### Policy PFW Settings #########
-include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-LOCAL_REQUIRED_MODULES := libpolicy-subsystem
-
-PFW_EDD_FILES := \
- $(LOCAL_PATH)/Settings/device_for_product_strategies.pfw \
- $(LOCAL_PATH)/../Settings/device_for_input_source.pfw \
- $(LOCAL_PATH)/../Settings/volumes.pfw
-
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-include $(BUILD_PFW_SETTINGS)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
new file mode 100644
index 0000000..0917440
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.bp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use the AOSP Car Emulator configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/engineconfigurable/config/example/caremu",
+ "frameworks/av/services/audiopolicy/engineconfigurable/parameter-framework/examples/Car",
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+//
+// Generate Audio Policy Parameter Framework Product Strategies Structure file from template
+//
+prebuilt_etc {
+ name: "ProductStrategies.xml",
+ vendor: true,
+ src: ":buildstrategiesstructure_gen",
+ sub_dir: "parameter-framework/Structure/Policy",
+ required: ["libpolicy-subsystem"],
+}
+genrule {
+ name: "buildstrategiesstructure_gen",
+ defaults: ["buildstrategiesstructurerule"],
+ srcs: [
+ ":audio_policy_engine_configuration_files",
+ ],
+}
+
+//
+// Generate Audio Policy Parameter Framework Configurable Domains
+//
+prebuilt_etc {
+ name: "parameter-framework.policy",
+ filename_from_src: true,
+ vendor: true,
+ src: ":domaingeneratorpolicyrule_gen",
+ sub_dir: "parameter-framework/Settings/Policy",
+ required: [
+ "ProductStrategies.xml",
+ "PolicyClass.xml",
+ "PolicySubsystem.xml",
+ "PolicySubsystem-CommonTypes.xml",
+ ],
+}
+genrule {
+ name: "domaingeneratorpolicyrule_gen",
+ defaults: ["domaingeneratorpolicyrule"],
+ srcs: [
+ ":audio_policy_pfw_toplevel",
+ ":audio_policy_pfw_structure_files",
+ ":audio_policy_engine_criterion_types",
+ ":edd_files",
+ ],
+}
+filegroup {
+ name: "edd_files",
+ srcs: [
+ ":device_for_input_source.pfw",
+ ":volumes.pfw",
+ "Settings/device_for_product_strategies.pfw",
+ ],
+}
+// This is for Settings generation; it must use the socket port, so the userdebug version is required
+filegroup {
+ name: "audio_policy_pfw_toplevel",
+ srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
+}
+filegroup {
+ name: "audio_policy_pfw_structure_files",
+ srcs: [
+ ":PolicyClass",
+ ":PolicySubsystem",
+ ":PolicySubsystem-CommonTypes",
+ ":buildstrategiesstructure_gen",
+ ],
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.mk
deleted file mode 100644
index f5eb7d1..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/CarEmu/Android.mk
+++ /dev/null
@@ -1,46 +0,0 @@
-################################################################################################
-#
-# @NOTE:
-# Audio Policy Engine configurable example for generic device build
-#
-# Any vendor shall have its own configuration within the corresponding device folder
-#
-################################################################################################
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), caremu_configurable)
-LOCAL_PATH := $(call my-dir)
-
-PFW_CORE := external/parameter-framework
-PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
-BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
-
-
-##################################################################
-# CONFIGURATION FILES
-##################################################################
-
-########## Policy PFW Structures #########
-######### Policy PFW Settings #########
-include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-LOCAL_REQUIRED_MODULES := libpolicy-subsystem
-
-PFW_EDD_FILES := \
- $(LOCAL_PATH)/Settings/device_for_product_strategies.pfw \
- $(LOCAL_PATH)/../Settings/device_for_input_source.pfw \
- $(LOCAL_PATH)/../Settings/volumes.pfw
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-include $(BUILD_PFW_SETTINGS)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), caremu_configurable)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.user.xml
similarity index 81%
copy from services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
copy to services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.user.xml
index 1be67dd..c5960cb 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.user.xml
@@ -1,7 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<ParameterFrameworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- SystemClassName="Policy" ServerPort="unix:///dev/socket/audioserver/policy_debug"
- TuningAllowed="@TUNING_ALLOWED@">
+ SystemClassName="Policy" TuningAllowed="false">
<SubsystemPlugins>
<Location Folder="">
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.userdebug.xml
similarity index 93%
rename from services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
rename to services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.userdebug.xml
index 1be67dd..1b7d7d8 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.xml.in
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/ParameterFrameworkConfigurationPolicy.userdebug.xml
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<ParameterFrameworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
SystemClassName="Policy" ServerPort="unix:///dev/socket/audioserver/policy_debug"
- TuningAllowed="@TUNING_ALLOWED@">
+ TuningAllowed="true">
<SubsystemPlugins>
<Location Folder="">
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
new file mode 100644
index 0000000..11e220b
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.bp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use the AOSP Phone configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/engineconfigurable/config/example/phone",
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+//
+// Generate Audio Policy Parameter Framework Product Strategies Structure file from template
+//
+prebuilt_etc {
+ name: "ProductStrategies.xml",
+ vendor: true,
+ src: ":buildstrategiesstructure_gen",
+ sub_dir: "parameter-framework/Structure/Policy",
+ required: ["libpolicy-subsystem"],
+}
+genrule {
+ name: "buildstrategiesstructure_gen",
+ defaults: ["buildstrategiesstructurerule"],
+ srcs: [
+ ":audio_policy_engine_configuration_files",
+ ],
+}
+
+//
+// Generate Audio Policy Parameter Framework Configurable Domains
+//
+prebuilt_etc {
+ name: "parameter-framework.policy",
+ filename_from_src: true,
+ vendor: true,
+ src: ":domaingeneratorpolicyrule_gen",
+ sub_dir: "parameter-framework/Settings/Policy",
+ required: [
+ "ProductStrategies.xml",
+ "PolicyClass.xml",
+ "PolicySubsystem.xml",
+ "PolicySubsystem-CommonTypes.xml",
+ ],
+}
+genrule {
+ name: "domaingeneratorpolicyrule_gen",
+ defaults: ["domaingeneratorpolicyrule"],
+ srcs: [
+ ":audio_policy_pfw_toplevel",
+ ":audio_policy_pfw_structure_files",
+ ":audio_policy_engine_criterion_types",
+ ":edd_files",
+ ],
+}
+filegroup {
+ name: "edd_files",
+ srcs: [
+ ":device_for_input_source.pfw",
+ ":volumes.pfw",
+ "Settings/device_for_product_strategy_media.pfw",
+ "Settings/device_for_product_strategy_accessibility.pfw",
+ "Settings/device_for_product_strategy_dtmf.pfw",
+ "Settings/device_for_product_strategy_enforced_audible.pfw",
+ "Settings/device_for_product_strategy_phone.pfw",
+ "Settings/device_for_product_strategy_sonification.pfw",
+ "Settings/device_for_product_strategy_sonification_respectful.pfw",
+ "Settings/device_for_product_strategy_transmitted_through_speaker.pfw",
+ "Settings/device_for_product_strategy_rerouting.pfw",
+ "Settings/device_for_product_strategy_patch.pfw",
+ ],
+}
+// This is for Settings generation; it must use the socket port, so the userdebug version is required
+filegroup {
+ name: "audio_policy_pfw_toplevel",
+ srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
+}
+filegroup {
+ name: "audio_policy_pfw_structure_files",
+ srcs: [
+ ":PolicyClass",
+ ":PolicySubsystem",
+ ":PolicySubsystem-CommonTypes",
+ ":buildstrategiesstructure_gen",
+ ],
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk
deleted file mode 100644
index 0b20781..0000000
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Phone/Android.mk
+++ /dev/null
@@ -1,54 +0,0 @@
-################################################################################################
-#
-# @NOTE:
-# Audio Policy Engine configurable example for generic device build
-#
-# Any vendor shall have its own configuration within the corresponding device folder
-#
-################################################################################################
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
-
-LOCAL_PATH := $(call my-dir)
-
-PFW_CORE := external/parameter-framework
-PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-TOOLS := frameworks/av/services/audiopolicy/engineconfigurable/tools
-BUILD_PFW_SETTINGS := $(TOOLS)/build_audio_pfw_settings.mk
-
-##################################################################
-# CONFIGURATION FILES
-##################################################################
-########## Policy PFW Structures #########
-######### Policy PFW Settings #########
-include $(CLEAR_VARS)
-LOCAL_MODULE := parameter-framework.policy
-LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
-LOCAL_REQUIRED_MODULES := libpolicy-subsystem
-
-PFW_EDD_FILES := \
- $(LOCAL_PATH)/../Settings/device_for_input_source.pfw \
- $(LOCAL_PATH)/../Settings/volumes.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_media.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_accessibility.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_dtmf.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_enforced_audible.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_phone.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_sonification.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_sonification_respectful.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_transmitted_through_speaker.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_rerouting.pfw \
- $(LOCAL_PATH)/Settings/device_for_product_strategy_patch.pfw
-PFW_CRITERION_TYPES_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criterion_types.xml
-PFW_CRITERIA_FILE := $(TARGET_OUT_VENDOR_ETC)/audio_policy_engine_criteria.xml
-PFW_TOPLEVEL_FILE := $(TARGET_OUT_VENDOR_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
-
-include $(BUILD_PFW_SETTINGS)
-
-endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_input_source.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_input_source.pfw
index a990879..9e0957c 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_input_source.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_input_source.pfw
@@ -18,7 +18,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/mic/applicable_input_device/mask
communication = 0
ambient = 0
@@ -36,7 +35,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/voice_downlink/applicable_input_device/mask
communication = 0
ambient = 0
@@ -58,7 +56,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/voice_call/applicable_input_device/mask
communication = 0
ambient = 0
@@ -80,7 +77,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/voice_uplink/applicable_input_device/mask
communication = 0
ambient = 0
@@ -102,7 +98,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
communication = 0
ambient = 0
@@ -123,7 +118,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/voice_recognition/applicable_input_device/mask
communication = 0
ambient = 0
@@ -142,7 +136,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
communication = 0
ambient = 0
@@ -160,7 +153,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
communication = 0
ambient = 0
@@ -182,7 +174,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/hotword/applicable_input_device/mask
communication = 0
ambient = 0
@@ -201,7 +192,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/unprocessed/applicable_input_device/mask
communication = 0
ambient = 0
@@ -220,7 +210,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
communication = 0
ambient = 0
@@ -242,7 +231,6 @@
loopback = 0
ip = 0
bus = 0
- stub = 0
domain: DefaultAndMic
conf: A2dp
@@ -255,12 +243,14 @@
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
component: mic/applicable_input_device/mask/
bluetooth_a2dp = 1
wired_headset = 0
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
conf: Sco
AvailableInputDevices Includes BluetoothScoHeadset
@@ -273,12 +263,14 @@
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 1
+ stub = 0
component: mic/applicable_input_device/mask/
bluetooth_a2dp = 0
wired_headset = 0
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 1
+ stub = 0
conf: WiredHeadset
AvailableInputDevices Includes WiredHeadset
@@ -290,12 +282,14 @@
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
component: mic/applicable_input_device/mask/
bluetooth_a2dp = 0
wired_headset = 1
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
conf: UsbDevice
AvailableInputDevices Includes UsbDevice
@@ -307,12 +301,14 @@
usb_device = 1
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
component: mic/applicable_input_device/mask/
bluetooth_a2dp = 0
wired_headset = 0
usb_device = 1
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
conf: BuiltinMic
AvailableInputDevices Includes BuiltinMic
@@ -324,12 +320,33 @@
usb_device = 0
builtin_mic = 1
bluetooth_sco_headset = 0
+ stub = 0
component: mic/applicable_input_device/mask/
bluetooth_a2dp = 0
wired_headset = 0
usb_device = 0
builtin_mic = 1
bluetooth_sco_headset = 0
+ stub = 0
+
+ conf: Stub
+ AvailableInputDevices Includes Default
+
+ component: /Policy/policy/input_sources
+ component: default/applicable_input_device/mask/
+ bluetooth_a2dp = 0
+ wired_headset = 0
+ usb_device = 0
+ builtin_mic = 0
+ bluetooth_sco_headset = 0
+ stub = 1
+ component: mic/applicable_input_device/mask/
+ bluetooth_a2dp = 0
+ wired_headset = 0
+ usb_device = 0
+ builtin_mic = 0
+ bluetooth_sco_headset = 0
+ stub = 1
conf: Default
component: /Policy/policy/input_sources
@@ -339,12 +356,14 @@
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
component: mic/applicable_input_device/mask/
bluetooth_a2dp = 0
wired_headset = 0
usb_device = 0
builtin_mic = 0
bluetooth_sco_headset = 0
+ stub = 0
domain: VoiceUplinkAndVoiceDownlinkAndVoiceCall
conf: VoiceCall
@@ -354,12 +373,29 @@
voice_downlink/applicable_input_device/mask/telephony_rx = 1
voice_call/applicable_input_device/mask/telephony_rx = 1
voice_uplink/applicable_input_device/mask/telephony_rx = 1
+ voice_downlink/applicable_input_device/mask/stub = 0
+ voice_call/applicable_input_device/mask/stub = 0
+ voice_uplink/applicable_input_device/mask/stub = 0
+
+ conf: Stub
+ AvailableInputDevices Includes Default
+
+ component: /Policy/policy/input_sources
+ voice_downlink/applicable_input_device/mask/telephony_rx = 0
+ voice_call/applicable_input_device/mask/telephony_rx = 0
+ voice_uplink/applicable_input_device/mask/telephony_rx = 0
+ voice_downlink/applicable_input_device/mask/stub = 1
+ voice_call/applicable_input_device/mask/stub = 1
+ voice_uplink/applicable_input_device/mask/stub = 1
conf: Default
component: /Policy/policy/input_sources
voice_downlink/applicable_input_device/mask/telephony_rx = 0
voice_call/applicable_input_device/mask/telephony_rx = 0
voice_uplink/applicable_input_device/mask/telephony_rx = 0
+ voice_downlink/applicable_input_device/mask/stub = 0
+ voice_call/applicable_input_device/mask/stub = 0
+ voice_uplink/applicable_input_device/mask/stub = 0
domain: Camcorder
conf: BackMic
@@ -368,6 +404,7 @@
component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
back_mic = 1
builtin_mic = 0
+ stub = 0
conf: BuiltinMic
AvailableInputDevices Includes BuiltinMic
@@ -375,11 +412,21 @@
component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
back_mic = 0
builtin_mic = 1
+ stub = 0
+
+ conf: Stub
+ AvailableInputDevices Includes Default
+
+ component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
+ back_mic = 0
+ builtin_mic = 0
+ stub = 1
conf: Default
component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
back_mic = 0
builtin_mic = 0
+ stub = 0
domain: VoiceRecognitionAndUnprocessedAndHotword
conf: ScoHeadset
@@ -392,16 +439,19 @@
wired_headset = 0
usb_device = 0
builtin_mic = 0
+ stub = 0
component: unprocessed/applicable_input_device/mask
bluetooth_sco_headset = 1
wired_headset = 0
usb_device = 0
builtin_mic = 0
+ stub = 0
component: hotword/applicable_input_device/mask
bluetooth_sco_headset = 1
wired_headset = 0
usb_device = 0
builtin_mic = 0
+ stub = 0
conf: WiredHeadset
AvailableInputDevices Includes WiredHeadset
@@ -411,17 +461,20 @@
bluetooth_sco_headset = 0
wired_headset = 1
usb_device = 0
+ stub = 0
builtin_mic = 0
component: unprocessed/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 1
usb_device = 0
builtin_mic = 0
+ stub = 0
component: hotword/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 1
usb_device = 0
builtin_mic = 0
+ stub = 0
conf: UsbDevice
AvailableInputDevices Includes UsbDevice
@@ -432,16 +485,19 @@
wired_headset = 0
usb_device = 1
builtin_mic = 0
+ stub = 0
component: unprocessed/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 0
usb_device = 1
builtin_mic = 0
+ stub = 0
component: hotword/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 0
usb_device = 1
builtin_mic = 0
+ stub = 0
conf: BuiltinMic
AvailableInputDevices Includes BuiltinMic
@@ -452,17 +508,42 @@
wired_headset = 0
usb_device = 0
builtin_mic = 1
+ stub = 0
component: unprocessed/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 0
usb_device = 0
builtin_mic = 1
+ stub = 0
component: hotword/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 0
usb_device = 0
builtin_mic = 1
+ stub = 0
+ conf: Stub
+ AvailableInputDevices Includes Default
+
+ component: /Policy/policy/input_sources
+ component: voice_recognition/applicable_input_device/mask
+ bluetooth_sco_headset = 0
+ wired_headset = 0
+ usb_device = 0
+ builtin_mic = 0
+ stub = 1
+ component: unprocessed/applicable_input_device/mask
+ bluetooth_sco_headset = 0
+ wired_headset = 0
+ usb_device = 0
+ builtin_mic = 0
+ stub = 1
+ component: hotword/applicable_input_device/mask
+ bluetooth_sco_headset = 0
+ wired_headset = 0
+ usb_device = 0
+ builtin_mic = 0
+ stub = 1
conf: Default
component: /Policy/policy/input_sources
component: voice_recognition/applicable_input_device/mask
@@ -470,16 +551,19 @@
wired_headset = 0
usb_device = 0
builtin_mic = 0
+ stub = 0
component: unprocessed/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 0
usb_device = 0
builtin_mic = 0
+ stub = 0
component: hotword/applicable_input_device/mask
bluetooth_sco_headset = 0
wired_headset = 0
usb_device = 0
builtin_mic = 0
+ stub = 0
domain: VoiceCommunication
conf: ScoHeadset
@@ -495,6 +579,7 @@
usb_device = 0
builtin_mic = 0
back_mic = 0
+ stub = 0
conf: WiredHeadset
ForceUseForCommunication Is ForceNone
@@ -506,6 +591,7 @@
usb_device = 0
builtin_mic = 0
back_mic = 0
+ stub = 0
conf: UsbDevice
ForceUseForCommunication Is ForceNone
@@ -517,6 +603,7 @@
usb_device = 1
builtin_mic = 0
back_mic = 0
+ stub = 0
conf: BuiltinMic
AvailableInputDevices Includes BuiltinMic
@@ -532,6 +619,7 @@
usb_device = 0
builtin_mic = 1
back_mic = 0
+ stub = 0
conf: BackMic
ForceUseForCommunication Is ForceSpeaker
@@ -543,6 +631,7 @@
usb_device = 0
builtin_mic = 0
back_mic = 1
+ stub = 0
conf: Default
#
@@ -554,6 +643,7 @@
usb_device = 0
builtin_mic = 1
back_mic = 0
+ stub = 0
domain: RemoteSubmix
conf: RemoteSubmix
@@ -561,10 +651,19 @@
component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
remote_submix = 1
+ stub = 0
+
+ conf: Stub
+ AvailableInputDevices Includes Default
+
+ component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
+ remote_submix = 0
+ stub = 1
conf: Default
component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
remote_submix = 0
+ stub = 0
domain: FmTuner
conf: FmTuner
@@ -572,8 +671,29 @@
component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
fm_tuner = 1
+ stub = 0
+
+ conf: Stub
+ AvailableInputDevices Includes Default
+
+ component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
+ fm_tuner = 0
+ stub = 1
conf: Default
component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
fm_tuner = 0
+ stub = 0
+
+ domain: Voice
+ conf: Stub
+ AvailableInputDevices Includes Default
+
+ /Policy/policy/input_sources/echo_reference/applicable_input_device/mask/stub = 1
+ /Policy/policy/input_sources/voice_performance/applicable_input_device/mask/stub = 1
+
+ conf: Default
+ /Policy/policy/input_sources/echo_reference/applicable_input_device/mask/stub = 0
+ /Policy/policy/input_sources/voice_performance/applicable_input_device/mask/stub = 0
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
new file mode 100644
index 0000000..ffd494e
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoInput/Android.bp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use AOSP No Input configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/engineconfigurable/config/example/phone",
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+prebuilt_etc {
+ name: "parameter-framework.policy",
+ filename_from_src: true,
+ vendor: true,
+ src: ":domaingeneratorpolicyrule_gen",
+ sub_dir: "parameter-framework/Settings/Policy",
+ required: [
+ "PolicyClass.xml",
+ "PolicySubsystem.xml",
+ "PolicySubsystem-CommonTypes.xml",
+ ],
+}
+
+genrule {
+ name: "domaingeneratorpolicyrule_gen",
+ defaults: ["domaingeneratorpolicyrule"],
+ srcs: [
+ ":audio_policy_pfw_toplevel",
+ ":audio_policy_pfw_structure_files",
+ ":audio_policy_engine_criterion_types",
+ ":edd_files",
+ ],
+}
+filegroup {
+ name: "audio_policy_pfw_toplevel",
+ srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
+}
+filegroup {
+ name: "audio_policy_pfw_structure_files",
+ srcs: [
+ ":PolicyClass",
+ ":PolicySubsystem",
+ ":PolicySubsystem-CommonTypes",
+ ],
+}
+filegroup {
+ name: "edd_files",
+ srcs: [
+ "device_for_input_source.pfw",
+ ":volumes.pfw",
+ ],
+}
+prebuilt_etc {
+ name: "PolicySubsystem.xml",
+ vendor: true,
+ src: ":PolicySubsystem-no-strategy",
+ sub_dir: "parameter-framework/Structure/Policy",
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
new file mode 100644
index 0000000..6fca048
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/SettingsNoOutput/Android.bp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use AOSP No output configuration example
+
+soong_namespace {
+ imports: [
+ "frameworks/av/services/audiopolicy/engineconfigurable/config/example/phone",
+ "frameworks/av/services/audiopolicy/config",
+ ],
+}
+
+prebuilt_etc {
+ name: "parameter-framework.policy",
+ filename_from_src: true,
+ vendor: true,
+ src: ":domaingeneratorpolicyrule_gen",
+ sub_dir: "parameter-framework/Settings/Policy",
+ required: [
+ "PolicyClass.xml",
+ "PolicySubsystem.xml",
+ "PolicySubsystem-CommonTypes.xml",
+ ],
+}
+genrule {
+ name: "domaingeneratorpolicyrule_gen",
+ defaults: ["domaingeneratorpolicyrule"],
+ srcs: [
+ ":audio_policy_pfw_toplevel",
+ ":audio_policy_pfw_structure_files",
+ ":audio_policy_engine_criterion_types",
+ ":edd_files",
+ ],
+}
+filegroup {
+ name: "audio_policy_pfw_toplevel",
+ srcs: [":ParameterFrameworkConfigurationPolicy.userdebug.xml"],
+}
+filegroup {
+ name: "audio_policy_pfw_structure_files",
+ srcs: [
+ ":PolicyClass",
+ ":PolicySubsystem",
+ ":PolicySubsystem-CommonTypes",
+ ],
+}
+filegroup {
+ name: "edd_files",
+ srcs: [
+ "device_for_strategies.pfw",
+ ":volumes.pfw",
+ ":device_for_input_source.pfw",
+ ],
+}
+prebuilt_etc {
+ name: "PolicySubsystem.xml",
+ vendor: true,
+ src: ":PolicySubsystem-no-strategy",
+ sub_dir: "parameter-framework/Structure/Policy",
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem.xml
index b55ce2c..585ce87 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/common/Structure/PolicySubsystem.xml
@@ -73,10 +73,13 @@
Mapping="Name:AUDIO_SOURCE_REMOTE_SUBMIX"/>
<Component Name="unprocessed" Type="InputSource"
Mapping="Name:AUDIO_SOURCE_UNPROCESSED"/>
+ <Component Name="voice_performance" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_VOICE_PERFORMANCE"/>
+ <Component Name="echo_reference" Type="InputSource"
+ Mapping="Name:AUDIO_SOURCE_ECHO_REFERENCE"/>
<Component Name="fm_tuner" Type="InputSource" Mapping="Name:AUDIO_SOURCE_FM_TUNER"/>
<Component Name="hotword" Type="InputSource" Mapping="Name:AUDIO_SOURCE_HOTWORD"/>
</ComponentType>
-
<!--#################### INPUT SOURCE END ####################-->
</ComponentLibrary>
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index c37efca..0a88685 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -32,6 +32,9 @@
#include <policy.h>
#include <AudioIODescriptorInterface.h>
#include <ParameterManagerWrapper.h>
+#include <media/AudioContainers.h>
+
+#include <media/TypeConverter.h>
using std::string;
using std::map;
@@ -165,11 +168,13 @@
mPolicyParameterMgr->setDeviceConnectionState(devDesc, state);
if (audio_is_output_device(devDesc->type())) {
+ // FIXME: Use DeviceTypeSet when the interface is ready
return mPolicyParameterMgr->setAvailableOutputDevices(
- getApmObserver()->getAvailableOutputDevices().types());
+ deviceTypesToBitMask(getApmObserver()->getAvailableOutputDevices().types()));
} else if (audio_is_input_device(devDesc->type())) {
+ // FIXME: Use DeviceTypeSet when the interface is ready
return mPolicyParameterMgr->setAvailableInputDevices(
- getApmObserver()->getAvailableInputDevices().types());
+ deviceTypesToBitMask(getApmObserver()->getAvailableInputDevices().types()));
}
return BAD_TYPE;
}
@@ -209,7 +214,7 @@
}
const DeviceVector availableOutputDevices = getApmObserver()->getAvailableOutputDevices();
const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
- uint32_t availableOutputDevicesType = availableOutputDevices.types();
+ DeviceTypeSet availableOutputDevicesTypes = availableOutputDevices.types();
/** This is the only case handled programmatically because the PFW is unable to know the
* activity of streams.
@@ -221,7 +226,7 @@
*
* -When media is not playing anymore, fall back on the sonification behavior
*/
- audio_devices_t devices = AUDIO_DEVICE_NONE;
+ DeviceTypeSet deviceTypes;
if (ps == getProductStrategyForStream(AUDIO_STREAM_NOTIFICATION) &&
!is_state_in_call(getPhoneState()) &&
!outputs.isActiveRemotely(toVolumeSource(AUDIO_STREAM_MUSIC),
@@ -230,7 +235,7 @@
SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
product_strategy_t strategyForMedia =
getProductStrategyForStream(AUDIO_STREAM_MUSIC);
- devices = productStrategies.getDeviceTypesForProductStrategy(strategyForMedia);
+ deviceTypes = productStrategies.getDeviceTypesForProductStrategy(strategyForMedia);
} else if (ps == getProductStrategyForStream(AUDIO_STREAM_ACCESSIBILITY) &&
(outputs.isActive(toVolumeSource(AUDIO_STREAM_RING)) ||
outputs.isActive(toVolumeSource(AUDIO_STREAM_ALARM)))) {
@@ -238,28 +243,37 @@
// compressed format as they would likely not be mixed and dropped.
// Device For Sonification conf file has HDMI, SPDIF and HDMI ARC unreachable.
product_strategy_t strategyNotification = getProductStrategyForStream(AUDIO_STREAM_RING);
- devices = productStrategies.getDeviceTypesForProductStrategy(strategyNotification);
+ deviceTypes = productStrategies.getDeviceTypesForProductStrategy(strategyNotification);
} else {
- devices = productStrategies.getDeviceTypesForProductStrategy(ps);
+ deviceTypes = productStrategies.getDeviceTypesForProductStrategy(ps);
}
- if (devices == AUDIO_DEVICE_NONE ||
- (devices & availableOutputDevicesType) == AUDIO_DEVICE_NONE) {
- devices = getApmObserver()->getDefaultOutputDevice()->type();
- ALOGE_IF(devices == AUDIO_DEVICE_NONE, "%s: no valid default device defined", __FUNCTION__);
- return DeviceVector(getApmObserver()->getDefaultOutputDevice());
+ if (deviceTypes.empty() ||
+ Intersection(deviceTypes, availableOutputDevicesTypes).empty()) {
+ auto defaultDevice = getApmObserver()->getDefaultOutputDevice();
+ ALOG_ASSERT(defaultDevice != nullptr, "no valid default device defined");
+ return DeviceVector(defaultDevice);
}
- if (/*device_distinguishes_on_address(devices)*/ devices == AUDIO_DEVICE_OUT_BUS) {
+ if (/*device_distinguishes_on_address(*deviceTypes.begin())*/ isSingleDeviceType(
+ deviceTypes, AUDIO_DEVICE_OUT_BUS)) {
// We do expect only one device for these types of devices
// Criterion device address guarantees this one is available
// If this criterion is not wished, need to ensure this device is available
const String8 address(productStrategies.getDeviceAddressForProductStrategy(ps).c_str());
- ALOGV("%s:device 0x%x %s %d", __FUNCTION__, devices, address.c_str(), ps);
- return DeviceVector(availableOutputDevices.getDevice(devices,
- address,
- AUDIO_FORMAT_DEFAULT));
+ ALOGV("%s:device %s %s %d",
+ __FUNCTION__, dumpDeviceTypes(deviceTypes).c_str(), address.c_str(), ps);
+ auto busDevice = availableOutputDevices.getDevice(
+ *deviceTypes.begin(), address, AUDIO_FORMAT_DEFAULT);
+ if (busDevice == nullptr) {
+ ALOGE("%s:unavailable device %s %s, fallback on default", __func__,
+ dumpDeviceTypes(deviceTypes).c_str(), address.c_str());
+ auto defaultDevice = getApmObserver()->getDefaultOutputDevice();
+ ALOG_ASSERT(defaultDevice != nullptr, "Default Output Device NOT available");
+ return DeviceVector(defaultDevice);
+ }
+ return DeviceVector(busDevice);
}
- ALOGV("%s:device 0x%x %d", __FUNCTION__, devices, ps);
- return availableOutputDevices.getDevicesFromTypeMask(devices);
+ ALOGV("%s:device %s %d", __FUNCTION__, dumpDeviceTypes(deviceTypes).c_str(), ps);
+ return availableOutputDevices.getDevicesFromTypes(deviceTypes);
}
DeviceVector Engine::getOutputDevicesForAttributes(const audio_attributes_t &attributes,
@@ -356,7 +370,8 @@
ALOGE("%s: set device %d on invalid strategy %d", __FUNCTION__, devices, strategy);
return false;
}
- getProductStrategies().at(strategy)->setDeviceTypes(devices);
+ // FIXME: stop using deviceTypesFromBitMask when the interface is ready
+ getProductStrategies().at(strategy)->setDeviceTypes(deviceTypesFromBitMask(devices));
return true;
}
diff --git a/services/audiopolicy/engineconfigurable/tools/Android.bp b/services/audiopolicy/engineconfigurable/tools/Android.bp
index 8c16972..d9e97af 100644
--- a/services/audiopolicy/engineconfigurable/tools/Android.bp
+++ b/services/audiopolicy/engineconfigurable/tools/Android.bp
@@ -16,14 +16,17 @@
name: "tools_default",
version: {
py2: {
- enabled: true,
+ enabled: false,
},
py3: {
- enabled: false,
+ enabled: true,
},
},
}
+//##################################################################################################
+// Tools for audio policy engine criterion type configuration file
+//
python_binary_host {
name: "buildPolicyCriterionTypes.py",
main: "buildPolicyCriterionTypes.py",
@@ -33,6 +36,30 @@
defaults: ["tools_default"],
}
+genrule_defaults {
+ name: "buildpolicycriteriontypesrule",
+ tools: ["buildPolicyCriterionTypes.py"],
+ cmd: "cp $(locations :audio_policy_configuration_files) $(genDir)/. && " +
+ "cp $(location :audio_policy_configuration_top_file) $(genDir)/audio_policy_configuration.xml && " +
+ "$(location buildPolicyCriterionTypes.py) " +
+ // @todo update if 1428659 is merged "--androidaudiobaseheader $(location :android_audio_base_header_file) " +
+ " --androidaudiobaseheader system/media/audio/include/system/audio-base.h " +
+ "--audiopolicyconfigurationfile $(genDir)/audio_policy_configuration.xml " +
+ "--criteriontypes $(location :audio_policy_engine_criterion_types_template) " +
+ "--outputfile $(out)",
+ srcs: [
+ // The commented inputs must be provided to use this genrule_defaults
+ // @todo uncomment if 1428659 is merged":android_audio_base_header_file",
+ ":audio_policy_engine_criterion_types_template",
+ // ":audio_policy_configuration_top_file",
+ // ":audio_policy_configuration_files",
+ ],
+ out: ["audio_policy_engine_criterion_types.xml"],
+}
+
+//##################################################################################################
+// Tools for audio policy engine parameter framework configurable domains
+//
python_binary_host {
name: "domainGeneratorPolicy.py",
main: "domainGeneratorPolicy.py",
@@ -50,6 +77,38 @@
],
}
+genrule_defaults {
+ name: "domaingeneratorpolicyrule",
+ tools: [
+ "domainGeneratorPolicy.py",
+ "domainGeneratorConnector",
+ ],
+ cmd: "mkdir -p $(genDir)/Structure/Policy && " +
+ "cp $(locations :audio_policy_pfw_structure_files) $(genDir)/Structure/Policy && " +
+ "cp $(location :audio_policy_pfw_toplevel) $(genDir)/top_level && " +
+ "$(location domainGeneratorPolicy.py) " +
+ "--validate " +
+ "--domain-generator-tool $(location domainGeneratorConnector) " +
+ "--toplevel-config $(genDir)/top_level " +
+ "--criteria $(location :audio_policy_engine_criteria) " +
+ "--criteriontypes $(location :audio_policy_engine_criterion_types) " +
+ "--add-edds $(locations :edd_files) " +
+ "--schemas-dir external/parameter-framework/upstream/schemas " +
+ " > $(out)",
+ srcs: [
+ // The commented inputs must be provided to use this genrule_defaults
+ // ":audio_policy_pfw_toplevel",
+ // ":audio_policy_pfw_structure_files",
+ ":audio_policy_engine_criteria",
+ // ":audio_policy_engine_criterion_types",
+ // ":edd_files",
+ ],
+ out: ["PolicyConfigurableDomains.xml"],
+}
+
+//##################################################################################################
+// Tools for policy parameter-framework product strategies structure file generation
+//
python_binary_host {
name: "buildStrategiesStructureFile.py",
main: "buildStrategiesStructureFile.py",
@@ -58,3 +117,19 @@
],
defaults: ["tools_default"],
}
+
+genrule_defaults {
+ name: "buildstrategiesstructurerule",
+ tools: ["buildStrategiesStructureFile.py"],
+ cmd: "cp $(locations :audio_policy_engine_configuration_files) $(genDir) && ls -l $(genDir) &&"+
+ "$(location buildStrategiesStructureFile.py) " +
+ "--audiopolicyengineconfigurationfile $(genDir)/audio_policy_engine_configuration.xml "+
+ "--productstrategiesstructurefile $(location :product_strategies_structure_template) " +
+ "--outputfile $(out)",
+ srcs: [
+ // The commented inputs must be provided to use this genrule_defaults
+ // ":audio_policy_engine_configuration_files",
+ ":product_strategies_structure_template",
+ ],
+ out: ["ProductStrategies.xml"],
+}
diff --git a/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py b/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
index a63c858..b8b60c1 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildPolicyCriterionTypes.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Copyright 2018, The Android Open Source Project
@@ -19,10 +19,8 @@
import argparse
import re
import sys
-import tempfile
import os
import logging
-import subprocess
import xml.etree.ElementTree as ET
import xml.etree.ElementInclude as EI
import xml.dom.minidom as MINIDOM
@@ -49,33 +47,35 @@
def parseArgs():
argparser = argparse.ArgumentParser(description="Parameter-Framework XML \
- audio criterion type file generator.\n\
- Exit with the number of (recoverable or not) error that occured.")
+ audio criterion type file generator.\n\
+ Exit with the number of (recoverable or not) \
+ errors that occurred.")
argparser.add_argument('--androidaudiobaseheader',
- help="Android Audio Base C header file, Mandatory.",
- metavar="ANDROID_AUDIO_BASE_HEADER",
- type=argparse.FileType('r'),
- required=True)
+ help="Android Audio Base C header file, Mandatory.",
+ metavar="ANDROID_AUDIO_BASE_HEADER",
+ type=argparse.FileType('r'),
+ required=True)
argparser.add_argument('--audiopolicyconfigurationfile',
- help="Android Audio Policy Configuration file, Mandatory.",
- metavar="(AUDIO_POLICY_CONFIGURATION_FILE)",
- type=argparse.FileType('r'),
- required=True)
+ help="Android Audio Policy Configuration file, Mandatory.",
+ metavar="(AUDIO_POLICY_CONFIGURATION_FILE)",
+ type=argparse.FileType('r'),
+ required=True)
argparser.add_argument('--criteriontypes',
- help="Criterion types XML base file, in \
- '<criterion_types> \
- <criterion_type name="" type=<inclusive|exclusive> values=<value1,value2,...>/>' \
- format. Mandatory.",
- metavar="CRITERION_TYPE_FILE",
- type=argparse.FileType('r'),
- required=True)
+ help="Criterion types XML base file, in \
+ '<criterion_types> \
+ <criterion_type name="" type=<inclusive|exclusive> \
+ values=<value1,value2,...>/>' \
+ format. Mandatory.",
+ metavar="CRITERION_TYPE_FILE",
+ type=argparse.FileType('r'),
+ required=True)
argparser.add_argument('--outputfile',
- help="Criterion types outputfile file. Mandatory.",
- metavar="CRITERION_TYPE_OUTPUT_FILE",
- type=argparse.FileType('w'),
- required=True)
+ help="Criterion types outputfile file. Mandatory.",
+ metavar="CRITERION_TYPE_OUTPUT_FILE",
+ type=argparse.FileType('w'),
+ required=True)
argparser.add_argument('--verbose',
- action='store_true')
+ action='store_true')
return argparser.parse_args()
@@ -120,7 +120,7 @@
reparsed = MINIDOM.parseString(xmlstr)
prettyXmlStr = reparsed.toprettyxml(newl='\r\n')
prettyXmlStr = os.linesep.join([s for s in prettyXmlStr.splitlines() if s.strip()])
- outputFile.write(prettyXmlStr.encode('utf-8'))
+ outputFile.write(prettyXmlStr)
def capitalizeLine(line):
return ' '.join((w.capitalize() for w in line.split(' ')))
@@ -137,30 +137,30 @@
#
address_criteria_mapping_table = {
'sink' : "OutputDevicesAddressesType",
- 'source' : "InputDevicesAddressesType" }
+ 'source' : "InputDevicesAddressesType"}
address_criteria = {
'OutputDevicesAddressesType' : [],
- 'InputDevicesAddressesType' : [] }
+ 'InputDevicesAddressesType' : []}
- oldWorkingDir = os.getcwd()
- print "Current working directory %s" % oldWorkingDir
+ old_working_dir = os.getcwd()
+ print("Current working directory %s" % old_working_dir)
- newDir = os.path.join(oldWorkingDir , audiopolicyconfigurationfile.name)
+ new_dir = os.path.join(old_working_dir, audiopolicyconfigurationfile.name)
policy_in_tree = ET.parse(audiopolicyconfigurationfile)
- os.chdir(os.path.dirname(os.path.normpath(newDir)))
+ os.chdir(os.path.dirname(os.path.normpath(new_dir)))
- print "new working directory %s" % os.getcwd()
+ print("new working directory %s" % os.getcwd())
policy_root = policy_in_tree.getroot()
EI.include(policy_root)
- os.chdir(oldWorkingDir)
+ os.chdir(old_working_dir)
for device in policy_root.iter('devicePort'):
for key in address_criteria_mapping_table.keys():
- if device.get('role') == key and device.get('address') :
+ if device.get('role') == key and device.get('address'):
logging.info("{}: <{}>".format(key, device.get('address')))
address_criteria[address_criteria_mapping_table[key]].append(device.get('address'))
@@ -188,15 +188,15 @@
all_criteria = {
'AndroidModeType' : {},
'OutputDevicesMaskType' : {},
- 'InputDevicesMaskType' : {} }
+ 'InputDevicesMaskType' : {}}
#
# _CNT, _MAX, _ALL and _NONE are prohibited values as they are just helpers for enum users.
#
- ignored_values = [ 'CNT', 'MAX', 'ALL', 'NONE' ]
+ ignored_values = ['CNT', 'MAX', 'ALL', 'NONE']
criteria_pattern = re.compile(
- r"\s*(?P<type>(?:"+'|'.join(criterion_mapping_table.keys()) + "))\_" \
+ r"\s*(?P<type>(?:"+'|'.join(criterion_mapping_table.keys()) + "))_" \
r"(?P<literal>(?!" + '|'.join(ignored_values) + ")\w*)\s*=\s*" \
r"(?P<values>(?:0[xX])?[0-9a-fA-F]+)")
@@ -221,7 +221,7 @@
logging.info("criterion {} duplicated values:".format(criterion_name))
logging.info("{}:{}".format(numerical_value, literal))
logging.info("KEEPING LATEST")
- for key in all_criteria[criterion_name].keys():
+ for key in list(all_criteria[criterion_name]):
if all_criteria[criterion_name][key] == int(numerical_value, 0):
del all_criteria[criterion_name][key]
diff --git a/services/audiopolicy/engineconfigurable/tools/buildStrategiesStructureFile.py b/services/audiopolicy/engineconfigurable/tools/buildStrategiesStructureFile.py
index af40602..f69d346 100755
--- a/services/audiopolicy/engineconfigurable/tools/buildStrategiesStructureFile.py
+++ b/services/audiopolicy/engineconfigurable/tools/buildStrategiesStructureFile.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Copyright 2019, The Android Open Source Project
@@ -17,16 +17,12 @@
#
import argparse
-import re
import sys
-import tempfile
import os
import logging
-import subprocess
import xml.etree.ElementTree as ET
import xml.etree.ElementInclude as EI
import xml.dom.minidom as MINIDOM
-from collections import OrderedDict
#
# Helper script that helps to feed at build time the XML Product Strategies Structure file used
@@ -46,33 +42,34 @@
def parseArgs():
argparser = argparse.ArgumentParser(description="Parameter-Framework XML \
- product strategies structure file generator.\n\
- Exit with the number of (recoverable or not) error that occured.")
+ product strategies structure file generator.\n\
+ Exit with the number of (recoverable or not) \
+ errors that occurred.")
argparser.add_argument('--audiopolicyengineconfigurationfile',
- help="Android Audio Policy Engine Configuration file, Mandatory.",
- metavar="(AUDIO_POLICY_ENGINE_CONFIGURATION_FILE)",
- type=argparse.FileType('r'),
- required=True)
+ help="Android Audio Policy Engine Configuration file, Mandatory.",
+ metavar="(AUDIO_POLICY_ENGINE_CONFIGURATION_FILE)",
+ type=argparse.FileType('r'),
+ required=True)
argparser.add_argument('--productstrategiesstructurefile',
- help="Product Strategies Structure XML base file, Mandatory.",
- metavar="STRATEGIES_STRUCTURE_FILE",
- type=argparse.FileType('r'),
- required=True)
+ help="Product Strategies Structure XML base file, Mandatory.",
+ metavar="STRATEGIES_STRUCTURE_FILE",
+ type=argparse.FileType('r'),
+ required=True)
argparser.add_argument('--outputfile',
- help="Product Strategies Structure output file, Mandatory.",
- metavar="STRATEGIES_STRUCTURE_OUTPUT_FILE",
- type=argparse.FileType('w'),
- required=True)
+ help="Product Strategies Structure output file, Mandatory.",
+ metavar="STRATEGIES_STRUCTURE_OUTPUT_FILE",
+ type=argparse.FileType('w'),
+ required=True)
argparser.add_argument('--verbose',
- action='store_true')
+ action='store_true')
return argparser.parse_args()
-def generateXmlStructureFile(strategies, strategyStructureInFile, outputFile):
+def generateXmlStructureFile(strategies, strategy_structure_in_file, output_file):
- logging.info("Importing strategyStructureInFile {}".format(strategyStructureInFile))
- strategies_in_tree = ET.parse(strategyStructureInFile)
+ logging.info("Importing strategy_structure_in_file {}".format(strategy_structure_in_file))
+ strategies_in_tree = ET.parse(strategy_structure_in_file)
strategies_root = strategies_in_tree.getroot()
strategy_components = strategies_root.find('ComponentType')
@@ -80,13 +77,15 @@
for strategy_name in strategies:
context_mapping = "".join(map(str, ["Name:", strategy_name]))
strategy_pfw_name = strategy_name.replace('STRATEGY_', '').lower()
- strategy_component_node = ET.SubElement(strategy_components, "Component", Name=strategy_pfw_name, Type="ProductStrategy", Mapping=context_mapping)
+ ET.SubElement(strategy_components, "Component",
+ Name=strategy_pfw_name, Type="ProductStrategy",
+ Mapping=context_mapping)
xmlstr = ET.tostring(strategies_root, encoding='utf8', method='xml')
reparsed = MINIDOM.parseString(xmlstr)
prettyXmlStr = reparsed.toprettyxml(newl='\r\n')
prettyXmlStr = os.linesep.join([s for s in prettyXmlStr.splitlines() if s.strip()])
- outputFile.write(prettyXmlStr.encode('utf-8'))
+ output_file.write(prettyXmlStr)
def capitalizeLine(line):
return ' '.join((w.capitalize() for w in line.split(' ')))
@@ -97,26 +96,27 @@
#
def parseAndroidAudioPolicyEngineConfigurationFile(audiopolicyengineconfigurationfile):
- logging.info("Checking Audio Policy Engine Configuration file {}".format(audiopolicyengineconfigurationfile))
+ logging.info("Checking Audio Policy Engine Configuration file {}".format(
+ audiopolicyengineconfigurationfile))
#
# extract all product strategies name from audio policy engine configuration file
#
strategy_names = []
- oldWorkingDir = os.getcwd()
- print "Current working directory %s" % oldWorkingDir
+ old_working_dir = os.getcwd()
+ print("Current working directory %s" % old_working_dir)
- newDir = os.path.join(oldWorkingDir , audiopolicyengineconfigurationfile.name)
+ new_dir = os.path.join(old_working_dir, audiopolicyengineconfigurationfile.name)
policy_engine_in_tree = ET.parse(audiopolicyengineconfigurationfile)
- os.chdir(os.path.dirname(os.path.normpath(newDir)))
+ os.chdir(os.path.dirname(os.path.normpath(new_dir)))
- print "new working directory %s" % os.getcwd()
+ print("new working directory %s" % os.getcwd())
policy_engine_root = policy_engine_in_tree.getroot()
EI.include(policy_engine_root)
- os.chdir(oldWorkingDir)
+ os.chdir(old_working_dir)
for strategy in policy_engine_root.iter('ProductStrategy'):
strategy_names.append(strategy.get('name'))
@@ -128,7 +128,8 @@
logging.root.setLevel(logging.INFO)
args = parseArgs()
- strategies = parseAndroidAudioPolicyEngineConfigurationFile(args.audiopolicyengineconfigurationfile)
+ strategies = parseAndroidAudioPolicyEngineConfigurationFile(
+ args.audiopolicyengineconfigurationfile)
product_strategies_structure = args.productstrategiesstructurefile
diff --git a/services/audiopolicy/engineconfigurable/tools/build_audio_pfw_settings.mk b/services/audiopolicy/engineconfigurable/tools/build_audio_pfw_settings.mk
deleted file mode 100644
index ac60ef7..0000000
--- a/services/audiopolicy/engineconfigurable/tools/build_audio_pfw_settings.mk
+++ /dev/null
@@ -1,38 +0,0 @@
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_TAGS := optional
-LOCAL_ADDITIONAL_DEPENDENCIES += \
- $(HOST_OUT_EXECUTABLES)/domainGeneratorPolicy.py \
- $(PFW_TOPLEVEL_FILE) $(PFW_CRITERIA_FILE) $(PFW_CRITERION_TYPES_FILE)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): MY_CRITERION_TYPES_FILE := $(PFW_CRITERION_TYPES_FILE)
-$(LOCAL_BUILT_MODULE): MY_TOOL := $(HOST_OUT_EXECUTABLES)/domainGeneratorPolicy.py
-$(LOCAL_BUILT_MODULE): MY_TOPLEVEL_FILE := $(PFW_TOPLEVEL_FILE)
-$(LOCAL_BUILT_MODULE): MY_CRITERIA_FILE := $(PFW_CRITERIA_FILE)
-$(LOCAL_BUILT_MODULE): MY_TUNING_FILE := $(PFW_TUNING_FILE)
-$(LOCAL_BUILT_MODULE): MY_EDD_FILES := $(PFW_EDD_FILES)
-$(LOCAL_BUILT_MODULE): MY_DOMAIN_FILES := $(PFW_DOMAIN_FILES)
-$(LOCAL_BUILT_MODULE): MY_SCHEMAS_DIR := $(PFW_SCHEMAS_DIR)
-$(LOCAL_BUILT_MODULE): MY_CRITERION_TYPES_FILE := $(PFW_CRITERION_TYPES_FILE)
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-
- "$(MY_TOOL)" --validate \
- --toplevel-config "$(MY_TOPLEVEL_FILE)" \
- --criteria "$(MY_CRITERIA_FILE)" \
- --criteriontypes "$(MY_CRITERION_TYPES_FILE)" \
- --initial-settings $(MY_TUNING_FILE) \
- --add-edds $(MY_EDD_FILES) \
- --add-domains $(MY_DOMAIN_FILES) \
- --schemas-dir $(MY_SCHEMAS_DIR) > "$@"
-
-
-# Clear variables for further use
-PFW_TOPLEVEL_FILE :=
-PFW_STRUCTURE_FILES :=
-PFW_CRITERIA_FILE :=
-PFW_CRITERION_TYPES_FILE :=
-PFW_TUNING_FILE :=
-PFW_EDD_FILES :=
-PFW_DOMAIN_FILES :=
-PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
diff --git a/services/audiopolicy/engineconfigurable/tools/domainGeneratorPolicy.py b/services/audiopolicy/engineconfigurable/tools/domainGeneratorPolicy.py
index 4dec9a2..b0c4b66 100755
--- a/services/audiopolicy/engineconfigurable/tools/domainGeneratorPolicy.py
+++ b/services/audiopolicy/engineconfigurable/tools/domainGeneratorPolicy.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Copyright 2018, The Android Open Source Project
@@ -16,12 +16,7 @@
# limitations under the License.
#
-import EddParser
-from PFWScriptGenerator import PfwScriptTranslator
-import hostConfig
-
import argparse
-import re
import sys
import tempfile
import os
@@ -29,6 +24,10 @@
import subprocess
import xml.etree.ElementTree as ET
+import EddParser
+from PFWScriptGenerator import PfwScriptTranslator
+import hostConfig
+
#
# In order to build the XML Settings file at build time, an instance of the parameter-framework
# shall be started and fed with all the criterion types/criteria that will be used by
@@ -39,61 +38,67 @@
def parseArgs():
argparser = argparse.ArgumentParser(description="Parameter-Framework XML \
- Settings file generator.\n\
- Exit with the number of (recoverable or not) error that occured.")
+ Settings file generator.\n\
+ Exit with the number of (recoverable or not) \
+ errors that occurred.")
+ argparser.add_argument('--domain-generator-tool',
+ help="ParameterFramework domain generator tool. Mandatory.",
+ metavar="PFW_DOMAIN_GENERATOR_TOOL",
+ required=True)
argparser.add_argument('--toplevel-config',
- help="Top-level parameter-framework configuration file. Mandatory.",
- metavar="TOPLEVEL_CONFIG_FILE",
- required=True)
+ help="Top-level parameter-framework configuration file. Mandatory.",
+ metavar="TOPLEVEL_CONFIG_FILE",
+ required=True)
argparser.add_argument('--criteria',
- help="Criteria file, in XML format: \
- in '<criteria> \
- <criterion name="" type=""/> \
- </criteria>' \
- format. Mandatory.",
- metavar="CRITERIA_FILE",
- type=argparse.FileType('r'),
- required=True)
+ help="Criteria file, in XML format: \
+ in '<criteria> \
+ <criterion name="" type=""/> \
+ </criteria>' \
+ format. Mandatory.",
+ metavar="CRITERIA_FILE",
+ type=argparse.FileType('r'),
+ required=True)
argparser.add_argument('--criteriontypes',
- help="Criterion types XML file, in \
- '<criterion_types> \
- <criterion_type name="" type=<inclusive|exclusive> values=<value1,value2,...>/> \
- </criterion_types>' \
- format. Mandatory.",
- metavar="CRITERION_TYPE_FILE",
- type=argparse.FileType('r'),
- required=False)
+ help="Criterion types XML file, in \
+ '<criterion_types> \
+ <criterion_type name="" type=<inclusive|exclusive> \
+ values=<value1,value2,...>/> \
+ </criterion_types>' \
+ format. Mandatory.",
+ metavar="CRITERION_TYPE_FILE",
+ type=argparse.FileType('r'),
+ required=False)
argparser.add_argument('--initial-settings',
- help="Initial XML settings file (containing a \
- <ConfigurableDomains> tag",
- nargs='?',
- default=None,
- metavar="XML_SETTINGS_FILE")
+ help="Initial XML settings file (containing a \
+ <ConfigurableDomains> tag)",
+ nargs='?',
+ default=None,
+ metavar="XML_SETTINGS_FILE")
argparser.add_argument('--add-domains',
- help="List of single domain files (each containing a single \
- <ConfigurableDomain> tag",
- metavar="XML_DOMAIN_FILE",
- nargs='*',
- dest='xml_domain_files',
- default=[])
+ help="List of single domain files (each containing a single \
+ <ConfigurableDomain> tag)",
+ metavar="XML_DOMAIN_FILE",
+ nargs='*',
+ dest='xml_domain_files',
+ default=[])
argparser.add_argument('--add-edds',
- help="List of files in EDD syntax (aka \".pfw\" files)",
- metavar="EDD_FILE",
- type=argparse.FileType('r'),
- nargs='*',
- default=[],
- dest='edd_files')
+ help="List of files in EDD syntax (aka \".pfw\" files)",
+ metavar="EDD_FILE",
+ type=argparse.FileType('r'),
+ nargs='*',
+ default=[],
+ dest='edd_files')
argparser.add_argument('--schemas-dir',
- help="Directory of parameter-framework XML Schemas for generation \
- validation",
- default=None)
+ help="Directory of parameter-framework XML Schemas for generation \
+ validation",
+ default=None)
argparser.add_argument('--target-schemas-dir',
- help="Ignored. Kept for retro-compatibility")
+ help="Ignored. Kept for retro-compatibility")
argparser.add_argument('--validate',
- help="Validate the settings against XML schemas",
- action='store_true')
+ help="Validate the settings against XML schemas",
+ action='store_true')
argparser.add_argument('--verbose',
- action='store_true')
+ action='store_true')
return argparser.parse_args()
@@ -112,7 +117,6 @@
logging.info("Importing criterionTypesFile {}".format(criterionTypesFile))
criteria_root = criteria_tree.getroot()
- criterion_types_root = criterion_types_tree.getroot()
all_criteria = []
for criterion in criteria_root.findall('criterion'):
@@ -165,7 +169,7 @@
try:
root.propagate()
- except EddParser.MyPropagationError, ex :
+ except EddParser.MyPropagationError as ex:
logging.critical(str(ex))
logging.info("EXIT ON FAILURE")
exit(1)
@@ -179,32 +183,32 @@
# It takes as input the collection of criteria, the domains and the simplified settings read from
# pfw.
#
-def generateDomainCommands(logging, all_criteria, initial_settings, xml_domain_files, parsed_edds):
- # create and inject all the criteria
- logging.info("Creating all criteria")
- for criterion in all_criteria:
- yield ["createSelectionCriterion", criterion['inclusive'],
- criterion['name']] + criterion['values']
+def generateDomainCommands(logger, all_criteria, initial_settings, xml_domain_files, parsed_edds):
+ # create and inject all the criteria
+ logger.info("Creating all criteria")
+ for criterion in all_criteria:
+ yield ["createSelectionCriterion", criterion['inclusive'],
+ criterion['name']] + criterion['values']
- yield ["start"]
+ yield ["start"]
- # Import initial settings file
- if initial_settings:
- logging.info("Importing initial settings file {}".format(initial_settings))
- yield ["importDomainsWithSettingsXML", initial_settings]
+ # Import initial settings file
+ if initial_settings:
+ logger.info("Importing initial settings file {}".format(initial_settings))
+ yield ["importDomainsWithSettingsXML", initial_settings]
- # Import each standalone domain files
- for domain_file in xml_domain_files:
- logging.info("Importing single domain file {}".format(domain_file))
- yield ["importDomainWithSettingsXML", domain_file]
+ # Import each standalone domain files
+ for domain_file in xml_domain_files:
+ logger.info("Importing single domain file {}".format(domain_file))
+ yield ["importDomainWithSettingsXML", domain_file]
- # Generate the script for each EDD file
- for filename, parsed_edd in parsed_edds:
- logging.info("Translating and injecting EDD file {}".format(filename))
- translator = PfwScriptTranslator()
- parsed_edd.translate(translator)
- for command in translator.getScript():
- yield command
+ # Generate the script for each EDD file
+ for filename, parsed_edd in parsed_edds:
+ logger.info("Translating and injecting EDD file {}".format(filename))
+ translator = PfwScriptTranslator()
+ parsed_edd.translate(translator)
+ for command in translator.getScript():
+ yield command
#
# Entry point of the domain generator.
@@ -232,30 +236,29 @@
prefix="TMPdomainGeneratorPFConfig_")
install_path = os.path.dirname(os.path.realpath(args.toplevel_config))
- hostConfig.configure(
- infile=args.toplevel_config,
- outfile=fake_toplevel_config,
- structPath=install_path)
+ hostConfig.configure(infile=args.toplevel_config,
+ outfile=fake_toplevel_config,
+ structPath=install_path)
fake_toplevel_config.close()
# Create the connector. Pipe its input to us in order to write commands;
# connect its output to stdout in order to have it dump the domains
# there; connect its error output to stderr.
- connector = subprocess.Popen(["domainGeneratorConnector",
- fake_toplevel_config.name,
- 'verbose' if args.verbose else 'no-verbose',
- 'validate' if args.validate else 'no-validate',
- args.schemas_dir],
- stdout=sys.stdout, stdin=subprocess.PIPE, stderr=sys.stderr)
+ connector = subprocess.Popen([args.domain_generator_tool,
+ fake_toplevel_config.name,
+ 'verbose' if args.verbose else 'no-verbose',
+ 'validate' if args.validate else 'no-validate',
+ args.schemas_dir],
+ stdout=sys.stdout, stdin=subprocess.PIPE, stderr=sys.stderr)
initial_settings = None
if args.initial_settings:
initial_settings = os.path.realpath(args.initial_settings)
for command in generateDomainCommands(logging, all_criteria, initial_settings,
- args.xml_domain_files, parsed_edds):
- connector.stdin.write('\0'.join(command))
- connector.stdin.write("\n")
+ args.xml_domain_files, parsed_edds):
+ connector.stdin.write('\0'.join(command).encode('utf-8'))
+ connector.stdin.write("\n".encode('utf-8'))
# Closing the connector's input triggers the domain generation
connector.stdin.close()
diff --git a/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk b/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
deleted file mode 100644
index dab5a0f..0000000
--- a/services/audiopolicy/engineconfigurable/tools/provision_criterion_types_from_android_headers.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_TAGS := optional
-LOCAL_ADDITIONAL_DEPENDENCIES += \
- $(HOST_OUT_EXECUTABLES)/buildPolicyCriterionTypes.py \
- $(CRITERION_TYPES_FILE) $(AUDIO_POLICY_CONFIGURATION_FILE) \
- $(ANDROID_AUDIO_BASE_HEADER_FILE)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): MY_CRITERION_TYPES_FILE := $(CRITERION_TYPES_FILE)
-$(LOCAL_BUILT_MODULE): MY_ANDROID_AUDIO_BASE_HEADER_FILE := $(ANDROID_AUDIO_BASE_HEADER_FILE)
-$(LOCAL_BUILT_MODULE): MY_AUDIO_POLICY_CONFIGURATION_FILE := $(AUDIO_POLICY_CONFIGURATION_FILE)
-$(LOCAL_BUILT_MODULE): MY_CRITERION_TOOL := $(HOST_OUT_EXECUTABLES)/buildPolicyCriterionTypes.py
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-
- "$(MY_CRITERION_TOOL)" \
- --androidaudiobaseheader "$(MY_ANDROID_AUDIO_BASE_HEADER_FILE)" \
- --audiopolicyconfigurationfile "$(MY_AUDIO_POLICY_CONFIGURATION_FILE)" \
- --criteriontypes "$(MY_CRITERION_TYPES_FILE)" \
- --outputfile "$(@)"
-
-# Clear variables for further use
-CRITERION_TYPES_FILE :=
-ANDROID_AUDIO_BASE_HEADER_FILE :=
-AUDIO_POLICY_CONFIGURATION_FILE :=
diff --git a/services/audiopolicy/engineconfigurable/tools/provision_strategies_structure.mk b/services/audiopolicy/engineconfigurable/tools/provision_strategies_structure.mk
deleted file mode 100644
index f2b1a19..0000000
--- a/services/audiopolicy/engineconfigurable/tools/provision_strategies_structure.mk
+++ /dev/null
@@ -1,21 +0,0 @@
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_TAGS := optional
-LOCAL_ADDITIONAL_DEPENDENCIES += \
- $(HOST_OUT_EXECUTABLES)/buildStrategiesStructureFile.py \
- $(STRATEGIES_STRUCTURE_FILE) $(AUDIO_POLICY_ENGINE_CONFIGURATION_FILE)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): MY_STRATEGIES_STRUCTURE_FILE := $(STRATEGIES_STRUCTURE_FILE)
-$(LOCAL_BUILT_MODULE): MY_AUDIO_POLICY_ENGINE_CONFIGURATION_FILE := $(AUDIO_POLICY_ENGINE_CONFIGURATION_FILE)
-$(LOCAL_BUILT_MODULE): MY_PROVISION_TOOL := $(HOST_OUT_EXECUTABLES)/buildStrategiesStructureFile.py
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-
- "$(MY_PROVISION_TOOL)" \
- --audiopolicyengineconfigurationfile "$(MY_AUDIO_POLICY_ENGINE_CONFIGURATION_FILE)" \
- --productstrategiesstructurefile "$(MY_STRATEGIES_STRUCTURE_FILE)" \
- --outputfile "$(@)"
-
-# Clear variables for further use
-STRATEGIES_STRUCTURE_FILE :=
-AUDIO_POLICY_ENGINE_CONFIGURATION_FILE :=
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index 4b57444..465a6f9 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -259,7 +259,7 @@
std::string criterionName = audio_is_output_device(devDesc->type()) ?
gOutputDeviceAddressCriterionName : gInputDeviceAddressCriterionName;
- ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->address().string(),
+ ALOGV("%s: device with address %s %s", __FUNCTION__, devDesc->address().c_str(),
state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE? "disconnected" : "connected");
ISelectionCriterionInterface *criterion =
getElement<ISelectionCriterionInterface>(criterionName, mPolicyCriteria);
@@ -271,7 +271,7 @@
auto criterionType = criterion->getCriterionType();
int deviceAddressId;
- if (not criterionType->getNumericalValue(devDesc->address().string(), deviceAddressId)) {
+ if (not criterionType->getNumericalValue(devDesc->address().c_str(), deviceAddressId)) {
ALOGW("%s: unknown device address reported (%s)", __FUNCTION__, devDesc->address().c_str());
return BAD_TYPE;
}
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
index 72c8de1..8443008 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -16,7 +16,7 @@
#pragma once
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
#include <HwModule.h>
#include <DeviceDescriptor.h>
#include <system/audio.h>
diff --git a/services/audiopolicy/enginedefault/config/example/Android.bp b/services/audiopolicy/enginedefault/config/example/Android.bp
new file mode 100644
index 0000000..0bfcaa1
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/Android.bp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Import this namespace in order to use AOSP Phone with Default Engine configuration example
+
+soong_namespace {
+}
+
+prebuilt_etc {
+ name: "audio_policy_engine_configuration.xml",
+ vendor: true,
+ src: "phone/audio_policy_engine_configuration.xml",
+ required: [
+ ":audio_policy_engine_stream_volumes.xml",
+ ":audio_policy_engine_default_stream_volumes.xml",
+ ":audio_policy_engine_product_strategies.xml",
+ ],
+}
+prebuilt_etc {
+ name: "audio_policy_engine_product_strategies.xml",
+ vendor: true,
+ src: "phone/audio_policy_engine_product_strategies.xml",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_stream_volumes.xml",
+ vendor: true,
+ src: "phone/audio_policy_engine_stream_volumes.xml",
+}
+prebuilt_etc {
+ name: "audio_policy_engine_default_stream_volumes.xml",
+ vendor: true,
+ src: "phone/audio_policy_engine_default_stream_volumes.xml",
+}
diff --git a/services/audiopolicy/enginedefault/config/example/Android.mk b/services/audiopolicy/enginedefault/config/example/Android.mk
deleted file mode 100644
index 0badac8..0000000
--- a/services/audiopolicy/enginedefault/config/example/Android.mk
+++ /dev/null
@@ -1,48 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-##################################################################
-# CONFIGURATION TOP FILE
-##################################################################
-
-ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_configuration.xml
-
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-
-LOCAL_REQUIRED_MODULES := \
- audio_policy_engine_product_strategies.xml \
- audio_policy_engine_stream_volumes.xml \
- audio_policy_engine_default_stream_volumes.xml
-
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_product_strategies.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
-LOCAL_MODULE_TAGS := optional
-LOCAL_MODULE_CLASS := ETC
-LOCAL_VENDOR_MODULE := true
-LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
-include $(BUILD_PREBUILT)
-
-endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index cbc46d5..2a5cd49 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -27,10 +27,11 @@
#include "Engine.h"
#include <android-base/macros.h>
#include <AudioPolicyManagerObserver.h>
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
#include <IOProfile.h>
#include <AudioIODescriptorInterface.h>
#include <policy.h>
+#include <media/AudioContainers.h>
#include <utils/String8.h>
#include <utils/Log.h>
@@ -146,7 +147,7 @@
switch (strategy) {
case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
- devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
+ devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
break;
case STRATEGY_SONIFICATION_RESPECTFUL:
@@ -161,7 +162,7 @@
toVolumeSource(AUDIO_STREAM_ACCESSIBILITY),
SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY);
// routing is same as media without the "remote" device
- availableOutputDevices.remove(availableOutputDevices.getDevicesFromTypeMask(
+ availableOutputDevices.remove(availableOutputDevices.getDevicesFromType(
AUDIO_DEVICE_OUT_REMOTE_SUBMIX));
devices = getDevicesForStrategyInt(STRATEGY_MEDIA,
availableOutputDevices,
@@ -171,7 +172,7 @@
if (!media_active_locally) {
devices.replaceDevicesByType(
AUDIO_DEVICE_OUT_SPEAKER,
- availableOutputDevices.getDevicesFromTypeMask(
+ availableOutputDevices.getDevicesFromType(
AUDIO_DEVICE_OUT_SPEAKER_SAFE));
}
}
@@ -196,6 +197,7 @@
audio_devices_t txDevice = getDeviceForInputSource(
AUDIO_SOURCE_VOICE_COMMUNICATION)->type();
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
+ LOG_ALWAYS_FATAL_IF(primaryOutput == nullptr, "Primary output not found");
DeviceVector availPrimaryInputDevices =
availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
@@ -203,16 +205,16 @@
// audio_policy_configuration.xml, hearing aid is not there, but it's
// a primary device
// FIXME: this is not the right way of solving this problem
- DeviceVector availPrimaryOutputDevices = availableOutputDevices.getDevicesFromTypeMask(
+ DeviceVector availPrimaryOutputDevices = availableOutputDevices.getDevicesFromTypes(
primaryOutput->supportedDevices().types());
availPrimaryOutputDevices.add(
- availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID));
+ availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID));
if ((availableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX,
String8(""), AUDIO_FORMAT_DEFAULT) == nullptr) ||
((availPrimaryInputDevices.getDevice(
txDevice, String8(""), AUDIO_FORMAT_DEFAULT) != nullptr) &&
- (primaryOutput->getAudioPort()->getModuleVersionMajor() < 3))) {
+ (primaryOutput->getPolicyAudioPort()->getModuleVersionMajor() < 3))) {
availableOutputDevices = availPrimaryOutputDevices;
}
}
@@ -221,7 +223,7 @@
switch (getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION)) {
case AUDIO_POLICY_FORCE_BT_SCO:
if (!isInCall() || strategy != STRATEGY_DTMF) {
- devices = availableOutputDevices.getDevicesFromTypeMask(
+ devices = availableOutputDevices.getDevicesFromType(
AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT);
if (!devices.isEmpty()) break;
}
@@ -232,7 +234,7 @@
FALLTHROUGH_INTENDED;
default: // FORCE_NONE
- devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID);
+ devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
if (!devices.isEmpty()) break;
// when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
if (!isInCall() &&
@@ -254,7 +256,7 @@
AUDIO_DEVICE_OUT_AUX_DIGITAL, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET});
if (!devices.isEmpty()) break;
}
- devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_EARPIECE);
+ devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_EARPIECE);
break;
case AUDIO_POLICY_FORCE_SPEAKER:
@@ -263,7 +265,7 @@
if (!isInCall() &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
outputs.isA2dpSupported()) {
- devices = availableOutputDevices.getDevicesFromTypeMask(
+ devices = availableOutputDevices.getDevicesFromType(
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER);
if (!devices.isEmpty()) break;
}
@@ -274,7 +276,7 @@
AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET});
if (!devices.isEmpty()) break;
}
- devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
+ devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
break;
}
break;
@@ -298,12 +300,12 @@
if ((strategy == STRATEGY_SONIFICATION) ||
(getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
- devices = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
+ devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
}
// if SCO headset is connected and we are told to use it, play ringtone over
// speaker and BT SCO
- if (!availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_ALL_SCO).isEmpty()) {
+ if (!availableOutputDevices.getDevicesFromTypes(getAudioDeviceOutAllScoSet()).isEmpty()) {
DeviceVector devices2;
devices2 = availableOutputDevices.getFirstDevicesFromTypes({
AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
@@ -324,7 +326,7 @@
if (strategy == STRATEGY_SONIFICATION) {
devices.replaceDevicesByType(
AUDIO_DEVICE_OUT_SPEAKER,
- availableOutputDevices.getDevicesFromTypeMask(
+ availableOutputDevices.getDevicesFromType(
AUDIO_DEVICE_OUT_SPEAKER_SAFE));
}
if (!devices2.isEmpty()) {
@@ -342,10 +344,10 @@
// compressed format as they would likely not be mixed and dropped.
for (size_t i = 0; i < outputs.size(); i++) {
sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
- if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat)) {
- availableOutputDevices.remove(desc->devices().getDevicesFromTypeMask(
- AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF
- | AUDIO_DEVICE_OUT_HDMI_ARC));
+ if (desc->isActive() && !audio_is_linear_pcm(desc->getFormat())) {
+ availableOutputDevices.remove(desc->devices().getDevicesFromTypes({
+ AUDIO_DEVICE_OUT_HDMI, AUDIO_DEVICE_OUT_SPDIF,
+ AUDIO_DEVICE_OUT_HDMI_ARC}));
}
}
if (outputs.isActive(toVolumeSource(AUDIO_STREAM_RING)) ||
@@ -382,7 +384,7 @@
// FIXME: Find a better solution to prevent routing to BT hearing aid(b/122931261).
if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
- devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_HEARING_AID);
+ devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_HEARING_AID);
}
if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
@@ -393,7 +395,7 @@
}
if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) == AUDIO_POLICY_FORCE_SPEAKER)) {
- devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
+ devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
}
if (devices2.isEmpty()) {
devices2 = availableOutputDevices.getFirstDevicesFromTypes({
@@ -404,21 +406,21 @@
}
if ((devices2.isEmpty()) && (strategy != STRATEGY_SONIFICATION)) {
// no sonification on aux digital (e.g. HDMI)
- devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_AUX_DIGITAL);
+ devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_AUX_DIGITAL);
}
if ((devices2.isEmpty()) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK) == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
- devices2 = availableOutputDevices.getDevicesFromTypeMask(
+ devices2 = availableOutputDevices.getDevicesFromType(
AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET);
}
if (devices2.isEmpty()) {
- devices2 = availableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER);
+ devices2 = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
}
DeviceVector devices3;
if (strategy == STRATEGY_MEDIA) {
// ARC, SPDIF and AUX_LINE can co-exist with others.
- devices3 = availableOutputDevices.getDevicesFromTypeMask(
- AUDIO_DEVICE_OUT_HDMI_ARC | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_AUX_LINE);
+ devices3 = availableOutputDevices.getDevicesFromTypes({
+ AUDIO_DEVICE_OUT_HDMI_ARC, AUDIO_DEVICE_OUT_SPDIF, AUDIO_DEVICE_OUT_AUX_LINE});
}
devices2.add(devices3);
@@ -430,7 +432,7 @@
if ((strategy == STRATEGY_MEDIA) &&
(getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO) ==
AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
- devices.remove(devices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER));
+ devices.remove(devices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
}
// for STRATEGY_SONIFICATION:
@@ -438,7 +440,7 @@
if (strategy == STRATEGY_SONIFICATION) {
devices.replaceDevicesByType(
AUDIO_DEVICE_OUT_SPEAKER,
- availableOutputDevices.getDevicesFromTypeMask(
+ availableOutputDevices.getDevicesFromType(
AUDIO_DEVICE_OUT_SPEAKER_SAFE));
}
} break;
@@ -471,8 +473,8 @@
const SwAudioOutputCollection &outputs = getApmObserver()->getOutputs();
DeviceVector availableDevices = availableInputDevices;
sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
- DeviceVector availablePrimaryDevices = availableInputDevices.getDevicesFromHwModule(
- primaryOutput->getModuleHandle());
+ DeviceVector availablePrimaryDevices = primaryOutput == nullptr ? DeviceVector()
+ : availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
sp<DeviceDescriptor> device;
// when a call is active, force device selection to match source VOICE_COMMUNICATION
@@ -515,6 +517,7 @@
if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
(availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX,
String8(""), AUDIO_FORMAT_DEFAULT)) == nullptr) {
+ LOG_ALWAYS_FATAL_IF(availablePrimaryDevices.isEmpty(), "Primary devices not found");
availableDevices = availablePrimaryDevices;
}
@@ -545,6 +548,9 @@
case AUDIO_SOURCE_UNPROCESSED:
case AUDIO_SOURCE_HOTWORD:
if (inputSource == AUDIO_SOURCE_HOTWORD) {
+ // We should not use primary output criteria for Hotword but rather limit
+ // to devices attached to the same HW module as the built-in mic
+ LOG_ALWAYS_FATAL_IF(availablePrimaryDevices.isEmpty(), "Primary devices not found");
availableDevices = availablePrimaryDevices;
}
if (getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO) {
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 499fc8a..3223530 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -112,7 +112,7 @@
void AudioPolicyManager::broadcastDeviceConnectionState(const sp<DeviceDescriptor> &device,
audio_policy_dev_state_t state)
{
- AudioParameter param(device->address());
+ AudioParameter param(String8(device->address().c_str()));
const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
AudioParameter::keyDeviceConnect : AudioParameter::keyDeviceDisconnect);
param.addInt(key, device->type());
@@ -425,7 +425,7 @@
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
// Check if the device is currently connected
- DeviceVector deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device);
+ DeviceVector deviceList = mAvailableOutputDevices.getDevicesFromType(device);
if (deviceList.empty()) {
// Nothing to do: device is not connected
return NO_ERROR;
@@ -439,8 +439,8 @@
// Case 1: A2DP active device switches from primary to primary
// module
// Case 2: A2DP device config changes on primary module.
- if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
- sp<HwModule> module = mHwModules.getModuleForDeviceTypes(device, encodedFormat);
+ if (audio_is_a2dp_out_device(device)) {
+ sp<HwModule> module = mHwModules.getModuleForDeviceType(device, encodedFormat);
audio_module_handle_t primaryHandle = mPrimaryOutput->getModuleHandle();
if (availablePrimaryOutputDevices().contains(devDesc) &&
(module != 0 && module->getHandle() == primaryHandle)) {
@@ -496,8 +496,8 @@
ALOGE("%s() unable to get primary module", __func__);
return NO_INIT;
}
- DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypeMask(
- AUDIO_DEVICE_OUT_ALL_A2DP);
+ DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypes(
+ getAudioDeviceOutAllA2dpSet());
for (const auto& device : declaredDevices) {
formatSet.insert(device->encodedFormats().begin(), device->encodedFormats().end());
}
@@ -511,7 +511,8 @@
bool createRxPatch = false;
uint32_t muteWaitMs = 0;
- if(!hasPrimaryOutput() || mPrimaryOutput->devices().types() == AUDIO_DEVICE_OUT_STUB) {
+ if(!hasPrimaryOutput() ||
+ mPrimaryOutput->devices().onlyContainsDevicesWithType(AUDIO_DEVICE_OUT_STUB)) {
return muteWaitMs;
}
ALOG_ASSERT(!rxDevices.isEmpty(), "updateCallRouting() no selected output device");
@@ -535,9 +536,9 @@
}
auto telephonyRxModule =
- mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT);
+ mHwModules.getModuleForDeviceType(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT);
auto telephonyTxModule =
- mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_OUT_TELEPHONY_TX, AUDIO_FORMAT_DEFAULT);
+ mHwModules.getModuleForDeviceType(AUDIO_DEVICE_OUT_TELEPHONY_TX, AUDIO_FORMAT_DEFAULT);
// retrieve Rx Source and Tx Sink device descriptors
sp<DeviceDescriptor> rxSourceDevice =
mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX,
@@ -648,20 +649,6 @@
return audioPatch;
}
-sp<DeviceDescriptor> AudioPolicyManager::findDevice(
- const DeviceVector& devices, audio_devices_t device) const {
- DeviceVector deviceList = devices.getDevicesFromTypeMask(device);
- ALOG_ASSERT(!deviceList.isEmpty(),
- "%s() selected device type %#x is not in devices list", __func__, device);
- return deviceList.itemAt(0);
-}
-
-audio_devices_t AudioPolicyManager::getModuleDeviceTypes(
- const DeviceVector& devices, const char *moduleId) const {
- sp<HwModule> mod = mHwModules.getModuleFromName(moduleId);
- return mod != 0 ? devices.getDeviceTypesFromHwModule(mod->getHandle()) : AUDIO_DEVICE_NONE;
-}
-
bool AudioPolicyManager::isDeviceOfModule(
const sp<DeviceDescriptor>& devDesc, const char *moduleId) const {
sp<HwModule> module = mHwModules.getModuleFromName(moduleId);
@@ -881,7 +868,7 @@
continue;
}
// reject profiles if connected device does not support codec
- if (!curProfile->deviceSupportsEncodedFormats(devices.types())) {
+ if (!curProfile->devicesSupportEncodedFormats(devices.types())) {
continue;
}
if (!directOnly) return curProfile;
@@ -1025,7 +1012,7 @@
// FIXME: provide a more generic approach which is not device specific and move this back
// to getOutputForDevice.
// TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side.
- if (outputDevices.types() == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
+ if (outputDevices.onlyContainsDevicesWithType(AUDIO_DEVICE_OUT_TELEPHONY_TX) &&
(*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) &&
audio_is_linear_pcm(config->format) &&
isInCall()) {
@@ -1106,9 +1093,10 @@
}
audio_config_base_t clientConfig = {.sample_rate = config->sample_rate,
+ .channel_mask = config->channel_mask,
.format = config->format,
- .channel_mask = config->channel_mask };
- *portId = AudioPort::getNextUniqueId();
+ };
+ *portId = PolicyAudioPort::getNextUniqueId();
sp<TrackClientDescriptor> clientDesc =
new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
@@ -1205,9 +1193,9 @@
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
// reuse direct output if currently open by the same client
// and configured with same parameters
- if ((config->sample_rate == desc->mSamplingRate) &&
- (config->format == desc->mFormat) &&
- (channelMask == desc->mChannelMask) &&
+ if ((config->sample_rate == desc->getSamplingRate()) &&
+ (config->format == desc->getFormat()) &&
+ (channelMask == desc->getChannelMask()) &&
(session == desc->mDirectClientSession)) {
desc->mDirectOpenCount++;
ALOGI("%s reusing direct output %d for session %d", __func__,
@@ -1234,7 +1222,7 @@
for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) {
const struct audio_port_config *sink = &patch->mPatch.sinks[j];
if (sink->type == AUDIO_PORT_TYPE_DEVICE &&
- (sink->ext.device.type & devices.types()) != AUDIO_DEVICE_NONE &&
+ devices.containsDeviceWithType(sink->ext.device.type) &&
(address.isEmpty() || strncmp(sink->ext.device.address, address.string(),
AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
releaseAudioPatch(patch->mHandle, mUidCached);
@@ -1247,13 +1235,13 @@
// only accept an output with the requested parameters
if (status != NO_ERROR ||
- (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
- (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
- (channelMask != 0 && channelMask != outputDesc->mChannelMask)) {
+ (config->sample_rate != 0 && config->sample_rate != outputDesc->getSamplingRate()) ||
+ (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->getFormat()) ||
+ (channelMask != 0 && channelMask != outputDesc->getChannelMask())) {
ALOGV("%s failed opening direct output: output %d sample rate %d %d,"
"format %d %d, channel mask %04x %04x", __func__, output, config->sample_rate,
- outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
- channelMask, outputDesc->mChannelMask);
+ outputDesc->getSamplingRate(), config->format, outputDesc->getFormat(),
+ channelMask, outputDesc->getChannelMask());
if (output != AUDIO_IO_HANDLE_NONE) {
outputDesc->close();
}
@@ -1359,19 +1347,19 @@
// Each IOProfile represents a MixPort from audio_policy_configuration.xml
for (const auto &inProfile : inputProfiles) {
if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0)) {
- msdProfiles.appendProfiles(inProfile->getAudioProfiles());
+ appendAudioProfiles(msdProfiles, inProfile->getAudioProfiles());
}
}
AudioProfileVector deviceProfiles;
for (const auto &outProfile : outputProfiles) {
if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) {
- deviceProfiles.appendProfiles(outProfile->getAudioProfiles());
+ appendAudioProfiles(deviceProfiles, outProfile->getAudioProfiles());
}
}
struct audio_config_base bestSinkConfig;
- status_t result = msdProfiles.findBestMatchingOutputConfig(deviceProfiles,
+ status_t result = findBestMatchingOutputConfig(msdProfiles, deviceProfiles,
compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/,
- &bestSinkConfig);
+ bestSinkConfig);
if (result != NO_ERROR) {
ALOGD("%s() no matching profiles found for device: %s, hwAvSync: %d",
__func__, outputDevice->toString().c_str(), hwAvSync);
@@ -1524,13 +1512,13 @@
// If haptic channel is specified, use the haptic output if present.
// When using haptic output, same audio format and sample rate are required.
const uint32_t outputHapticChannelCount = audio_channel_count_from_out_mask(
- outputDesc->mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
+ outputDesc->getChannelMask() & AUDIO_CHANNEL_HAPTIC_ALL);
if ((hapticChannelCount == 0) != (outputHapticChannelCount == 0)) {
continue;
}
if (outputHapticChannelCount >= hapticChannelCount
- && format == outputDesc->mFormat
- && samplingRate == outputDesc->mSamplingRate) {
+ && format == outputDesc->getFormat()
+ && samplingRate == outputDesc->getSamplingRate()) {
currentMatchCriteria[0] = outputHapticChannelCount;
}
@@ -1538,12 +1526,13 @@
currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags);
// channel mask and channel count match
- uint32_t outputChannelCount = audio_channel_count_from_out_mask(outputDesc->mChannelMask);
+ uint32_t outputChannelCount = audio_channel_count_from_out_mask(
+ outputDesc->getChannelMask());
if (channelMask != AUDIO_CHANNEL_NONE && channelCount > 2 &&
channelCount <= outputChannelCount) {
if ((audio_channel_mask_get_representation(channelMask) ==
- audio_channel_mask_get_representation(outputDesc->mChannelMask)) &&
- ((channelMask & outputDesc->mChannelMask) == channelMask)) {
+ audio_channel_mask_get_representation(outputDesc->getChannelMask())) &&
+ ((channelMask & outputDesc->getChannelMask()) == channelMask)) {
currentMatchCriteria[2] = outputChannelCount;
}
currentMatchCriteria[3] = outputChannelCount;
@@ -1551,8 +1540,8 @@
// sampling rate match
if (samplingRate > SAMPLE_RATE_HZ_DEFAULT &&
- samplingRate <= outputDesc->mSamplingRate) {
- currentMatchCriteria[4] = outputDesc->mSamplingRate;
+ samplingRate <= outputDesc->getSamplingRate()) {
+ currentMatchCriteria[4] = outputDesc->getSamplingRate();
}
// performance flags match
@@ -1561,8 +1550,8 @@
// format match
if (format != AUDIO_FORMAT_INVALID) {
currentMatchCriteria[6] =
- AudioPort::kFormatDistanceMax -
- AudioPort::formatDistance(format, outputDesc->mFormat);
+ PolicyAudioPort::kFormatDistanceMax -
+ PolicyAudioPort::formatDistance(format, outputDesc->getFormat());
}
// primary output match
@@ -1776,14 +1765,15 @@
}
if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
- mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
+ mEngine->getForceUse(
+ AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
setStrategyMute(streamToStrategy(AUDIO_STREAM_ALARM), true, outputDesc);
}
// Automatically enable the remote submix input when output is started on a re routing mix
// of type MIX_TYPE_RECORDERS
- if (audio_is_remote_submix_device(devices.types()) && policyMix != NULL &&
- policyMix->mMixType == MIX_TYPE_RECORDERS) {
+ if (isSingleDeviceType(devices.types(), &audio_is_remote_submix_device) &&
+ policyMix != NULL && policyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address,
@@ -1830,7 +1820,8 @@
// Automatically disable the remote submix input when output is stopped on a
// re routing mix of type MIX_TYPE_RECORDERS
sp<AudioPolicyMix> policyMix = outputDesc->mPolicyMix.promote();
- if (audio_is_remote_submix_device(outputDesc->devices().types()) &&
+ if (isSingleDeviceType(
+ outputDesc->devices().types(), &audio_is_remote_submix_device) &&
policyMix != NULL &&
policyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
@@ -2086,7 +2077,7 @@
isSoundTrigger = attributes.source == AUDIO_SOURCE_HOTWORD &&
mSoundTriggerSessions.indexOfKey(session) >= 0;
- *portId = AudioPort::getNextUniqueId();
+ *portId = PolicyAudioPort::getNextUniqueId();
clientDesc = new RecordClientDescriptor(*portId, riid, uid, session, attributes, *config,
requestedDeviceId, attributes.source, flags,
@@ -2429,7 +2420,8 @@
const sp<AudioInputDescriptor> input = mInputs.valueAt(i);
if (input->clientsList().size() == 0
|| !mAvailableInputDevices.containsAtLeastOne(input->supportedDevices())
- || (input->getAudioPort()->getFlags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
+ || (input->getPolicyAudioPort()->getFlags()
+ & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0) {
inputsToClose.push_back(mInputs.keyAt(i));
} else {
bool close = false;
@@ -2489,10 +2481,12 @@
{
// if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device selected for this
// stream by the engine.
+ DeviceTypeSet deviceTypes = {device};
if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
- device = mEngine->getOutputDevicesForStream(stream, true /*fromCache*/).types();
+ deviceTypes = mEngine->getOutputDevicesForStream(
+ stream, true /*fromCache*/).types();
}
- return getVolumeIndex(getVolumeCurves(stream), *index, device);
+ return getVolumeIndex(getVolumeCurves(stream), *index, deviceTypes);
}
status_t AudioPolicyManager::setVolumeIndexForAttributes(const audio_attributes_t &attributes,
@@ -2517,19 +2511,20 @@
return status;
}
- audio_devices_t curSrcDevice;
+ DeviceTypeSet curSrcDevices;
auto curCurvAttrs = curves.getAttributes();
if (!curCurvAttrs.empty() && curCurvAttrs.front() != defaultAttr) {
auto attr = curCurvAttrs.front();
- curSrcDevice = mEngine->getOutputDevicesForAttributes(attr, nullptr, false).types();
+ curSrcDevices = mEngine->getOutputDevicesForAttributes(attr, nullptr, false).types();
} else if (!curves.getStreamTypes().empty()) {
auto stream = curves.getStreamTypes().front();
- curSrcDevice = mEngine->getOutputDevicesForStream(stream, false).types();
+ curSrcDevices = mEngine->getOutputDevicesForStream(stream, false).types();
} else {
ALOGE("%s: Invalid src %d: no valid attributes nor stream",__func__, vs);
return BAD_VALUE;
}
- curSrcDevice = Volume::getDeviceForVolume(curSrcDevice);
+ audio_devices_t curSrcDevice = Volume::getDeviceForVolume(curSrcDevices);
+ resetDeviceTypes(curSrcDevices, curSrcDevice);
// update volume on all outputs and streams matching the following:
// - The requested stream (or a stream matching for volume control) is active on the output
@@ -2541,11 +2536,10 @@
// no specific device volume value exists for currently selected device.
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
- audio_devices_t curDevice = desc->devices().types();
+ DeviceTypeSet curDevices = desc->devices().types();
- if (curDevice & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
- curDevice |= AUDIO_DEVICE_OUT_SPEAKER;
- curDevice &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+ if (curDevices.erase(AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
+ curDevices.insert(AUDIO_DEVICE_OUT_SPEAKER);
}
// Inter / intra volume group priority management: Loop on strategies arranged by priority
@@ -2589,7 +2583,7 @@
if (!applyVolume) {
continue; // next output
}
- status_t volStatus = checkAndSetVolume(curves, vs, index, desc, curDevice,
+ status_t volStatus = checkAndSetVolume(curves, vs, index, desc, curDevices,
(vs == toVolumeSource(AUDIO_STREAM_SYSTEM)?
TOUCH_SOUND_FIXED_DELAY_MS : 0));
if (volStatus != NO_ERROR) {
@@ -2600,12 +2594,14 @@
if (!(desc->isActive(vs) || isInCall())) {
continue;
}
- if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) && ((curDevice & device) == 0)) {
+ if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME &&
+ curDevices.find(device) == curDevices.end()) {
continue;
}
if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
- curSrcDevice |= device;
- applyVolume = (Volume::getDeviceForVolume(curDevice) & curSrcDevice) != 0;
+ curSrcDevices.insert(device);
+ applyVolume = (curSrcDevices.find(
+ Volume::getDeviceForVolume(curDevices)) != curSrcDevices.end());
} else {
applyVolume = !curves.hasVolumeIndexForDevice(curSrcDevice);
}
@@ -2614,7 +2610,7 @@
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
status_t volStatus = checkAndSetVolume(
- curves, vs, index, desc, curDevice,
+ curves, vs, index, desc, curDevices,
((vs == toVolumeSource(AUDIO_STREAM_SYSTEM))?
TOUCH_SOUND_FIXED_DELAY_MS : 0));
if (volStatus != NO_ERROR) {
@@ -2657,22 +2653,23 @@
{
// if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device selected for this
// stream by the engine.
+ DeviceTypeSet deviceTypes = {device};
if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
- device = mEngine->getOutputDevicesForAttributes(attr, nullptr, true /*fromCache*/).types();
+ deviceTypes = mEngine->getOutputDevicesForAttributes(
+ attr, nullptr, true /*fromCache*/).types();
}
- return getVolumeIndex(getVolumeCurves(attr), index, device);
+ return getVolumeIndex(getVolumeCurves(attr), index, deviceTypes);
}
status_t AudioPolicyManager::getVolumeIndex(const IVolumeCurves &curves,
int &index,
- audio_devices_t device) const
+ const DeviceTypeSet& deviceTypes) const
{
- if (!audio_is_output_device(device)) {
+ if (!isSingleDeviceType(deviceTypes, audio_is_output_device)) {
return BAD_VALUE;
}
- device = Volume::getDeviceForVolume(device);
- index = curves.getVolumeIndex(device);
- ALOGV("%s: device %08x index %d", __FUNCTION__, device, index);
+ index = curves.getVolumeIndex(deviceTypes);
+ ALOGV("%s: device %s index %d", __FUNCTION__, dumpDeviceTypes(deviceTypes).c_str(), index);
return NO_ERROR;
}
@@ -2921,9 +2918,9 @@
// stereo and let audio flinger do the channel conversion if needed.
outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
- rSubmixModule->addOutputProfile(address, &outputConfig,
+ rSubmixModule->addOutputProfile(address.c_str(), &outputConfig,
AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
- rSubmixModule->addInputProfile(address, &inputConfig,
+ rSubmixModule->addInputProfile(address.c_str(), &inputConfig,
AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
if ((res = setDeviceConnectionStateInt(deviceTypeToMakeAvailable,
@@ -3019,8 +3016,8 @@
}
}
}
- rSubmixModule->removeOutputProfile(address);
- rSubmixModule->removeInputProfile(address);
+ rSubmixModule->removeOutputProfile(address.c_str());
+ rSubmixModule->removeInputProfile(address.c_str());
} else if ((mix.mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
if (mPolicyMixes.unregisterMix(mix) != NO_ERROR) {
@@ -3232,7 +3229,7 @@
ALOGV("%s() profile %sfound with name: %s, "
"sample rate: %u, format: 0x%x, channel_mask: 0x%x, output flags: 0x%x",
__FUNCTION__, profile != 0 ? "" : "NOT ",
- (profile != 0 ? profile->getTagName().string() : "null"),
+ (profile != 0 ? profile->getTagName().c_str() : "null"),
config.sample_rate, config.format, config.channel_mask, output_flags);
return (profile != 0);
}
@@ -3436,8 +3433,8 @@
}
// TODO: reconfigure output format and channels here
- ALOGV("createAudioPatch() setting device %08x on output %d",
- devices.types(), outputDesc->mIoHandle);
+ ALOGV("createAudioPatch() setting device %s on output %d",
+ dumpDeviceTypes(devices.types()).c_str(), outputDesc->mIoHandle);
setOutputDevices(outputDesc, devices, true, 0, handle);
index = mAudioPatches.indexOfKey(*handle);
if (index >= 0) {
@@ -3855,7 +3852,7 @@
return BAD_VALUE;
}
- *portId = AudioPort::getNextUniqueId();
+ *portId = PolicyAudioPort::getNextUniqueId();
struct audio_patch dummyPatch = {};
sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
@@ -3896,7 +3893,7 @@
if (srcDevice->hasSameHwModuleAs(sinkDevice) &&
srcDevice->getModuleVersionMajor() >= 3 &&
sinkDevice->getModule()->supportsPatch(srcDevice, sinkDevice) &&
- srcDevice->getAudioPort()->mGains.size() > 0) {
+ srcDevice->getAudioPort()->getGains().size() > 0) {
ALOGV("%s Device to Device route supported by >=3.0 HAL", __FUNCTION__);
// TODO: may explicitly specify whether we should use HW or SW patch
// create patch between src device and output device
@@ -3917,12 +3914,14 @@
&selectedDeviceId, &isRequestedDeviceForExclusiveUse,
&secondaryOutputs);
if (output == AUDIO_IO_HANDLE_NONE) {
- ALOGV("%s no output for device %08x", __FUNCTION__, sinkDevices.types());
+ ALOGV("%s no output for device %s",
+ __FUNCTION__, dumpDeviceTypes(sinkDevices.types()).c_str());
return INVALID_OPERATION;
}
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (outputDesc->isDuplicated()) {
- ALOGV("%s output for device %08x is duplicated", __FUNCTION__, sinkDevices.types());
+ ALOGV("%s output for device %s is duplicated",
+ __FUNCTION__, dumpDeviceTypes(sinkDevices.types()).c_str());
return INVALID_OPERATION;
}
status_t status = outputDesc->start();
@@ -4031,7 +4030,7 @@
float AudioPolicyManager::getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device)
{
- return computeVolume(getVolumeCurves(stream), toVolumeSource(stream), index, device);
+ return computeVolume(getVolumeCurves(stream), toVolumeSource(stream), index, {device});
}
status_t AudioPolicyManager::getSurroundFormats(unsigned int *numSurroundFormats,
@@ -4131,12 +4130,12 @@
sp<SwAudioOutputDescriptor> outputDesc;
bool profileUpdated = false;
- DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromTypeMask(
- AUDIO_DEVICE_OUT_HDMI);
+ DeviceVector hdmiOutputDevices = mAvailableOutputDevices.getDevicesFromType(
+ AUDIO_DEVICE_OUT_HDMI);
for (size_t i = 0; i < hdmiOutputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
- String8 address = hdmiOutputDevices[i]->address();
- String8 name = hdmiOutputDevices[i]->getName();
+ String8 address = String8(hdmiOutputDevices[i]->address().c_str());
+ std::string name = hdmiOutputDevices[i]->getName();
status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_HDMI,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address.c_str(),
@@ -4153,12 +4152,12 @@
profileUpdated |= (status == NO_ERROR);
}
// FIXME: Why doing this for input HDMI devices if we don't augment their reported formats?
- DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromTypeMask(
+ DeviceVector hdmiInputDevices = mAvailableInputDevices.getDevicesFromType(
AUDIO_DEVICE_IN_HDMI);
for (size_t i = 0; i < hdmiInputDevices.size(); i++) {
// Simulate reconnection to update enabled surround sound formats.
- String8 address = hdmiInputDevices[i]->address();
- String8 name = hdmiInputDevices[i]->getName();
+ String8 address = String8(hdmiInputDevices[i]->address().c_str());
+ std::string name = hdmiInputDevices[i]->getName();
status_t status = setDeviceConnectionStateInt(AUDIO_DEVICE_IN_HDMI,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address.c_str(),
@@ -4183,11 +4182,11 @@
return profileUpdated ? NO_ERROR : INVALID_OPERATION;
}
-void AudioPolicyManager::setAppState(uid_t uid, app_state_t state)
+void AudioPolicyManager::setAppState(audio_port_handle_t portId, app_state_t state)
{
- ALOGV("%s(uid:%d, state:%d)", __func__, uid, state);
+ ALOGV("%s(portId:%d, state:%d)", __func__, portId, state);
for (size_t i = 0; i < mInputs.size(); i++) {
- mInputs.valueAt(i)->setAppState(uid, state);
+ mInputs.valueAt(i)->setAppState(portId, state);
}
}
@@ -4469,7 +4468,7 @@
// give a valid ID to an attached device once confirmed it is reachable
if (!device->isAttached()) {
device->attach(hwModule);
- device->importAudioPort(inProfile, true);
+ device->importAudioPortAndPickAudioProfile(inProfile, true);
}
}
inputDesc->close();
@@ -4500,11 +4499,11 @@
}
// If microphones address is empty, set it according to device type
for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->address().isEmpty()) {
+ if (mAvailableInputDevices[i]->address().empty()) {
if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
- mAvailableInputDevices[i]->setAddress(String8(AUDIO_BOTTOM_MICROPHONE_ADDRESS));
+ mAvailableInputDevices[i]->setAddress(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
} else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
- mAvailableInputDevices[i]->setAddress(String8(AUDIO_BACK_MICROPHONE_ADDRESS));
+ mAvailableInputDevices[i]->setAddress(AUDIO_BACK_MICROPHONE_ADDRESS);
}
}
}
@@ -4549,7 +4548,7 @@
const sp<SwAudioOutputDescriptor>& outputDesc)
{
mOutputs.add(output, outputDesc);
- applyStreamVolumes(outputDesc, AUDIO_DEVICE_NONE, 0 /* delayMs */, true /* force */);
+ applyStreamVolumes(outputDesc, DeviceTypeSet(), 0 /* delayMs */, true /* force */);
updateMono(output); // update mono status when adding to output list
selectOutputForMusicEffects();
nextAudioPortGeneration();
@@ -4573,7 +4572,7 @@
SortedVector<audio_io_handle_t>& outputs)
{
audio_devices_t deviceType = device->type();
- const String8 &address = device->address();
+ const String8 &address = String8(device->address().c_str());
sp<SwAudioOutputDescriptor> desc;
if (audio_device_is_digital(deviceType)) {
@@ -4586,7 +4585,7 @@
for (size_t i = 0; i < mOutputs.size(); i++) {
desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && desc->supportsDevice(device)
- && desc->deviceSupportsEncodedFormats(deviceType)) {
+ && desc->devicesSupportEncodedFormats({deviceType})) {
ALOGV("checkOutputsForDevice(): adding opened output %d on device %s",
mOutputs.keyAt(i), device->toString().c_str());
outputs.add(mOutputs.keyAt(i));
@@ -4625,7 +4624,7 @@
// matching profile: save the sample rates, format and channel masks supported
// by the profile in our device descriptor
if (audio_device_is_digital(deviceType)) {
- device->importAudioPort(profile);
+ device->importAudioPortAndPickAudioProfile(profile);
}
break;
}
@@ -4641,7 +4640,7 @@
}
ALOGV("opening output for device %08x with params %s profile %p name %s",
- deviceType, address.string(), profile.get(), profile->getName().string());
+ deviceType, address.string(), profile.get(), profile->getName().c_str());
desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status = desc->open(nullptr, DeviceVector(device),
@@ -4727,7 +4726,7 @@
outputs.add(output);
// Load digital format info only for digital devices
if (audio_device_is_digital(deviceType)) {
- device->importAudioPort(profile);
+ device->importAudioPortAndPickAudioProfile(profile);
}
if (device_distinguishes_on_address(deviceType)) {
@@ -4751,7 +4750,7 @@
if (!desc->isDuplicated()) {
// exact match on device
if (device_distinguishes_on_address(deviceType) && desc->supportsDevice(device)
- && desc->deviceSupportsEncodedFormats(deviceType)) {
+ && desc->devicesSupportEncodedFormats({deviceType})) {
outputs.add(mOutputs.keyAt(i));
} else if (!mAvailableOutputDevices.containsAtLeastOne(desc->supportedDevices())) {
ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
@@ -4821,7 +4820,7 @@
desc = mInputs.valueAt(input_index);
if (desc->mProfile == profile) {
if (audio_device_is_digital(device->type())) {
- device->importAudioPort(profile);
+ device->importAudioPortAndPickAudioProfile(profile);
}
break;
}
@@ -4845,7 +4844,7 @@
&input);
if (status == NO_ERROR) {
- const String8& address = device->address();
+ const String8& address = String8(device->address().c_str());
if (!address.isEmpty()) {
char *param = audio_device_address_to_parameter(device->type(), address);
mpClientInterface->setParameters(input, String8(param));
@@ -4870,7 +4869,7 @@
profile_index--;
} else {
if (audio_device_is_digital(device->type())) {
- device->importAudioPort(profile);
+ device->importAudioPortAndPickAudioProfile(profile);
}
ALOGV("checkInputsForDevice(): adding input %d", input);
}
@@ -5022,7 +5021,7 @@
i, openOutputs.valueAt(i)->isDuplicated(),
openOutputs.valueAt(i)->supportedDevices().toString().c_str());
if (openOutputs.valueAt(i)->supportsAllDevices(devices)
- && openOutputs.valueAt(i)->deviceSupportsEncodedFormats(devices.types())) {
+ && openOutputs.valueAt(i)->devicesSupportEncodedFormats(devices.types())) {
ALOGVV("%s() found output %d", __func__, openOutputs.keyAt(i));
outputs.add(openOutputs.keyAt(i));
}
@@ -5063,10 +5062,12 @@
// also take into account external policy-related changes: add all outputs which are
// associated with policies in the "before" and "after" output vectors
ALOGVV("%s(): policy related outputs", __func__);
+ bool hasDynamicPolicy = false;
for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) {
const sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i);
if (desc != 0 && desc->mPolicyMix != NULL) {
srcOutputs.add(desc->mIoHandle);
+ hasDynamicPolicy = true;
ALOGVV(" previous outputs: adding %d", desc->mIoHandle);
}
}
@@ -5074,6 +5075,7 @@
const sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != 0 && desc->mPolicyMix != NULL) {
dstOutputs.add(desc->mIoHandle);
+ hasDynamicPolicy = true;
ALOGVV(" new outputs: adding %d", desc->mIoHandle);
}
}
@@ -5082,12 +5084,45 @@
// get maximum latency of all source outputs to determine the minimum mute time guaranteeing
// audio from invalidated tracks will be rendered when unmuting
uint32_t maxLatency = 0;
+ bool invalidate = hasDynamicPolicy;
for (audio_io_handle_t srcOut : srcOutputs) {
sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
- if (desc != 0 && maxLatency < desc->latency()) {
+ if (desc == nullptr) continue;
+
+ if (desc->isStrategyActive(psId) && maxLatency < desc->latency()) {
maxLatency = desc->latency();
}
+
+ if (invalidate) continue;
+
+ for (auto client : desc->clientsList(false /*activeOnly*/)) {
+ if (desc->isDuplicated() || !desc->mProfile->isDirectOutput()) {
+ // a client on a non-direct output necessarily has a linear PCM format,
+ // so we can call selectOutput() safely
+ const audio_io_handle_t newOutput = selectOutput(dstOutputs,
+ client->flags(),
+ client->config().format,
+ client->config().channel_mask,
+ client->config().sample_rate);
+ if (newOutput != srcOut) {
+ invalidate = true;
+ break;
+ }
+ } else {
+ sp<IOProfile> profile = getProfileForOutput(newDevices,
+ client->config().sample_rate,
+ client->config().format,
+ client->config().channel_mask,
+ client->flags(),
+ true /* directOnly */);
+ if (profile != desc->mProfile) {
+ invalidate = true;
+ break;
+ }
+ }
+ }
}
+
ALOGV_IF(!(srcOutputs.isEmpty() || dstOutputs.isEmpty()),
"%s: strategy %d, moving from output %s to output %s", __func__, psId,
std::to_string(srcOutputs[0]).c_str(),
@@ -5095,7 +5130,9 @@
// mute strategy while moving tracks from one output to another
for (audio_io_handle_t srcOut : srcOutputs) {
sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueFor(srcOut);
- if (desc != 0 && desc->isStrategyActive(psId)) {
+ if (desc == nullptr) continue;
+
+ if (desc->isStrategyActive(psId)) {
setStrategyMute(psId, true, desc);
setStrategyMute(psId, false, desc, maxLatency * LATENCY_MUTE_FACTOR,
newDevices.types());
@@ -5111,8 +5148,10 @@
selectOutputForMusicEffects();
}
// Move tracks associated to this stream (and linked) from previous output to new output
- for (auto stream : mEngine->getStreamTypesForProductStrategy(psId)) {
- mpClientInterface->invalidateStream(stream);
+ if (invalidate) {
+ for (auto stream : mEngine->getStreamTypesForProductStrategy(psId)) {
+ mpClientInterface->invalidateStream(stream);
+ }
}
}
}
@@ -5157,9 +5196,8 @@
}
bool isScoConnected =
- ((mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET &
- ~AUDIO_DEVICE_BIT_IN) != 0) ||
- ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_ALL_SCO) != 0);
+ (mAvailableInputDevices.types().count(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) != 0 ||
+ !Intersection(mAvailableOutputDevices.types(), getAudioDeviceOutAllScoSet()).empty());
// if suspended, restore A2DP output if:
// ((SCO device is NOT connected) ||
@@ -5321,12 +5359,13 @@
}
/*Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
and doesn't really need to.*/
- DeviceVector speakerSafeDevices = devices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER_SAFE);
+ DeviceVector speakerSafeDevices = devices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER_SAFE);
if (!speakerSafeDevices.isEmpty()) {
- devices.merge(mAvailableOutputDevices.getDevicesFromTypeMask(AUDIO_DEVICE_OUT_SPEAKER));
+ devices.merge(mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER));
devices.remove(speakerSafeDevices);
}
- return devices.types();
+ // FIXME: use DeviceTypeSet when Java layer is ready for it.
+ return deviceTypesToBitMask(devices.types());
}
void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
@@ -5386,7 +5425,7 @@
auto ttsVolumeSource = toVolumeSource(AUDIO_STREAM_TTS);
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
- setVolumeSourceMute(ttsVolumeSource, mute/*on*/, desc, 0 /*delay*/, AUDIO_DEVICE_NONE);
+ setVolumeSourceMute(ttsVolumeSource, mute/*on*/, desc, 0 /*delay*/, DeviceTypeSet());
const uint32_t latency = desc->latency() * 2;
if (latency > maxLatency) {
maxLatency = latency;
@@ -5702,9 +5741,9 @@
float AudioPolicyManager::computeVolume(IVolumeCurves &curves,
VolumeSource volumeSource,
int index,
- audio_devices_t device)
+ const DeviceTypeSet& deviceTypes)
{
- float volumeDb = curves.volIndexToDb(Volume::getDeviceCategory(device), index);
+ float volumeDb = curves.volIndexToDb(Volume::getDeviceCategory(deviceTypes), index);
// handle the case of accessibility active while a ringtone is playing: if the ringtone is much
// louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
@@ -5720,7 +5759,7 @@
&& (AUDIO_MODE_RINGTONE == mEngine->getPhoneState()) &&
mOutputs.isActive(ringVolumeSrc, 0)) {
auto &ringCurves = getVolumeCurves(AUDIO_STREAM_RING);
- const float ringVolumeDb = computeVolume(ringCurves, ringVolumeSrc, index, device);
+ const float ringVolumeDb = computeVolume(ringCurves, ringVolumeSrc, index, deviceTypes);
return ringVolumeDb - 4 > volumeDb ? ringVolumeDb - 4 : volumeDb;
}
@@ -5735,9 +5774,9 @@
volumeSource == toVolumeSource(AUDIO_STREAM_DTMF) ||
volumeSource == a11yVolumeSrc)) {
auto &voiceCurves = getVolumeCurves(callVolumeSrc);
- int voiceVolumeIndex = voiceCurves.getVolumeIndex(device);
+ int voiceVolumeIndex = voiceCurves.getVolumeIndex(deviceTypes);
const float maxVoiceVolDb =
- computeVolume(voiceCurves, callVolumeSrc, voiceVolumeIndex, device)
+ computeVolume(voiceCurves, callVolumeSrc, voiceVolumeIndex, deviceTypes)
+ IN_CALL_EARPIECE_HEADROOM_DB;
// FIXME: Workaround for call screening applications until a proper audio mode is defined
// to support this scenario : Exempt the RING stream from the audio cap if the audio was
@@ -5763,9 +5802,10 @@
// speaker is part of the select devices
// - if music is playing, always limit the volume to current music volume,
// with a minimum threshold at -36dB so that notification is always perceived.
- if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP | AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- AUDIO_DEVICE_OUT_WIRED_HEADSET | AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
- AUDIO_DEVICE_OUT_USB_HEADSET | AUDIO_DEVICE_OUT_HEARING_AID)) &&
+ if (!Intersection(deviceTypes,
+ {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,
+ AUDIO_DEVICE_OUT_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADPHONE,
+ AUDIO_DEVICE_OUT_USB_HEADSET, AUDIO_DEVICE_OUT_HEARING_AID}).empty() &&
((volumeSource == alarmVolumeSrc ||
volumeSource == ringVolumeSrc) ||
(volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION)) ||
@@ -5780,31 +5820,33 @@
if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
mLimitRingtoneVolume) {
volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
- audio_devices_t musicDevice =
+ DeviceTypeSet musicDevice =
mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
nullptr, true /*fromCache*/).types();
auto &musicCurves = getVolumeCurves(AUDIO_STREAM_MUSIC);
- float musicVolDb = computeVolume(musicCurves, musicVolumeSrc,
- musicCurves.getVolumeIndex(musicDevice), musicDevice);
+ float musicVolDb = computeVolume(musicCurves,
+ musicVolumeSrc,
+ musicCurves.getVolumeIndex(musicDevice),
+ musicDevice);
float minVolDb = (musicVolDb > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
musicVolDb : SONIFICATION_HEADSET_VOLUME_MIN_DB;
if (volumeDb > minVolDb) {
volumeDb = minVolDb;
ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDb, musicVolDb);
}
- if (device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES)) {
+ if (!Intersection(deviceTypes, {AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES}).empty()) {
// on A2DP, also ensure notification volume is not too low compared to media when
// intended to be played
if ((volumeDb > -96.0f) &&
(musicVolDb - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB > volumeDb)) {
- ALOGV("%s increasing volume for volume source=%d device=0x%X from %f to %f",
- __func__, volumeSource, device, volumeDb,
+ ALOGV("%s increasing volume for volume source=%d device=%s from %f to %f",
+ __func__, volumeSource, dumpDeviceTypes(deviceTypes).c_str(), volumeDb,
musicVolDb - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB);
volumeDb = musicVolDb - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB;
}
}
- } else if ((Volume::getDeviceForVolume(device) != AUDIO_DEVICE_OUT_SPEAKER) ||
+ } else if ((Volume::getDeviceForVolume(deviceTypes) != AUDIO_DEVICE_OUT_SPEAKER) ||
(!(volumeSource == alarmVolumeSrc || volumeSource == ringVolumeSrc))) {
volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
}
@@ -5843,7 +5885,7 @@
VolumeSource volumeSource,
int index,
const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t device,
+ DeviceTypeSet deviceTypes,
int delayMs,
bool force)
{
@@ -5869,17 +5911,20 @@
volumeSource, forceUseForComm);
return INVALID_OPERATION;
}
- if (device == AUDIO_DEVICE_NONE) {
- device = outputDesc->devices().types();
+ if (deviceTypes.empty()) {
+ deviceTypes = outputDesc->devices().types();
}
- float volumeDb = computeVolume(curves, volumeSource, index, device);
- if (outputDesc->isFixedVolume(device) ||
+ float volumeDb = computeVolume(curves, volumeSource, index, deviceTypes);
+ if (outputDesc->isFixedVolume(deviceTypes) ||
// Force VoIP volume to max for bluetooth SCO
- ((isVoiceVolSrc || isBtScoVolSrc) && (device & AUDIO_DEVICE_OUT_ALL_SCO) != 0)) {
+ ((isVoiceVolSrc || isBtScoVolSrc) &&
+ isSingleDeviceType(deviceTypes, audio_is_bluetooth_out_sco_device))) {
volumeDb = 0.0f;
}
- outputDesc->setVolume(volumeDb, volumeSource, curves.getStreamTypes(), device, delayMs, force);
+ outputDesc->setVolume(
+ volumeDb, volumeSource, curves.getStreamTypes(), deviceTypes, delayMs, force);
if (isVoiceVolSrc || isBtScoVolSrc) {
float voiceVolume;
@@ -5898,15 +5943,16 @@
}
void AudioPolicyManager::applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t device,
- int delayMs,
- bool force)
+ const DeviceTypeSet& deviceTypes,
+ int delayMs,
+ bool force)
{
ALOGVV("applyStreamVolumes() for device %08x", device);
for (const auto &volumeGroup : mEngine->getVolumeGroups()) {
auto &curves = getVolumeCurves(toVolumeSource(volumeGroup));
checkAndSetVolume(curves, toVolumeSource(volumeGroup),
- curves.getVolumeIndex(device), outputDesc, device, delayMs, force);
+ curves.getVolumeIndex(deviceTypes),
+ outputDesc, deviceTypes, delayMs, force);
}
}
@@ -5914,7 +5960,7 @@
bool on,
const sp<AudioOutputDescriptor>& outputDesc,
int delayMs,
- audio_devices_t device)
+ DeviceTypeSet deviceTypes)
{
std::vector<VolumeSource> sourcesToMute;
for (auto attributes: mEngine->getAllAttributesForProductStrategy(strategy)) {
@@ -5926,7 +5972,7 @@
}
}
for (auto source : sourcesToMute) {
- setVolumeSourceMute(source, on, outputDesc, delayMs, device);
+ setVolumeSourceMute(source, on, outputDesc, delayMs, deviceTypes);
}
}
@@ -5935,10 +5981,10 @@
bool on,
const sp<AudioOutputDescriptor>& outputDesc,
int delayMs,
- audio_devices_t device)
+ DeviceTypeSet deviceTypes)
{
- if (device == AUDIO_DEVICE_NONE) {
- device = outputDesc->devices().types();
+ if (deviceTypes.empty()) {
+ deviceTypes = outputDesc->devices().types();
}
auto &curves = getVolumeCurves(volumeSource);
if (on) {
@@ -5947,7 +5993,7 @@
(volumeSource != toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
(mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) ==
AUDIO_POLICY_FORCE_NONE))) {
- checkAndSetVolume(curves, volumeSource, 0, outputDesc, device, delayMs);
+ checkAndSetVolume(curves, volumeSource, 0, outputDesc, deviceTypes, delayMs);
}
}
// increment mMuteCount after calling checkAndSetVolume() so that volume change is not
@@ -5960,9 +6006,9 @@
}
if (outputDesc->decMuteCount(volumeSource) == 0) {
checkAndSetVolume(curves, volumeSource,
- curves.getVolumeIndex(device),
+ curves.getVolumeIndex(deviceTypes),
outputDesc,
- device,
+ deviceTypes,
delayMs);
}
}
@@ -6166,7 +6212,7 @@
|| isDeviceOfModule(devDesc, AUDIO_HARDWARE_MODULE_ID_MSD)) {
modifySurroundFormats(devDesc, &formats);
}
- profiles.setFormats(formats);
+ addProfilesForFormats(profiles, formats);
}
for (audio_format_t format : profiles.getSupportedFormats()) {
@@ -6202,7 +6248,8 @@
}
}
}
- profiles.addProfileFromHal(new AudioProfile(format, channelMasks, samplingRates));
+ addDynamicAudioProfileAndSort(
+ profiles, new AudioProfile(format, channelMasks, samplingRates));
}
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 02c6171..707e4b0 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -31,14 +31,14 @@
#include <utils/SortedVector.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
+#include <media/AudioProfile.h>
#include <media/PatchBuilder.h>
#include "AudioPolicyInterface.h"
#include <AudioPolicyManagerObserver.h>
#include <AudioPolicyConfig.h>
-#include <AudioPort.h>
+#include <PolicyAudioPort.h>
#include <AudioPatch.h>
-#include <AudioProfile.h>
#include <DeviceDescriptor.h>
#include <IOProfile.h>
#include <HwModule.h>
@@ -176,7 +176,7 @@
IVolumeCurves &volumeCurves);
status_t getVolumeIndex(const IVolumeCurves &curves, int &index,
- audio_devices_t device) const;
+ const DeviceTypeSet& deviceTypes) const;
// return the strategy corresponding to a given stream type
virtual uint32_t getStrategyForStream(audio_stream_type_t stream)
@@ -278,7 +278,7 @@
virtual status_t getHwOffloadEncodingFormatsSupportedForA2DP(
std::vector<audio_format_t> *formats);
- virtual void setAppState(uid_t uid, app_state_t state);
+ virtual void setAppState(audio_port_handle_t portId, app_state_t state);
virtual bool isHapticPlaybackSupported();
@@ -346,7 +346,7 @@
}
virtual const DeviceVector getAvailableOutputDevices() const
{
- return mAvailableOutputDevices;
+ return mAvailableOutputDevices.filterForEngine();
}
virtual const DeviceVector getAvailableInputDevices() const
{
@@ -422,7 +422,7 @@
virtual float computeVolume(IVolumeCurves &curves,
VolumeSource volumeSource,
int index,
- audio_devices_t device);
+ const DeviceTypeSet& deviceTypes);
// rescale volume index from srcStream within range of dstStream
int rescaleVolumeIndex(int srcIndex,
@@ -432,12 +432,13 @@
virtual status_t checkAndSetVolume(IVolumeCurves &curves,
VolumeSource volumeSource, int index,
const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t device,
+ DeviceTypeSet deviceTypes,
int delayMs = 0, bool force = false);
// apply all stream volumes to the specified output and device
void applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t device, int delayMs = 0, bool force = false);
+ const DeviceTypeSet& deviceTypes,
+ int delayMs = 0, bool force = false);
/**
* @brief setStrategyMute Mute or unmute all active clients on the considered output
@@ -452,7 +453,7 @@
bool on,
const sp<AudioOutputDescriptor>& outputDesc,
int delayMs = 0,
- audio_devices_t device = AUDIO_DEVICE_NONE);
+ DeviceTypeSet deviceTypes = DeviceTypeSet());
/**
* @brief setVolumeSourceMute Mute or unmute the volume source on the specified output
@@ -467,7 +468,7 @@
bool on,
const sp<AudioOutputDescriptor>& outputDesc,
int delayMs = 0,
- audio_devices_t device = AUDIO_DEVICE_NONE);
+ DeviceTypeSet deviceTypes = DeviceTypeSet());
audio_mode_t getPhoneState();
@@ -646,16 +647,13 @@
}
String8 getFirstDeviceAddress(const DeviceVector &devices) const
{
- return (devices.size() > 0) ? devices.itemAt(0)->address() : String8("");
+ return (devices.size() > 0) ?
+ String8(devices.itemAt(0)->address().c_str()) : String8("");
}
uint32_t updateCallRouting(const DeviceVector &rxDevices, uint32_t delayMs = 0);
sp<AudioPatch> createTelephonyPatch(bool isRx, const sp<DeviceDescriptor> &device,
uint32_t delayMs);
- sp<DeviceDescriptor> findDevice(
- const DeviceVector& devices, audio_devices_t device) const;
- audio_devices_t getModuleDeviceTypes(
- const DeviceVector& devices, const char *moduleId) const;
bool isDeviceOfModule(const sp<DeviceDescriptor>& devDesc, const char *moduleId) const;
status_t startSource(const sp<SwAudioOutputDescriptor>& outputDesc,
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index d51cc6e..6de0c80 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -39,8 +39,7 @@
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
+ const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags)
{
@@ -49,7 +48,7 @@
ALOGW("%s: could not get AudioFlinger", __func__);
return PERMISSION_DENIED;
}
- return af->openOutput(module, output, config, devices, address, latencyMs, flags);
+ return af->openOutput(module, output, config, device, latencyMs, flags);
}
audio_io_handle_t AudioPolicyService::AudioPolicyClient::openDuplicateOutput(
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index fa8da89..875f51d 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -222,7 +222,7 @@
if (result == NO_ERROR) {
sp <AudioPlaybackClient> client =
- new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+ new AudioPlaybackClient(*attr, *output, uid, pid, session, *portId, *selectedDeviceId, *stream);
mAudioPlaybackClients.add(*portId, client);
}
return result;
@@ -377,8 +377,10 @@
pid = callingPid;
}
- // check calling permissions
- if (!recordingAllowed(opPackageName, pid, uid)) {
+ // check calling permissions.
+ // Capturing from FM_TUNER source is controlled by captureAudioOutputAllowed() only, as it
+ // does not affect the user's privacy the way capturing from an actual microphone does.
+ if (!(recordingAllowed(opPackageName, pid, uid) || attr->source == AUDIO_SOURCE_FM_TUNER)) {
ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
__func__, uid, pid);
return PERMISSION_DENIED;
@@ -388,7 +390,8 @@
if ((attr->source == AUDIO_SOURCE_VOICE_UPLINK ||
attr->source == AUDIO_SOURCE_VOICE_DOWNLINK ||
attr->source == AUDIO_SOURCE_VOICE_CALL ||
- attr->source == AUDIO_SOURCE_ECHO_REFERENCE) &&
+ attr->source == AUDIO_SOURCE_ECHO_REFERENCE ||
+ attr->source == AUDIO_SOURCE_FM_TUNER) &&
!canCaptureOutput) {
return PERMISSION_DENIED;
}
@@ -451,7 +454,7 @@
return status;
}
- sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session,
+ sp<AudioRecordClient> client = new AudioRecordClient(*attr, *input, uid, pid, session, *portId,
*selectedDeviceId, opPackageName,
canCaptureOutput, canCaptureHotword);
mAudioRecordClients.add(*portId, client);
@@ -494,7 +497,8 @@
}
// check calling permissions
- if (!startRecording(client->opPackageName, client->pid, client->uid)) {
+ if (!(startRecording(client->opPackageName, client->pid, client->uid)
+ || client->attributes.source == AUDIO_SOURCE_FM_TUNER)) {
ALOGE("%s permission denied: recording not allowed for uid %d pid %d",
__func__, client->uid, client->pid);
return PERMISSION_DENIED;
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index a6afaeb..c8d7d0c 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -406,8 +406,7 @@
{
// Go over all active clients and allow capture (does not force silence) in the
// following cases:
-// Another client in the same UID has already been allowed to capture
-// OR The client is the assistant
+// The client is the assistant
// AND an accessibility service is on TOP or a RTT call is active
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR uses VOICE_RECOGNITION AND is on TOP
@@ -423,12 +422,18 @@
// AND is on TOP
// AND the source is VOICE_RECOGNITION or HOTWORD
// OR the client source is virtual (remote submix, call audio TX or RX...)
+// OR the client source is HOTWORD
+// AND is on TOP
+// OR all active clients are using HOTWORD source
+// AND no call is active
+// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
// OR Any client
// AND The assistant is not on TOP
// AND is on TOP or latest started
// AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+
sp<AudioRecordClient> topActive;
sp<AudioRecordClient> latestActive;
sp<AudioRecordClient> latestSensitiveActive;
@@ -443,6 +448,7 @@
bool rttCallActive =
(mPhoneState == AUDIO_MODE_IN_CALL || mPhoneState == AUDIO_MODE_IN_COMMUNICATION)
&& mUidPolicy->isRttEnabled();
+ bool onlyHotwordActive = true;
// if Sensor Privacy is enabled then all recordings should be silenced.
if (mSensorPrivacyPolicy->isSensorPrivacyEnabled()) {
@@ -474,12 +480,11 @@
isAssistantOnTop = true;
}
}
- // Assistant capturing for HOTWORD or Accessibility services not considered
+ // Client capturing for HOTWORD or Accessibility services not considered
// for latest active to avoid masking regular clients started before
if (current->startTimeNs > latestStartNs
- && !((current->attributes.source == AUDIO_SOURCE_HOTWORD
- || isA11yOnTop || rttCallActive)
- && isAssistant)
+ && !(current->attributes.source == AUDIO_SOURCE_HOTWORD
+ || ((isA11yOnTop || rttCallActive) && isAssistant))
&& !isAccessibility) {
latestActive = current;
latestStartNs = current->startTimeNs;
@@ -491,6 +496,9 @@
}
isSensitiveActive = true;
}
+ if (current->attributes.source != AUDIO_SOURCE_HOTWORD) {
+ onlyHotwordActive = false;
+ }
}
// if no active client with UI on Top, consider latest active as top
@@ -498,21 +506,12 @@
topActive = latestActive;
}
- std::vector<uid_t> enabledUids;
-
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!current->active) {
continue;
}
- // keep capture allowed if another client with the same UID has already
- // been allowed to capture
- if (std::find(enabledUids.begin(), enabledUids.end(), current->uid)
- != enabledUids.end()) {
- continue;
- }
-
audio_source_t source = current->attributes.source;
bool isTopOrLatestActive = topActive == nullptr ? false : current->uid == topActive->uid;
bool isLatestSensitive = latestSensitiveActive == nullptr ?
@@ -552,29 +551,32 @@
}
} else if (mUidPolicy->isA11yUid(current->uid)) {
// For accessibility service allow capture if:
- // Is on TOP
- // AND the source is VOICE_RECOGNITION or HOTWORD
- // Or
- // The assistant is not on TOP
- // AND there is no active privacy sensitive capture or call
+ // The assistant is not on TOP
+ // AND there is no active privacy sensitive capture or call
// OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+ // OR
+ // Is on TOP AND the source is VOICE_RECOGNITION or HOTWORD
+ if (!isAssistantOnTop
+ && (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
+ allowCapture = true;
+ }
if (isA11yOnTop) {
if (source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD) {
allowCapture = true;
}
- } else {
- if (!isAssistantOnTop
- && (!(isSensitiveActive || isInCall) || current->canCaptureOutput)) {
- allowCapture = true;
- }
+ }
+ } else if (source == AUDIO_SOURCE_HOTWORD) {
+ // For HOTWORD source allow capture when not on TOP if:
+ // All active clients are using HOTWORD source
+ // AND no call is active
+ // OR client has CAPTURE_AUDIO_OUTPUT privileged permission
+ if (onlyHotwordActive && !(isInCall && !current->canCaptureOutput)) {
+ allowCapture = true;
}
}
- setAppState_l(current->uid,
+ setAppState_l(current->portId,
allowCapture ? apmStatFromAmState(mUidPolicy->getUidState(current->uid)) :
APP_STATE_IDLE);
- if (allowCapture) {
- enabledUids.push_back(current->uid);
- }
}
}
@@ -582,7 +584,7 @@
for (size_t i = 0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!isVirtualSource(current->attributes.source)) {
- setAppState_l(current->uid, APP_STATE_IDLE);
+ setAppState_l(current->portId, APP_STATE_IDLE);
}
}
}
@@ -628,17 +630,17 @@
return false;
}
-void AudioPolicyService::setAppState_l(uid_t uid, app_state_t state)
+void AudioPolicyService::setAppState_l(audio_port_handle_t portId, app_state_t state)
{
AutoCallerClear acc;
if (mAudioPolicyManager) {
- mAudioPolicyManager->setAppState(uid, state);
+ mAudioPolicyManager->setAppState(portId, state);
}
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af) {
bool silenced = state == APP_STATE_IDLE;
- af->setRecordSilenced(uid, silenced);
+ af->setRecordSilenced(portId, silenced);
}
}
@@ -725,11 +727,11 @@
if (in == BAD_TYPE || out == BAD_TYPE || err == BAD_TYPE) {
return BAD_VALUE;
}
- if (args.size() == 3 && args[0] == String16("set-uid-state")) {
+ if (args.size() >= 3 && args[0] == String16("set-uid-state")) {
return handleSetUidState(args, err);
- } else if (args.size() == 2 && args[0] == String16("reset-uid-state")) {
+ } else if (args.size() >= 2 && args[0] == String16("reset-uid-state")) {
return handleResetUidState(args, err);
- } else if (args.size() == 2 && args[0] == String16("get-uid-state")) {
+ } else if (args.size() >= 2 && args[0] == String16("get-uid-state")) {
return handleGetUidState(args, out, err);
} else if (args.size() == 1 && args[0] == String16("help")) {
printHelp(out);
@@ -739,14 +741,32 @@
return BAD_VALUE;
}
-status_t AudioPolicyService::handleSetUidState(Vector<String16>& args, int err) {
- PermissionController pc;
- int uid = pc.getPackageUid(args[1], 0);
- if (uid <= 0) {
- ALOGE("Unknown package: '%s'", String8(args[1]).string());
- dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+static status_t getUidForPackage(String16 packageName, int userId, /*inout*/uid_t& uid, int err) {
+ if (userId < 0) {
+ ALOGE("Invalid user: %d", userId);
+ dprintf(err, "Invalid user: %d\n", userId);
return BAD_VALUE;
}
+
+ PermissionController pc;
+ uid = pc.getPackageUid(packageName, 0);
+ if (uid <= 0) {
+ ALOGE("Unknown package: '%s'", String8(packageName).string());
+ dprintf(err, "Unknown package: '%s'\n", String8(packageName).string());
+ return BAD_VALUE;
+ }
+
+ uid = multiuser_get_uid(userId, uid);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyService::handleSetUidState(Vector<String16>& args, int err) {
+ // Valid args.size() is 3 or 5; it is 5 when the --user option is given.
+ if (!(args.size() == 3 || args.size() == 5)) {
+ printHelp(err);
+ return BAD_VALUE;
+ }
+
bool active = false;
if (args[2] == String16("active")) {
active = true;
@@ -754,30 +774,59 @@
ALOGE("Expected active or idle but got: '%s'", String8(args[2]).string());
return BAD_VALUE;
}
+
+ int userId = 0;
+ if (args.size() >= 5 && args[3] == String16("--user")) {
+ userId = atoi(String8(args[4]));
+ }
+
+ uid_t uid;
+ if (getUidForPackage(args[1], userId, uid, err) == BAD_VALUE) {
+ return BAD_VALUE;
+ }
+
mUidPolicy->addOverrideUid(uid, active);
return NO_ERROR;
}
status_t AudioPolicyService::handleResetUidState(Vector<String16>& args, int err) {
- PermissionController pc;
- int uid = pc.getPackageUid(args[1], 0);
- if (uid < 0) {
- ALOGE("Unknown package: '%s'", String8(args[1]).string());
- dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ // Valid args.size() is 2 or 4; it is 4 when the --user option is given.
+ if (!(args.size() == 2 || args.size() == 4)) {
+ printHelp(err);
return BAD_VALUE;
}
+
+ int userId = 0;
+ if (args.size() >= 4 && args[2] == String16("--user")) {
+ userId = atoi(String8(args[3]));
+ }
+
+ uid_t uid;
+ if (getUidForPackage(args[1], userId, uid, err) == BAD_VALUE) {
+ return BAD_VALUE;
+ }
+
mUidPolicy->removeOverrideUid(uid);
return NO_ERROR;
}
status_t AudioPolicyService::handleGetUidState(Vector<String16>& args, int out, int err) {
- PermissionController pc;
- int uid = pc.getPackageUid(args[1], 0);
- if (uid < 0) {
- ALOGE("Unknown package: '%s'", String8(args[1]).string());
- dprintf(err, "Unknown package: '%s'\n", String8(args[1]).string());
+ // Valid args.size() is 2 or 4; it is 4 when the --user option is given.
+ if (!(args.size() == 2 || args.size() == 4)) {
+ printHelp(err);
return BAD_VALUE;
}
+
+ int userId = 0;
+ if (args.size() >= 4 && args[2] == String16("--user")) {
+ userId = atoi(String8(args[3]));
+ }
+
+ uid_t uid;
+ if (getUidForPackage(args[1], userId, uid, err) == BAD_VALUE) {
+ return BAD_VALUE;
+ }
+
if (mUidPolicy->isUidActive(uid)) {
return dprintf(out, "active\n");
} else {
@@ -787,9 +836,9 @@
status_t AudioPolicyService::printHelp(int out) {
return dprintf(out, "Audio policy service commands:\n"
- " get-uid-state <PACKAGE> gets the uid state\n"
- " set-uid-state <PACKAGE> <active|idle> overrides the uid state\n"
- " reset-uid-state <PACKAGE> clears the uid state override\n"
+ " get-uid-state <PACKAGE> [--user USER_ID] gets the uid state\n"
+ " set-uid-state <PACKAGE> <active|idle> [--user USER_ID] overrides the uid state\n"
+ " reset-uid-state <PACKAGE> [--user USER_ID] clears the uid state override\n"
" help print this message\n");
}
@@ -994,8 +1043,7 @@
bool AudioPolicyService::UidPolicy::isA11yOnTop() {
for (const auto &uid : mCachedUids) {
- std::vector<uid_t>::iterator it = find(mA11yUids.begin(), mA11yUids.end(), uid.first);
- if (it == mA11yUids.end()) {
+ if (!isA11yUid(uid.first)) {
continue;
}
if (uid.second.second >= ActivityManager::PROCESS_STATE_TOP
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 4ca90d1..17e0437 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -311,7 +311,7 @@
virtual status_t shellCommand(int in, int out, int err, Vector<String16>& args);
// Sets whether the given UID records only silence
- virtual void setAppState_l(uid_t uid, app_state_t state);
+ virtual void setAppState_l(audio_port_handle_t portId, app_state_t state);
// Overrides the UID state as if it is idle
status_t handleSetUidState(Vector<String16>& args, int err);
@@ -623,8 +623,7 @@
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
audio_config_t *config,
- audio_devices_t *devices,
- const String8& address,
+ const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags);
// creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
@@ -760,9 +759,10 @@
public:
AudioClient(const audio_attributes_t attributes,
const audio_io_handle_t io, uid_t uid, pid_t pid,
- const audio_session_t session, const audio_port_handle_t deviceId) :
+ const audio_session_t session, audio_port_handle_t portId,
+ const audio_port_handle_t deviceId) :
attributes(attributes), io(io), uid(uid), pid(pid),
- session(session), deviceId(deviceId), active(false) {}
+ session(session), portId(portId), deviceId(deviceId), active(false) {}
~AudioClient() override = default;
@@ -771,6 +771,7 @@
const uid_t uid; // client UID
const pid_t pid; // client PID
const audio_session_t session; // audio session ID
+ const audio_port_handle_t portId;
const audio_port_handle_t deviceId; // selected input device port ID
bool active; // Playback/Capture is active or inactive
};
@@ -782,10 +783,10 @@
public:
AudioRecordClient(const audio_attributes_t attributes,
const audio_io_handle_t io, uid_t uid, pid_t pid,
- const audio_session_t session, const audio_port_handle_t deviceId,
- const String16& opPackageName,
+ const audio_session_t session, audio_port_handle_t portId,
+ const audio_port_handle_t deviceId, const String16& opPackageName,
bool canCaptureOutput, bool canCaptureHotword) :
- AudioClient(attributes, io, uid, pid, session, deviceId),
+ AudioClient(attributes, io, uid, pid, session, portId, deviceId),
opPackageName(opPackageName), startTimeNs(0),
canCaptureOutput(canCaptureOutput), canCaptureHotword(canCaptureHotword) {}
~AudioRecordClient() override = default;
@@ -803,9 +804,9 @@
public:
AudioPlaybackClient(const audio_attributes_t attributes,
const audio_io_handle_t io, uid_t uid, pid_t pid,
- const audio_session_t session, audio_port_handle_t deviceId,
- audio_stream_type_t stream) :
- AudioClient(attributes, io, uid, pid, session, deviceId), stream(stream) {}
+ const audio_session_t session, audio_port_handle_t portId,
+ audio_port_handle_t deviceId, audio_stream_type_t stream) :
+ AudioClient(attributes, io, uid, pid, session, portId, deviceId), stream(stream) {}
~AudioPlaybackClient() override = default;
const audio_stream_type_t stream;
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
new file mode 100644
index 0000000..efdb241
--- /dev/null
+++ b/services/audiopolicy/tests/Android.bp
@@ -0,0 +1,71 @@
+cc_test {
+ name: "audiopolicy_tests",
+
+ include_dirs: [
+ "frameworks/av/services/audiopolicy",
+ ],
+
+ shared_libs: [
+ "libaudioclient",
+ "libaudiofoundation",
+ "libaudiopolicy",
+ "libaudiopolicymanagerdefault",
+ "libbase",
+ "libhidlbase",
+ "liblog",
+ "libmedia_helper",
+ "libutils",
+ "libxml2",
+ ],
+
+ static_libs: ["libaudiopolicycomponents"],
+
+ header_libs: [
+ "libaudiopolicycommon",
+ "libaudiopolicyengine_interface_headers",
+ "libaudiopolicymanager_interface_headers",
+ ],
+
+ srcs: ["audiopolicymanager_tests.cpp"],
+
+ data: [":audiopolicytest_configuration_files",],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ test_suites: ["device-tests"],
+
+}
+
+
+cc_test {
+ name: "audio_health_tests",
+
+ shared_libs: [
+ "libaudiofoundation",
+ "libaudioclient",
+ "libaudiopolicymanagerdefault",
+ "liblog",
+ "libmedia_helper",
+ "libutils",
+ ],
+
+ static_libs: ["libaudiopolicycomponents"],
+
+ header_libs: [
+ "libaudiopolicyengine_interface_headers",
+ "libaudiopolicymanager_interface_headers",
+ ],
+
+ srcs: ["audio_health_tests.cpp"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+
+ test_suites: ["device-tests"],
+
+}
diff --git a/services/audiopolicy/tests/Android.mk b/services/audiopolicy/tests/Android.mk
deleted file mode 100644
index c8d1459..0000000
--- a/services/audiopolicy/tests/Android.mk
+++ /dev/null
@@ -1,67 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_C_INCLUDES := \
- frameworks/av/services/audiopolicy \
- $(call include-path-for, audio-utils) \
-
-LOCAL_SHARED_LIBRARIES := \
- libaudiofoundation \
- libaudiopolicymanagerdefault \
- libbase \
- liblog \
- libmedia_helper \
- libutils \
-
-LOCAL_STATIC_LIBRARIES := \
- libaudiopolicycomponents \
-
-LOCAL_HEADER_LIBRARIES := \
- libaudiopolicycommon \
- libaudiopolicyengine_interface_headers \
- libaudiopolicymanager_interface_headers
-
-LOCAL_SRC_FILES := \
- audiopolicymanager_tests.cpp \
-
-LOCAL_MODULE := audiopolicy_tests
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_CFLAGS := -Werror -Wall
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_COMPATIBILITY_SUITE := device-tests
-
-include $(BUILD_NATIVE_TEST)
-
-# system/audio.h utilities test
-
-include $(CLEAR_VARS)
-
-LOCAL_SHARED_LIBRARIES := \
- libaudiofoundation \
- libbase \
- liblog \
- libmedia_helper \
- libutils
-
-LOCAL_HEADER_LIBRARIES := \
- libmedia_headers
-
-LOCAL_SRC_FILES := \
- systemaudio_tests.cpp \
-
-LOCAL_MODULE := systemaudio_tests
-
-LOCAL_MODULE_TAGS := tests
-
-LOCAL_CFLAGS := -Werror -Wall
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_COMPATIBILITY_SUITE := device-tests
-
-include $(BUILD_NATIVE_TEST)
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
new file mode 100644
index 0000000..c2a92d7
--- /dev/null
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <map>
+
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include "AudioPolicyTestClient.h"
+
+namespace android {
+
+class AudioPolicyManagerTestClient : public AudioPolicyTestClient {
+public:
+ // AudioPolicyClientInterface implementation
+ audio_module_handle_t loadHwModule(const char * /*name*/) override {
+ return mNextModuleHandle++;
+ }
+
+ status_t openOutput(audio_module_handle_t module,
+ audio_io_handle_t *output,
+ audio_config_t * /*config*/,
+ const sp<DeviceDescriptorBase>& /*device*/,
+ uint32_t * /*latencyMs*/,
+ audio_output_flags_t /*flags*/) override {
+ if (module >= mNextModuleHandle) {
+ ALOGE("%s: Module handle %d has not been allocated yet (next is %d)",
+ __func__, module, mNextModuleHandle);
+ return BAD_VALUE;
+ }
+ *output = mNextIoHandle++;
+ return NO_ERROR;
+ }
+
+ audio_io_handle_t openDuplicateOutput(audio_io_handle_t /*output1*/,
+ audio_io_handle_t /*output2*/) override {
+ audio_io_handle_t id = mNextIoHandle++;
+ return id;
+ }
+
+ status_t openInput(audio_module_handle_t module,
+ audio_io_handle_t *input,
+ audio_config_t * /*config*/,
+ audio_devices_t * /*device*/,
+ const String8 & /*address*/,
+ audio_source_t /*source*/,
+ audio_input_flags_t /*flags*/) override {
+ if (module >= mNextModuleHandle) {
+ ALOGE("%s: Module handle %d has not been allocated yet (next is %d)",
+ __func__, module, mNextModuleHandle);
+ return BAD_VALUE;
+ }
+ *input = mNextIoHandle++;
+ return NO_ERROR;
+ }
+
+ status_t createAudioPatch(const struct audio_patch *patch,
+ audio_patch_handle_t *handle,
+ int /*delayMs*/) override {
+ *handle = mNextPatchHandle++;
+ mActivePatches.insert(std::make_pair(*handle, *patch));
+ return NO_ERROR;
+ }
+
+ status_t releaseAudioPatch(audio_patch_handle_t handle,
+ int /*delayMs*/) override {
+ if (mActivePatches.erase(handle) != 1) {
+ if (handle >= mNextPatchHandle) {
+ ALOGE("%s: Patch handle %d has not been allocated yet (next is %d)",
+ __func__, handle, mNextPatchHandle);
+ } else {
+ ALOGE("%s: Attempt to release patch %d twice", __func__, handle);
+ }
+ return BAD_VALUE;
+ }
+ return NO_ERROR;
+ }
+
+ // Helper methods for tests
+ size_t getActivePatchesCount() const { return mActivePatches.size(); }
+
+ const struct audio_patch *getLastAddedPatch() const {
+ if (mActivePatches.empty()) {
+ return nullptr;
+ }
+ auto it = --mActivePatches.end();
+ return &it->second;
+ };
+
+private:
+ audio_module_handle_t mNextModuleHandle = AUDIO_MODULE_HANDLE_NONE + 1;
+ audio_io_handle_t mNextIoHandle = AUDIO_IO_HANDLE_NONE + 1;
+ audio_patch_handle_t mNextPatchHandle = AUDIO_PATCH_HANDLE_NONE + 1;
+ std::map<audio_patch_handle_t, struct audio_patch> mActivePatches;
+};
+
+} // namespace android
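For orientation, the stub client above is meant to be driven by the test manager wrapper declared in AudioPolicyTestManager.h; a minimal sketch of that wiring (assuming the default configuration path that AudioPolicyManagerTest::SetUp below also uses, and not part of the change itself) could look like this:

#include <memory>

#include "AudioPolicyManagerTestClient.h"
#include "AudioPolicyTestManager.h"

using namespace android;

// Sketch only: stand the manager up against the stub client and observe the
// patch bookkeeping the client keeps for the tests.
static void sketchTestClientUsage() {
    auto client = std::make_unique<AudioPolicyManagerTestClient>();
    auto manager = std::make_unique<AudioPolicyTestManager>(client.get());
    manager->getConfig().setDefault();  // same default config as AudioPolicyManagerTest::SetUp
    manager->initialize();
    // Any audio patch the manager creates goes through createAudioPatch() above
    // and is therefore visible via the helper accessors.
    size_t activePatches = client->getActivePatchesCount();
    (void)activePatches;
}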
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index e4c64e5..b92a2e6 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -31,8 +31,7 @@
status_t openOutput(audio_module_handle_t /*module*/,
audio_io_handle_t* /*output*/,
audio_config_t* /*config*/,
- audio_devices_t* /*devices*/,
- const String8& /*address*/,
+ const sp<DeviceDescriptorBase>& /*device*/,
uint32_t* /*latencyMs*/,
audio_output_flags_t /*flags*/) override { return NO_INIT; }
audio_io_handle_t openDuplicateOutput(audio_io_handle_t /*output1*/,
diff --git a/services/audiopolicy/tests/AudioPolicyTestManager.h b/services/audiopolicy/tests/AudioPolicyTestManager.h
index fe543a6..bafcc63 100644
--- a/services/audiopolicy/tests/AudioPolicyTestManager.h
+++ b/services/audiopolicy/tests/AudioPolicyTestManager.h
@@ -24,7 +24,9 @@
explicit AudioPolicyTestManager(AudioPolicyClientInterface *clientInterface)
: AudioPolicyManager(clientInterface, true /*forTesting*/) { }
using AudioPolicyManager::getConfig;
+ using AudioPolicyManager::loadConfig;
using AudioPolicyManager::initialize;
+ using AudioPolicyManager::getOutputs;
};
} // namespace android
diff --git a/services/audiopolicy/tests/audio_health_tests.cpp b/services/audiopolicy/tests/audio_health_tests.cpp
new file mode 100644
index 0000000..8736cf1
--- /dev/null
+++ b/services/audiopolicy/tests/audio_health_tests.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicy_Boot_Test"
+
+#include <unordered_set>
+
+#include <gtest/gtest.h>
+
+#include <media/AudioSystem.h>
+#include <system/audio.h>
+#include <utils/Log.h>
+
+#include "AudioPolicyManagerTestClient.h"
+#include "AudioPolicyTestManager.h"
+
+using namespace android;
+
+TEST(AudioHealthTest, AttachedDeviceFound) {
+ unsigned int numPorts;
+ unsigned int generation1;
+ unsigned int generation;
+ struct audio_port *audioPorts = NULL;
+ int attempts = 10;
+ do {
+ if (attempts-- < 0) {
+ free(audioPorts);
+ GTEST_FAIL() << "Query audio ports time out";
+ }
+ numPorts = 0;
+ ASSERT_EQ(NO_ERROR, AudioSystem::listAudioPorts(
+ AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_DEVICE, &numPorts, NULL, &generation1));
+ if (numPorts == 0) {
+ free(audioPorts);
+ GTEST_FAIL() << "Number of audio ports should not be zero";
+ }
+
+ audioPorts = (struct audio_port *)realloc(audioPorts, numPorts * sizeof(struct audio_port));
+ status_t status = AudioSystem::listAudioPorts(
+ AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_DEVICE, &numPorts, audioPorts, &generation);
+ if (status != NO_ERROR) {
+ free(audioPorts);
+ GTEST_FAIL() << "Query audio ports failed";
+ }
+ } while (generation1 != generation);
+ std::unordered_set<audio_devices_t> attachedDevices;
+ for (int i = 0 ; i < numPorts; i++) {
+ attachedDevices.insert(audioPorts[i].ext.device.type);
+ }
+ free(audioPorts);
+
+ AudioPolicyManagerTestClient client;
+ AudioPolicyTestManager manager(&client);
+ manager.loadConfig();
+ ASSERT_NE("AudioPolicyConfig::setDefault", manager.getConfig().getSource());
+
+ for (auto desc : manager.getConfig().getAvailableInputDevices()) {
+ ASSERT_NE(attachedDevices.end(), attachedDevices.find(desc->type()));
+ }
+ for (auto desc : manager.getConfig().getAvailableOutputDevices()) {
+ ASSERT_NE(attachedDevices.end(), attachedDevices.find(desc->type()));
+ }
+}
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index e10a716..1ee1eea 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -15,16 +15,23 @@
*/
#include <memory>
-#include <set>
+#include <string>
#include <sys/wait.h>
#include <unistd.h>
#include <gtest/gtest.h>
#define LOG_TAG "APM_Test"
-#include <log/log.h>
+#include <Serializer.h>
+#include <android-base/file.h>
+#include <media/AudioPolicy.h>
#include <media/PatchBuilder.h>
+#include <media/RecordingActivityTracker.h>
+#include <utils/Log.h>
+#include <utils/Vector.h>
+#include "AudioPolicyInterface.h"
+#include "AudioPolicyManagerTestClient.h"
#include "AudioPolicyTestClient.h"
#include "AudioPolicyTestManager.h"
@@ -50,77 +57,6 @@
}
-class AudioPolicyManagerTestClient : public AudioPolicyTestClient {
- public:
- // AudioPolicyClientInterface implementation
- audio_module_handle_t loadHwModule(const char* /*name*/) override {
- return mNextModuleHandle++;
- }
-
- status_t openOutput(audio_module_handle_t module,
- audio_io_handle_t* output,
- audio_config_t* /*config*/,
- audio_devices_t* /*devices*/,
- const String8& /*address*/,
- uint32_t* /*latencyMs*/,
- audio_output_flags_t /*flags*/) override {
- if (module >= mNextModuleHandle) {
- ALOGE("%s: Module handle %d has not been allocated yet (next is %d)",
- __func__, module, mNextModuleHandle);
- return BAD_VALUE;
- }
- *output = mNextIoHandle++;
- return NO_ERROR;
- }
-
- status_t openInput(audio_module_handle_t module,
- audio_io_handle_t* input,
- audio_config_t* /*config*/,
- audio_devices_t* /*device*/,
- const String8& /*address*/,
- audio_source_t /*source*/,
- audio_input_flags_t /*flags*/) override {
- if (module >= mNextModuleHandle) {
- ALOGE("%s: Module handle %d has not been allocated yet (next is %d)",
- __func__, module, mNextModuleHandle);
- return BAD_VALUE;
- }
- *input = mNextIoHandle++;
- return NO_ERROR;
- }
-
- status_t createAudioPatch(const struct audio_patch* /*patch*/,
- audio_patch_handle_t* handle,
- int /*delayMs*/) override {
- *handle = mNextPatchHandle++;
- mActivePatches.insert(*handle);
- return NO_ERROR;
- }
-
- status_t releaseAudioPatch(audio_patch_handle_t handle,
- int /*delayMs*/) override {
- if (mActivePatches.erase(handle) != 1) {
- if (handle >= mNextPatchHandle) {
- ALOGE("%s: Patch handle %d has not been allocated yet (next is %d)",
- __func__, handle, mNextPatchHandle);
- } else {
- ALOGE("%s: Attempt to release patch %d twice", __func__, handle);
- }
- return BAD_VALUE;
- }
- return NO_ERROR;
- }
-
- // Helper methods for tests
- size_t getActivePatchesCount() const { return mActivePatches.size(); }
-
- private:
- audio_module_handle_t mNextModuleHandle = AUDIO_MODULE_HANDLE_NONE + 1;
- audio_io_handle_t mNextIoHandle = AUDIO_IO_HANDLE_NONE + 1;
- audio_patch_handle_t mNextPatchHandle = AUDIO_PATCH_HANDLE_NONE + 1;
- std::set<audio_patch_handle_t> mActivePatches;
-};
-
class PatchCountCheck {
public:
explicit PatchCountCheck(AudioPolicyManagerTestClient *client)
@@ -143,18 +79,35 @@
protected:
void SetUp() override;
void TearDown() override;
- virtual void SetUpConfig(AudioPolicyConfig *config) { (void)config; }
+ virtual void SetUpManagerConfig();
void dumpToLog();
+ // When explicit routing is needed, selectedDeviceId needs to be set to the wanted port
+ // id. Otherwise, selectedDeviceId must be initialized to AUDIO_PORT_HANDLE_NONE.
void getOutputForAttr(
audio_port_handle_t *selectedDeviceId,
audio_format_t format,
int channelMask,
int sampleRate,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ audio_io_handle_t *output = nullptr,
+ audio_port_handle_t *portId = nullptr,
+ audio_attributes_t attr = {});
+ void getInputForAttr(
+ const audio_attributes_t &attr,
+ audio_unique_id_t riid,
+ audio_port_handle_t *selectedDeviceId,
+ audio_format_t format,
+ int channelMask,
+ int sampleRate,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
audio_port_handle_t *portId = nullptr);
PatchCountCheck snapshotPatchCount() { return PatchCountCheck(mClient.get()); }
+ void findDevicePort(audio_port_role_t role, audio_devices_t deviceType,
+ const std::string &address, audio_port &foundPort);
+ static audio_port_handle_t getDeviceIdFromPatch(const struct audio_patch* patch);
+
std::unique_ptr<AudioPolicyManagerTestClient> mClient;
std::unique_ptr<AudioPolicyTestManager> mManager;
};
@@ -162,8 +115,7 @@
void AudioPolicyManagerTest::SetUp() {
mClient.reset(new AudioPolicyManagerTestClient);
mManager.reset(new AudioPolicyTestManager(mClient.get()));
- mManager->getConfig().setDefault();
- SetUpConfig(&mManager->getConfig()); // Subclasses may want to customize the config.
+ SetUpManagerConfig(); // Subclasses may want to customize the config.
ASSERT_EQ(NO_ERROR, mManager->initialize());
ASSERT_EQ(NO_ERROR, mManager->initCheck());
}
@@ -173,6 +125,10 @@
mClient.reset();
}
+void AudioPolicyManagerTest::SetUpManagerConfig() {
+ mManager->getConfig().setDefault();
+}
+
void AudioPolicyManagerTest::dumpToLog() {
int pipefd[2];
ASSERT_NE(-1, pipe(pipefd));
@@ -209,22 +165,90 @@
int channelMask,
int sampleRate,
audio_output_flags_t flags,
- audio_port_handle_t *portId) {
- audio_attributes_t attr = {};
- audio_io_handle_t output = AUDIO_PORT_HANDLE_NONE;
+ audio_io_handle_t *output,
+ audio_port_handle_t *portId,
+ audio_attributes_t attr) {
+ audio_io_handle_t localOutput;
+ if (!output) output = &localOutput;
+ *output = AUDIO_IO_HANDLE_NONE;
audio_stream_type_t stream = AUDIO_STREAM_DEFAULT;
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = sampleRate;
config.channel_mask = channelMask;
config.format = format;
- *selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
audio_port_handle_t localPortId;
if (!portId) portId = &localPortId;
*portId = AUDIO_PORT_HANDLE_NONE;
ASSERT_EQ(OK, mManager->getOutputForAttr(
- &attr, &output, AUDIO_SESSION_NONE, &stream, 0 /*uid*/, &config, &flags,
+ &attr, output, AUDIO_SESSION_NONE, &stream, 0 /*uid*/, &config, &flags,
selectedDeviceId, portId, {}));
ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
+ ASSERT_NE(AUDIO_IO_HANDLE_NONE, *output);
+}
+
+void AudioPolicyManagerTest::getInputForAttr(
+ const audio_attributes_t &attr,
+ audio_unique_id_t riid,
+ audio_port_handle_t *selectedDeviceId,
+ audio_format_t format,
+ int channelMask,
+ int sampleRate,
+ audio_input_flags_t flags,
+ audio_port_handle_t *portId) {
+ audio_io_handle_t input = AUDIO_PORT_HANDLE_NONE;
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ config.sample_rate = sampleRate;
+ config.channel_mask = channelMask;
+ config.format = format;
+ audio_port_handle_t localPortId;
+ if (!portId) portId = &localPortId;
+ *portId = AUDIO_PORT_HANDLE_NONE;
+ AudioPolicyInterface::input_type_t inputType;
+ ASSERT_EQ(OK, mManager->getInputForAttr(
+ &attr, &input, riid, AUDIO_SESSION_NONE, 0 /*uid*/, &config, flags,
+ selectedDeviceId, &inputType, portId));
+ ASSERT_NE(AUDIO_PORT_HANDLE_NONE, *portId);
+}
+
+void AudioPolicyManagerTest::findDevicePort(audio_port_role_t role,
+ audio_devices_t deviceType, const std::string &address, audio_port &foundPort) {
+ uint32_t numPorts = 0;
+ uint32_t generation1;
+ status_t ret;
+
+ ret = mManager->listAudioPorts(role, AUDIO_PORT_TYPE_DEVICE, &numPorts, nullptr, &generation1);
+ ASSERT_EQ(NO_ERROR, ret);
+
+ uint32_t generation2;
+ struct audio_port ports[numPorts];
+ ret = mManager->listAudioPorts(role, AUDIO_PORT_TYPE_DEVICE, &numPorts, ports, &generation2);
+ ASSERT_EQ(NO_ERROR, ret);
+ ASSERT_EQ(generation1, generation2);
+
+ for (const auto &port : ports) {
+ if (port.role == role && port.ext.device.type == deviceType &&
+ (strncmp(port.ext.device.address, address.c_str(),
+ AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
+ foundPort = port;
+ return;
+ }
+ }
+ GTEST_FAIL() << "Device port with role " << role << " and address " << address << "not found";
+}
+
+audio_port_handle_t AudioPolicyManagerTest::getDeviceIdFromPatch(
+ const struct audio_patch* patch) {
+ // The logic here is the same as the one in AudioIoDescriptor.
+ // Note that this function aims to get the routed device id for tests.
+ // In that case, a device-to-device patch is not expected here.
+ if (patch->num_sources != 0 && patch->num_sinks != 0) {
+ if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
+ return patch->sinks[0].id;
+ } else {
+ return patch->sources[0].id;
+ }
+ }
+ return AUDIO_PORT_HANDLE_NONE;
}
@@ -286,15 +310,17 @@
class AudioPolicyManagerTestMsd : public AudioPolicyManagerTest {
protected:
- void SetUpConfig(AudioPolicyConfig *config) override;
+ void SetUpManagerConfig() override;
void TearDown() override;
sp<DeviceDescriptor> mMsdOutputDevice;
sp<DeviceDescriptor> mMsdInputDevice;
};
-void AudioPolicyManagerTestMsd::SetUpConfig(AudioPolicyConfig *config) {
+void AudioPolicyManagerTestMsd::SetUpManagerConfig() {
// TODO: Consider using Serializer to load part of the config from a string.
+ AudioPolicyManagerTest::SetUpManagerConfig();
+ AudioPolicyConfig& config = mManager->getConfig();
mMsdOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_BUS);
sp<AudioProfile> pcmOutputProfile = new AudioProfile(
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
@@ -307,22 +333,21 @@
sp<AudioProfile> pcmInputProfile = new AudioProfile(
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO, 44100);
mMsdInputDevice->addAudioProfile(pcmInputProfile);
- config->addAvailableDevice(mMsdOutputDevice);
- config->addAvailableDevice(mMsdInputDevice);
+ config.addAvailableDevice(mMsdOutputDevice);
+ config.addAvailableDevice(mMsdInputDevice);
sp<HwModule> msdModule = new HwModule(AUDIO_HARDWARE_MODULE_ID_MSD, 2 /*halVersionMajor*/);
- HwModuleCollection modules = config->getHwModules();
+ HwModuleCollection modules = config.getHwModules();
modules.add(msdModule);
- config->setHwModules(modules);
+ config.setHwModules(modules);
mMsdOutputDevice->attach(msdModule);
mMsdInputDevice->attach(msdModule);
- sp<OutputProfile> msdOutputProfile = new OutputProfile(String8("msd input"));
+ sp<OutputProfile> msdOutputProfile = new OutputProfile("msd input");
msdOutputProfile->addAudioProfile(pcmOutputProfile);
msdOutputProfile->addSupportedDevice(mMsdOutputDevice);
msdModule->addOutputProfile(msdOutputProfile);
- sp<OutputProfile> msdCompressedOutputProfile =
- new OutputProfile(String8("msd compressed input"));
+ sp<OutputProfile> msdCompressedOutputProfile = new OutputProfile("msd compressed input");
msdCompressedOutputProfile->addAudioProfile(ac3OutputProfile);
msdCompressedOutputProfile->setFlags(
AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
@@ -330,7 +355,7 @@
msdCompressedOutputProfile->addSupportedDevice(mMsdOutputDevice);
msdModule->addOutputProfile(msdCompressedOutputProfile);
- sp<InputProfile> msdInputProfile = new InputProfile(String8("msd output"));
+ sp<InputProfile> msdInputProfile = new InputProfile("msd output");
msdInputProfile->addAudioProfile(pcmInputProfile);
msdInputProfile->addSupportedDevice(mMsdInputDevice);
msdModule->addInputProfile(msdInputProfile);
@@ -339,12 +364,12 @@
// of streams that are not supported by MSD.
sp<AudioProfile> dtsOutputProfile = new AudioProfile(
AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000);
- config->getDefaultOutputDevice()->addAudioProfile(dtsOutputProfile);
- sp<OutputProfile> primaryEncodedOutputProfile = new OutputProfile(String8("encoded"));
+ config.getDefaultOutputDevice()->addAudioProfile(dtsOutputProfile);
+ sp<OutputProfile> primaryEncodedOutputProfile = new OutputProfile("encoded");
primaryEncodedOutputProfile->addAudioProfile(dtsOutputProfile);
primaryEncodedOutputProfile->setFlags(AUDIO_OUTPUT_FLAG_DIRECT);
- primaryEncodedOutputProfile->addSupportedDevice(config->getDefaultOutputDevice());
- config->getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
+ primaryEncodedOutputProfile->addSupportedDevice(config.getDefaultOutputDevice());
+ config.getHwModules().getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY)->
addOutputProfile(primaryEncodedOutputProfile);
}
@@ -372,7 +397,7 @@
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedRoutesToMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
- audio_port_handle_t selectedDeviceId;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
@@ -381,7 +406,7 @@
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrPcmRoutesToMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
- audio_port_handle_t selectedDeviceId;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
@@ -390,7 +415,7 @@
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrEncodedPlusPcmRoutesToMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
- audio_port_handle_t selectedDeviceId;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
@@ -403,7 +428,7 @@
TEST_F(AudioPolicyManagerTestMsd, GetOutputForAttrUnsupportedFormatBypassesMsd) {
const PatchCountCheck patchCount = snapshotPatchCount();
- audio_port_handle_t selectedDeviceId;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
@@ -414,10 +439,11 @@
// Switch between formats that are supported and not supported by MSD.
{
const PatchCountCheck patchCount = snapshotPatchCount();
- audio_port_handle_t selectedDeviceId, portId;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_port_handle_t portId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
- &portId);
+ nullptr /*output*/, &portId);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
ASSERT_EQ(1, patchCount.deltaFromSnapshot());
mManager->releaseOutput(portId);
@@ -425,10 +451,11 @@
}
{
const PatchCountCheck patchCount = snapshotPatchCount();
- audio_port_handle_t selectedDeviceId, portId;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_port_handle_t portId;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_DTS, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT,
- &portId);
+ nullptr /*output*/, &portId);
ASSERT_NE(selectedDeviceId, mMsdOutputDevice->getId());
ASSERT_EQ(-1, patchCount.deltaFromSnapshot());
mManager->releaseOutput(portId);
@@ -436,10 +463,662 @@
}
{
const PatchCountCheck patchCount = snapshotPatchCount();
- audio_port_handle_t selectedDeviceId;
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
getOutputForAttr(&selectedDeviceId,
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, 48000, AUDIO_OUTPUT_FLAG_DIRECT);
ASSERT_EQ(selectedDeviceId, mMsdOutputDevice->getId());
ASSERT_EQ(0, patchCount.deltaFromSnapshot());
}
}
+
+class AudioPolicyManagerTestWithConfigurationFile : public AudioPolicyManagerTest {
+protected:
+ void SetUpManagerConfig() override;
+ virtual std::string getConfigFile() { return sDefaultConfig; }
+
+ static const std::string sExecutableDir;
+ static const std::string sDefaultConfig;
+};
+
+const std::string AudioPolicyManagerTestWithConfigurationFile::sExecutableDir =
+ base::GetExecutableDirectory() + "/";
+
+const std::string AudioPolicyManagerTestWithConfigurationFile::sDefaultConfig =
+ sExecutableDir + "test_audio_policy_configuration.xml";
+
+void AudioPolicyManagerTestWithConfigurationFile::SetUpManagerConfig() {
+ status_t status = deserializeAudioPolicyFile(getConfigFile().c_str(), &mManager->getConfig());
+ ASSERT_EQ(NO_ERROR, status);
+}
+
+TEST_F(AudioPolicyManagerTestWithConfigurationFile, InitSuccess) {
+ // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestWithConfigurationFile, Dump) {
+ dumpToLog();
+}
+
+using PolicyMixTuple = std::tuple<audio_usage_t, audio_source_t, uint32_t>;
+
+class AudioPolicyManagerTestDynamicPolicy : public AudioPolicyManagerTestWithConfigurationFile {
+protected:
+ void TearDown() override;
+
+ status_t addPolicyMix(int mixType, int mixFlag, audio_devices_t deviceType,
+ std::string mixAddress, const audio_config_t& audioConfig,
+ const std::vector<PolicyMixTuple>& rules);
+ void clearPolicyMix();
+
+ Vector<AudioMix> mAudioMixes;
+ const std::string mMixAddress = "remote_submix_media";
+};
+
+void AudioPolicyManagerTestDynamicPolicy::TearDown() {
+ mManager->unregisterPolicyMixes(mAudioMixes);
+ AudioPolicyManagerTestWithConfigurationFile::TearDown();
+}
+
+status_t AudioPolicyManagerTestDynamicPolicy::addPolicyMix(int mixType, int mixFlag,
+ audio_devices_t deviceType, std::string mixAddress, const audio_config_t& audioConfig,
+ const std::vector<PolicyMixTuple>& rules) {
+ Vector<AudioMixMatchCriterion> myMixMatchCriteria;
+
+ for(const auto &rule: rules) {
+ myMixMatchCriteria.add(AudioMixMatchCriterion(
+ std::get<0>(rule), std::get<1>(rule), std::get<2>(rule)));
+ }
+
+ AudioMix myAudioMix(myMixMatchCriteria, mixType, audioConfig, mixFlag,
+ String8(mixAddress.c_str()), 0);
+ myAudioMix.mDeviceType = deviceType;
+ // Clear mAudioMixes before adding a new one to make sure we don't add already existing mixes.
+ mAudioMixes.clear();
+ mAudioMixes.add(myAudioMix);
+
+ // As policy mix registration may fail in some cases,
+ // the caller needs to check the returned status.
+ status_t ret = mManager->registerPolicyMixes(mAudioMixes);
+ return ret;
+}
+
+void AudioPolicyManagerTestDynamicPolicy::clearPolicyMix() {
+ if (mManager != nullptr) {
+ mManager->unregisterPolicyMixes(mAudioMixes);
+ }
+ mAudioMixes.clear();
+}
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, InitSuccess) {
+ // SetUp must finish with no assertions
+}
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, Dump) {
+ dumpToLog();
+}
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, RegisterPolicyMixes) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+
+ // Only capture of playback is allowed in LOOP_BACK & RENDER mode
+ ret = addPolicyMix(MIX_TYPE_RECORDERS, MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+ ASSERT_EQ(INVALID_OPERATION, ret);
+
+ // Fails because the device is already connected.
+ clearPolicyMix();
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+ ASSERT_EQ(INVALID_OPERATION, ret);
+
+ // Registering policy mixes for the first time with valid parameters should succeed.
+ clearPolicyMix();
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = 48000;
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig,
+ std::vector<PolicyMixTuple>());
+ ASSERT_EQ(NO_ERROR, ret);
+ // Registering the same policy mixes should fail.
+ ret = mManager->registerPolicyMixes(mAudioMixes);
+ ASSERT_EQ(INVALID_OPERATION, ret);
+
+ // Registration should fail because the device is not found.
+ // Note that earpiece is not present in the test configuration file.
+ // This will need to be updated if earpiece is added to the test configuration file.
+ clearPolicyMix();
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_EARPIECE, "", audioConfig, std::vector<PolicyMixTuple>());
+ ASSERT_EQ(INVALID_OPERATION, ret);
+
+ // Registration should fail because the output is not found.
+ clearPolicyMix();
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+ ASSERT_EQ(INVALID_OPERATION, ret);
+
+ // Registering valid policy mixes for the first time should succeed.
+ clearPolicyMix();
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_SPEAKER, "", audioConfig, std::vector<PolicyMixTuple>());
+ ASSERT_EQ(NO_ERROR, ret);
+ // Registering the same policy mixes should fail.
+ ret = mManager->registerPolicyMixes(mAudioMixes);
+ ASSERT_EQ(INVALID_OPERATION, ret);
+}
+
+TEST_F(AudioPolicyManagerTestDynamicPolicy, UnregisterPolicyMixes) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = 48000;
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig,
+ std::vector<PolicyMixTuple>());
+ ASSERT_EQ(NO_ERROR, ret);
+
+ // After successfully registering policy mixes, it should be possible to unregister them.
+ ret = mManager->unregisterPolicyMixes(mAudioMixes);
+ ASSERT_EQ(NO_ERROR, ret);
+
+ // After successfully unregistering the policy mixes, unregistering the same
+ // mixes again should fail, as they are no longer registered.
+ ret = mManager->unregisterPolicyMixes(mAudioMixes);
+ ASSERT_EQ(INVALID_OPERATION, ret);
+}
+
+class AudioPolicyManagerTestDPNoRemoteSubmixModule : public AudioPolicyManagerTestDynamicPolicy {
+protected:
+ std::string getConfigFile() override { return sPrimaryOnlyConfig; }
+
+ static const std::string sPrimaryOnlyConfig;
+};
+
+const std::string AudioPolicyManagerTestDPNoRemoteSubmixModule::sPrimaryOnlyConfig =
+ sExecutableDir + "test_audio_policy_primary_only_configuration.xml";
+
+TEST_F(AudioPolicyManagerTestDPNoRemoteSubmixModule, InitSuccess) {
+ // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestDPNoRemoteSubmixModule, Dump) {
+ dumpToLog();
+}
+
+TEST_F(AudioPolicyManagerTestDPNoRemoteSubmixModule, RegistrationFailure) {
+ // Registration/unregistration should fail because the module for remote submix is not found.
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = 48000;
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "", audioConfig, std::vector<PolicyMixTuple>());
+ ASSERT_EQ(INVALID_OPERATION, ret);
+
+ ret = mManager->unregisterPolicyMixes(mAudioMixes);
+ ASSERT_EQ(INVALID_OPERATION, ret);
+}
+
+class AudioPolicyManagerTestDPPlaybackReRouting : public AudioPolicyManagerTestDynamicPolicy,
+ public testing::WithParamInterface<audio_attributes_t> {
+protected:
+ void SetUp() override;
+ void TearDown() override;
+
+ std::unique_ptr<RecordingActivityTracker> mTracker;
+
+ std::vector<PolicyMixTuple> mUsageRules = {
+ {AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE},
+ {AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, RULE_MATCH_ATTRIBUTE_USAGE}
+ };
+
+ struct audio_port mInjectionPort;
+ audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
+};
+
+void AudioPolicyManagerTestDPPlaybackReRouting::SetUp() {
+ AudioPolicyManagerTestDynamicPolicy::SetUp();
+
+ mTracker.reset(new RecordingActivityTracker());
+
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = 48000;
+ status_t ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_LOOP_BACK,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mMixAddress, audioConfig, mUsageRules);
+ ASSERT_EQ(NO_ERROR, ret);
+
+ struct audio_port extractionPort;
+ findDevicePort(AUDIO_PORT_ROLE_SOURCE, AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+ mMixAddress, extractionPort);
+
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_source_t source = AUDIO_SOURCE_REMOTE_SUBMIX;
+ audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, source, 0, ""};
+ std::string tags = "addr=" + mMixAddress;
+ strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ getInputForAttr(attr, mTracker->getRiid(), &selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_IN_STEREO, 48000 /*sampleRate*/, AUDIO_INPUT_FLAG_NONE, &mPortId);
+ ASSERT_EQ(NO_ERROR, mManager->startInput(mPortId));
+ ASSERT_EQ(extractionPort.id, selectedDeviceId);
+
+ findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ mMixAddress, mInjectionPort);
+}
+
+void AudioPolicyManagerTestDPPlaybackReRouting::TearDown() {
+ mManager->stopInput(mPortId);
+ AudioPolicyManagerTestDynamicPolicy::TearDown();
+}
+
+TEST_F(AudioPolicyManagerTestDPPlaybackReRouting, InitSuccess) {
+ // SetUp must finish with no assertions
+}
+
+TEST_F(AudioPolicyManagerTestDPPlaybackReRouting, Dump) {
+ dumpToLog();
+}
+
+TEST_P(AudioPolicyManagerTestDPPlaybackReRouting, PlaybackReRouting) {
+ const audio_attributes_t attr = GetParam();
+ const audio_usage_t usage = attr.usage;
+
+ audio_port_handle_t playbackRoutedPortId = AUDIO_PORT_HANDLE_NONE;
+ getOutputForAttr(&playbackRoutedPortId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ 48000 /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE,
+ nullptr /*output*/, nullptr /*portId*/, attr);
+ if (std::find_if(begin(mUsageRules), end(mUsageRules), [&usage](const auto &usageRule) {
+ return (std::get<0>(usageRule) == usage) &&
+ (std::get<2>(usageRule) == RULE_MATCH_ATTRIBUTE_USAGE);}) != end(mUsageRules) ||
+ (strncmp(attr.tags, "addr=", strlen("addr=")) == 0 &&
+ strncmp(attr.tags + strlen("addr="), mMixAddress.c_str(),
+ AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0)) {
+ EXPECT_EQ(mInjectionPort.id, playbackRoutedPortId);
+ } else {
+ EXPECT_NE(mInjectionPort.id, playbackRoutedPortId);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(
+ PlaybackReroutingUsageMatch,
+ AudioPolicyManagerTestDPPlaybackReRouting,
+ testing::Values(
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
+ AUDIO_SOURCE_DEFAULT, 0, ""}
+ )
+ );
+
+INSTANTIATE_TEST_CASE_P(
+ PlaybackReroutingAddressPriorityMatch,
+ AudioPolicyManagerTestDPPlaybackReRouting,
+ testing::Values(
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ALARM,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_ASSISTANCE_SONIFICATION,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VIRTUAL_SOURCE,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
+ AUDIO_SOURCE_DEFAULT, 0, "addr=remote_submix_media"}
+ )
+ );
+
+INSTANTIATE_TEST_CASE_P(
+ PlaybackReroutingUnHandledUsages,
+ AudioPolicyManagerTestDPPlaybackReRouting,
+ testing::Values(
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_VOICE_COMMUNICATION,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_NOTIFICATION_EVENT,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC,
+ AUDIO_USAGE_ASSISTANCE_SONIFICATION,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_GAME,
+ AUDIO_SOURCE_DEFAULT, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_MUSIC, AUDIO_USAGE_ASSISTANT,
+ AUDIO_SOURCE_DEFAULT, 0, ""}
+ )
+ );
+
+class AudioPolicyManagerTestDPMixRecordInjection : public AudioPolicyManagerTestDynamicPolicy,
+ public testing::WithParamInterface<audio_attributes_t> {
+protected:
+ void SetUp() override;
+ void TearDown() override;
+
+ std::unique_ptr<RecordingActivityTracker> mTracker;
+
+ std::vector<PolicyMixTuple> mSourceRules = {
+ {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_CAMCORDER, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET},
+ {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_MIC, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET},
+ {AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_VOICE_COMMUNICATION, RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET}
+ };
+
+ struct audio_port mExtractionPort;
+ audio_port_handle_t mPortId = AUDIO_PORT_HANDLE_NONE;
+};
+
+void AudioPolicyManagerTestDPMixRecordInjection::SetUp() {
+ AudioPolicyManagerTestDynamicPolicy::SetUp();
+
+ mTracker.reset(new RecordingActivityTracker());
+
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = 48000;
+ status_t ret = addPolicyMix(MIX_TYPE_RECORDERS, MIX_ROUTE_FLAG_LOOP_BACK,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, mMixAddress, audioConfig, mSourceRules);
+ ASSERT_EQ(NO_ERROR, ret);
+
+ struct audio_port injectionPort;
+ findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ mMixAddress, injectionPort);
+
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_usage_t usage = AUDIO_USAGE_VIRTUAL_SOURCE;
+ audio_attributes_t attr = {AUDIO_CONTENT_TYPE_UNKNOWN, usage, AUDIO_SOURCE_DEFAULT, 0, ""};
+ std::string tags = std::string("addr=") + mMixAddress;
+ strncpy(attr.tags, tags.c_str(), AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ 48000 /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE, nullptr /*output*/, &mPortId, attr);
+ ASSERT_EQ(NO_ERROR, mManager->startOutput(mPortId));
+ ASSERT_EQ(injectionPort.id, getDeviceIdFromPatch(mClient->getLastAddedPatch()));
+
+ findDevicePort(AUDIO_PORT_ROLE_SOURCE, AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+ mMixAddress, mExtractionPort);
+}
+
+void AudioPolicyManagerTestDPMixRecordInjection::TearDown() {
+ mManager->stopOutput(mPortId);
+ AudioPolicyManagerTestDynamicPolicy::TearDown();
+}
+
+TEST_F(AudioPolicyManagerTestDPMixRecordInjection, InitSuccess) {
+ // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestDPMixRecordInjection, Dump) {
+ dumpToLog();
+}
+
+TEST_P(AudioPolicyManagerTestDPMixRecordInjection, RecordingInjection) {
+ const audio_attributes_t attr = GetParam();
+ const audio_source_t source = attr.source;
+
+ audio_port_handle_t captureRoutedPortId = AUDIO_PORT_HANDLE_NONE;
+ audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
+ getInputForAttr(attr, mTracker->getRiid(), &captureRoutedPortId, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_IN_STEREO, 48000 /*sampleRate*/, AUDIO_INPUT_FLAG_NONE, &portId);
+ if (std::find_if(begin(mSourceRules), end(mSourceRules), [&source](const auto &sourceRule) {
+ return (std::get<1>(sourceRule) == source) &&
+ (std::get<2>(sourceRule) == RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET);})
+ != end(mSourceRules)) {
+ EXPECT_EQ(mExtractionPort.id, captureRoutedPortId);
+ } else {
+ EXPECT_NE(mExtractionPort.id, captureRoutedPortId);
+ }
+}
+
+// No address priority rule for remote recording; the address is a "don't care".
+INSTANTIATE_TEST_CASE_P(
+ RecordInjectionSourceMatch,
+ AudioPolicyManagerTestDPMixRecordInjection,
+ testing::Values(
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_CAMCORDER, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_CAMCORDER, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_MIC, 0, "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_MIC, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_VOICE_COMMUNICATION, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_VOICE_COMMUNICATION, 0,
+ "addr=remote_submix_media"}
+ )
+ );
+
+// No address priority rule for remote recording
+INSTANTIATE_TEST_CASE_P(
+ RecordInjectionSourceNotMatch,
+ AudioPolicyManagerTestDPMixRecordInjection,
+ testing::Values(
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_VOICE_RECOGNITION, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_HOTWORD, 0, ""},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_VOICE_RECOGNITION, 0,
+ "addr=remote_submix_media"},
+ (audio_attributes_t){AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN,
+ AUDIO_SOURCE_HOTWORD, 0, "addr=remote_submix_media"}
+ )
+ );
+
+using DeviceConnectionTestParams =
+ std::tuple<audio_devices_t /*type*/, std::string /*name*/, std::string /*address*/>;
+
+class AudioPolicyManagerTestDeviceConnection : public AudioPolicyManagerTestWithConfigurationFile,
+ public testing::WithParamInterface<DeviceConnectionTestParams> {
+};
+
+TEST_F(AudioPolicyManagerTestDeviceConnection, InitSuccess) {
+ // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTestDeviceConnection, Dump) {
+ dumpToLog();
+}
+
+TEST_P(AudioPolicyManagerTestDeviceConnection, SetDeviceConnectionState) {
+ const audio_devices_t type = std::get<0>(GetParam());
+ const std::string name = std::get<1>(GetParam());
+ const std::string address = std::get<2>(GetParam());
+
+ if (type == AUDIO_DEVICE_OUT_HDMI) {
+ // Setting the device connection state should fail when no device descriptor is found.
+ // For the HDMI case, it is easier to simulate the "device descriptor not found" error
+ // by using an undeclared encoded format.
+ ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.c_str(), name.c_str(), AUDIO_FORMAT_MAT_2_1));
+ }
+ // Connecting with valid parameters should succeed
+ ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+ // Trying to connect the same device again should fail
+ ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+ // Disconnecting the connected device should succeed
+ ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+ // Disconnecting a device that is not connected should fail
+ ASSERT_EQ(INVALID_OPERATION, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+ // Setting the device connection state to an invalid state should fail
+ ASSERT_EQ(BAD_VALUE, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_CNT,
+ "", "", AUDIO_FORMAT_DEFAULT));
+}
+
+TEST_P(AudioPolicyManagerTestDeviceConnection, ExplicitlyRoutingAfterConnection) {
+ const audio_devices_t type = std::get<0>(GetParam());
+ const std::string name = std::get<1>(GetParam());
+ const std::string address = std::get<2>(GetParam());
+
+ // Connect the device to do the explicit routing test
+ ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+
+ audio_port devicePort;
+ const audio_port_role_t role = audio_is_output_device(type)
+ ? AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
+ findDevicePort(role, type, address, devicePort);
+
+ audio_port_handle_t routedPortId = devicePort.id;
+ // Try starting input or output according to the device type
+ if (audio_is_output_devices(type)) {
+ getOutputForAttr(&routedPortId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ 48000 /*sampleRate*/, AUDIO_OUTPUT_FLAG_NONE);
+ } else if (audio_is_input_device(type)) {
+ RecordingActivityTracker tracker;
+ getInputForAttr({}, tracker.getRiid(), &routedPortId, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_IN_STEREO, 48000 /*sampleRate*/, AUDIO_INPUT_FLAG_NONE);
+ }
+ ASSERT_EQ(devicePort.id, routedPortId);
+
+ ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+ type, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.c_str(), name.c_str(), AUDIO_FORMAT_DEFAULT));
+}
+
+INSTANTIATE_TEST_CASE_P(
+ DeviceConnectionState,
+ AudioPolicyManagerTestDeviceConnection,
+ testing::Values(
+ DeviceConnectionTestParams({AUDIO_DEVICE_IN_HDMI, "test_in_hdmi",
+ "audio_policy_test_in_hdmi"}),
+ DeviceConnectionTestParams({AUDIO_DEVICE_OUT_HDMI, "test_out_hdmi",
+ "audio_policy_test_out_hdmi"}),
+ DeviceConnectionTestParams({AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, "bt_hfp_in",
+ "hfp_client_in"}),
+ DeviceConnectionTestParams({AUDIO_DEVICE_OUT_BLUETOOTH_SCO, "bt_hfp_out",
+ "hfp_client_out"})
+ )
+ );
+
+class AudioPolicyManagerTVTest : public AudioPolicyManagerTestWithConfigurationFile {
+protected:
+ std::string getConfigFile() override { return sTvConfig; }
+ void testHDMIPortSelection(audio_output_flags_t flags, const char* expectedMixPortName);
+
+ static const std::string sTvConfig;
+};
+
+const std::string AudioPolicyManagerTVTest::sTvConfig =
+ AudioPolicyManagerTVTest::sExecutableDir + "test_tv_apm_configuration.xml";
+
+// SwAudioOutputDescriptor doesn't populate flags so check against the port name.
+void AudioPolicyManagerTVTest::testHDMIPortSelection(
+ audio_output_flags_t flags, const char* expectedMixPortName) {
+ ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+ AUDIO_DEVICE_OUT_AUX_DIGITAL, AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ "" /*address*/, "" /*name*/, AUDIO_FORMAT_DEFAULT));
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 48000,
+ flags, &output, &portId);
+ sp<SwAudioOutputDescriptor> outDesc = mManager->getOutputs().valueFor(output);
+ ASSERT_NE(nullptr, outDesc.get());
+ audio_port port = {};
+ outDesc->toAudioPort(&port);
+ mManager->releaseOutput(portId);
+ ASSERT_EQ(NO_ERROR, mManager->setDeviceConnectionState(
+ AUDIO_DEVICE_OUT_AUX_DIGITAL, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ "" /*address*/, "" /*name*/, AUDIO_FORMAT_DEFAULT));
+ ASSERT_EQ(AUDIO_PORT_TYPE_MIX, port.type);
+ ASSERT_EQ(AUDIO_PORT_ROLE_SOURCE, port.role);
+ ASSERT_STREQ(expectedMixPortName, port.name);
+}
+
+TEST_F(AudioPolicyManagerTVTest, InitSuccess) {
+ // SetUp must finish with no assertions.
+}
+
+TEST_F(AudioPolicyManagerTVTest, Dump) {
+ dumpToLog();
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchNoFlags) {
+ testHDMIPortSelection(AUDIO_OUTPUT_FLAG_NONE, "primary output");
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchOutputDirectNoHwAvSync) {
+ // b/140447125: The selected port must not have HW AV Sync flag (see the config file).
+ testHDMIPortSelection(AUDIO_OUTPUT_FLAG_DIRECT, "direct");
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchOutputDirectHwAvSync) {
+ testHDMIPortSelection(static_cast<audio_output_flags_t>(
+ AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
+ "tunnel");
+}
+
+TEST_F(AudioPolicyManagerTVTest, MatchOutputDirectMMapNoIrq) {
+ testHDMIPortSelection(static_cast<audio_output_flags_t>(
+ AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_MMAP_NOIRQ),
+ "low latency");
+}
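Taken together, the four MatchOutput* tests above pin down how the policy manager maps output flag combinations onto the mix ports declared in test_tv_apm_configuration.xml. A minimal sketch of that mapping as a table (the FlagsToPort type and the table are illustrative only, not part of the patch):

    // Expected flag -> mix port selection exercised by the TV tests above.
    struct FlagsToPort {  // hypothetical helper type, for illustration only
        audio_output_flags_t flags;
        const char* expectedMixPortName;
    };
    static const FlagsToPort kTvCases[] = {
        { AUDIO_OUTPUT_FLAG_NONE, "primary output" },
        { AUDIO_OUTPUT_FLAG_DIRECT, "direct" },
        { static_cast<audio_output_flags_t>(
              AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_HW_AV_SYNC), "tunnel" },
        { static_cast<audio_output_flags_t>(
              AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ), "low latency" },
    };
    // Each entry corresponds to one testHDMIPortSelection(flags, expectedMixPortName) call.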
diff --git a/services/audiopolicy/tests/resources/Android.bp b/services/audiopolicy/tests/resources/Android.bp
new file mode 100644
index 0000000..d9476d9
--- /dev/null
+++ b/services/audiopolicy/tests/resources/Android.bp
@@ -0,0 +1,8 @@
+filegroup {
+ name: "audiopolicytest_configuration_files",
+ srcs: [
+ "test_audio_policy_configuration.xml",
+ "test_audio_policy_primary_only_configuration.xml",
+ "test_tv_apm_configuration.xml",
+ ],
+}
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
new file mode 100644
index 0000000..87f0ab9
--- /dev/null
+++ b/services/audiopolicy/tests/resources/test_audio_policy_configuration.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+ <modules>
+ <!-- Primary module -->
+ <module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000"
+ channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ <mixPort name="mixport_bt_hfp_output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="mixport_bt_hfp_input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,16000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+ </devicePort>
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ </devicePort>
+ <devicePort tagName="Hdmi" type="AUDIO_DEVICE_OUT_HDMI" role="sink">
+ </devicePort>
+ <devicePort tagName="Hdmi-In Mic" type="AUDIO_DEVICE_IN_HDMI" role="source">
+ </devicePort>
+ <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO"
+ role="sink" address="hfp_client_out">
+ </devicePort>
+ <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET"
+ role="source" address="hfp_client_in">
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker"
+ sources="primary output"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic,Hdmi-In Mic"/>
+ <route type="mix" sink="Hdmi"
+ sources="primary output"/>
+ <route type="mix" sink="BT SCO"
+ sources="mixport_bt_hfp_output"/>
+ <route type="mix" sink="mixport_bt_hfp_input"
+ sources="BT SCO Headset Mic"/>
+ </routes>
+ </module>
+
+ <!-- Remote Submix module -->
+ <module name="r_submix" halVersion="2.0">
+ <attachedDevices>
+ <item>Remote Submix In</item>
+ </attachedDevices>
+ <mixPorts>
+ <mixPort name="r_submix output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="r_submix input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Remote Submix Out" type="AUDIO_DEVICE_OUT_REMOTE_SUBMIX" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="Remote Submix In" type="AUDIO_DEVICE_IN_REMOTE_SUBMIX" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Remote Submix Out"
+ sources="r_submix output"/>
+ <route type="mix" sink="r_submix input"
+ sources="Remote Submix In"/>
+ </routes>
+ </module>
+ </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/tests/resources/test_audio_policy_primary_only_configuration.xml b/services/audiopolicy/tests/resources/test_audio_policy_primary_only_configuration.xml
new file mode 100644
index 0000000..edc0adb
--- /dev/null
+++ b/services/audiopolicy/tests/resources/test_audio_policy_primary_only_configuration.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+ <modules>
+ <!-- Primary module -->
+ <module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000"
+ channelMasks="AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink">
+ </devicePort>
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker"
+ sources="primary output"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic"/>
+ </routes>
+ </module>
+ </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/tests/resources/test_tv_apm_configuration.xml b/services/audiopolicy/tests/resources/test_tv_apm_configuration.xml
new file mode 100644
index 0000000..f1638f3
--- /dev/null
+++ b/services/audiopolicy/tests/resources/test_tv_apm_configuration.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <globalConfiguration speaker_drc_enabled="false"/>
+ <modules>
+ <module name="primary" halVersion="2.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <!-- Profiles on the HDMI port are explicit for simplicity. In reality they are dynamic -->
+ <!-- Note: ports are intentionally arranged from more specific to less
+ specific in order to test b/140447125 for HW AV Sync, and similar "explicit matches" -->
+ <mixPort name="tunnel" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_HW_AV_SYNC">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="low latency" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_MMAP_NOIRQ">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="direct" role="source" flags="AUDIO_OUTPUT_FLAG_DIRECT">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="Speaker" type="AUDIO_DEVICE_OUT_SPEAKER" role="sink" />
+ <devicePort tagName="Out Aux Digital" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink" />
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="Speaker" sources="primary output"/>
+ <route type="mix" sink="Out Aux Digital" sources="primary output,tunnel,direct,low latency"/>
+ </routes>
+ </module>
+ </modules>
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/tests/systemaudio_tests.cpp b/services/audiopolicy/tests/systemaudio_tests.cpp
deleted file mode 100644
index abaae52..0000000
--- a/services/audiopolicy/tests/systemaudio_tests.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#define LOG_TAG "SysAudio_Test"
-#include <log/log.h>
-#include <media/PatchBuilder.h>
-#include <system/audio.h>
-
-using namespace android;
-
-TEST(SystemAudioTest, PatchInvalid) {
- audio_patch patch{};
- ASSERT_FALSE(audio_patch_is_valid(&patch));
- patch.num_sources = AUDIO_PATCH_PORTS_MAX + 1;
- patch.num_sinks = 1;
- ASSERT_FALSE(audio_patch_is_valid(&patch));
- patch.num_sources = 1;
- patch.num_sinks = AUDIO_PATCH_PORTS_MAX + 1;
- ASSERT_FALSE(audio_patch_is_valid(&patch));
- patch.num_sources = 0;
- patch.num_sinks = 1;
- ASSERT_FALSE(audio_patch_is_valid(&patch));
-}
-
-TEST(SystemAudioTest, PatchValid) {
- const audio_port_config src = {
- .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
- // It's OK not to have sinks.
- ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).patch()));
- const audio_port_config sink = {
- .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
- ASSERT_TRUE(audio_patch_is_valid((PatchBuilder{}).addSource(src).addSink(sink).patch()));
- ASSERT_TRUE(audio_patch_is_valid(
- (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
- ASSERT_TRUE(audio_patch_is_valid(
- (PatchBuilder{}).addSource(src).addSink(sink).addSink(sink).patch()));
- ASSERT_TRUE(audio_patch_is_valid(
- (PatchBuilder{}).addSource(src).addSource(src).
- addSink(sink).addSink(sink).patch()));
-}
-
-TEST(SystemAudioTest, PatchHwAvSync) {
- audio_port_config device_src_cfg = {
- .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
- device_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_src_cfg));
- device_src_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
- ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_src_cfg));
-
- audio_port_config device_sink_cfg = {
- .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
- device_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
- device_sink_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
- ASSERT_TRUE(audio_port_config_has_hw_av_sync(&device_sink_cfg));
-
- audio_port_config mix_sink_cfg = {
- .id = 1, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_MIX };
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
- mix_sink_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
- mix_sink_cfg.flags.input = AUDIO_INPUT_FLAG_HW_AV_SYNC;
- ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_sink_cfg));
-
- audio_port_config mix_src_cfg = {
- .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_MIX };
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
- mix_src_cfg.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
- ASSERT_FALSE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
- mix_src_cfg.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
- ASSERT_TRUE(audio_port_config_has_hw_av_sync(&mix_src_cfg));
-}
-
-TEST(SystemAudioTest, PatchEqual) {
- const audio_patch patch1{}, patch2{};
- // Invalid patches are not equal.
- ASSERT_FALSE(audio_patches_are_equal(&patch1, &patch2));
- const audio_port_config src = {
- .id = 1, .role = AUDIO_PORT_ROLE_SOURCE, .type = AUDIO_PORT_TYPE_DEVICE };
- const audio_port_config sink = {
- .id = 2, .role = AUDIO_PORT_ROLE_SINK, .type = AUDIO_PORT_TYPE_DEVICE };
- ASSERT_FALSE(audio_patches_are_equal(
- (PatchBuilder{}).addSource(src).patch(),
- (PatchBuilder{}).addSource(src).addSink(sink).patch()));
- ASSERT_TRUE(audio_patches_are_equal(
- (PatchBuilder{}).addSource(src).addSink(sink).patch(),
- (PatchBuilder{}).addSource(src).addSink(sink).patch()));
- ASSERT_FALSE(audio_patches_are_equal(
- (PatchBuilder{}).addSource(src).addSink(sink).patch(),
- (PatchBuilder{}).addSource(src).addSource(src).addSink(sink).patch()));
- audio_port_config sink_hw_av_sync = sink;
- sink_hw_av_sync.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
- sink_hw_av_sync.flags.output = AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
- ASSERT_FALSE(audio_patches_are_equal(
- (PatchBuilder{}).addSource(src).addSink(sink).patch(),
- (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
- ASSERT_TRUE(audio_patches_are_equal(
- (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch(),
- (PatchBuilder{}).addSource(src).addSink(sink_hw_av_sync).patch()));
-}
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 1c1f5e6..c50a3c6 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -69,6 +69,10 @@
"utils/LatencyHistogram.cpp",
],
+ header_libs: [
+ "libmediadrm_headers"
+ ],
+
shared_libs: [
"libbase",
"libdl",
@@ -86,10 +90,9 @@
"libfmq",
"libgui",
"libhardware",
- "libhwbinder",
"libhidlbase",
- "libhidltransport",
"libjpeg",
+ "libmedia_codeclist",
"libmedia_omx",
"libmemunreachable",
"libsensorprivacy",
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index ec3c7d5..55bc4fb 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -135,7 +135,9 @@
CameraService::CameraService() :
mEventLog(DEFAULT_EVENT_LOG_LENGTH),
mNumberOfCameras(0),
- mSoundRef(0), mInitialized(false) {
+ mNumberOfCamerasWithoutSystemCamera(0),
+ mSoundRef(0), mInitialized(false),
+ mAudioRestriction(hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE) {
ALOGI("CameraService started (pid=%d)", getpid());
mServiceLockWrapper = std::make_shared<WaitableMutexWrapper>(&mServiceLock);
}
@@ -158,17 +160,21 @@
mInitialized = true;
}
- CameraService::pingCameraServiceProxy();
-
mUidPolicy = new UidPolicy(this);
mUidPolicy->registerSelf();
mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
mSensorPrivacyPolicy->registerSelf();
+ mAppOps.setCameraAudioRestriction(mAudioRestriction);
sp<HidlCameraService> hcs = HidlCameraService::getInstance(this);
if (hcs->registerAsService() != android::OK) {
ALOGE("%s: Failed to register default android.frameworks.cameraservice.service@1.0",
__FUNCTION__);
}
+
+ // This needs to be last call in this function, so that it's as close to
+ // ServiceManager::addService() as possible.
+ CameraService::pingCameraServiceProxy();
+ ALOGI("CameraService pinged cameraservice proxy");
}
status_t CameraService::enumerateProviders() {
@@ -258,21 +264,62 @@
enumerateProviders();
}
+void CameraService::filterAPI1SystemCameraLocked(
+ const std::vector<std::string> &normalDeviceIds) {
+ mNormalDeviceIdsWithoutSystemCamera.clear();
+ for (auto &deviceId : normalDeviceIds) {
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKind(String8(deviceId.c_str()), &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, deviceId.c_str());
+ continue;
+ }
+ if (deviceKind == SystemCameraKind::SYSTEM_ONLY_CAMERA) {
+ // All system camera ids will necessarily come after public camera
+ // device ids as per the HAL interface contract.
+ break;
+ }
+ mNormalDeviceIdsWithoutSystemCamera.push_back(deviceId);
+ }
+ ALOGV("%s: number of API1 compatible public cameras is %zu", __FUNCTION__,
+ mNormalDeviceIdsWithoutSystemCamera.size());
+}
+
+status_t CameraService::getSystemCameraKind(const String8& cameraId, SystemCameraKind *kind) const {
+ auto state = getCameraState(cameraId);
+ if (state != nullptr) {
+ *kind = state->getSystemCameraKind();
+ return OK;
+ }
+ // Hidden physical camera ids won't have CameraState
+ return mCameraProviderManager->getSystemCameraKind(cameraId.c_str(), kind);
+}
+
void CameraService::updateCameraNumAndIds() {
Mutex::Autolock l(mServiceLock);
- mNumberOfCameras = mCameraProviderManager->getCameraCount();
+ std::pair<int, int> systemAndNonSystemCameras = mCameraProviderManager->getCameraCount();
+ // Excludes hidden secure cameras
+ mNumberOfCameras =
+ systemAndNonSystemCameras.first + systemAndNonSystemCameras.second;
+ mNumberOfCamerasWithoutSystemCamera = systemAndNonSystemCameras.second;
mNormalDeviceIds =
mCameraProviderManager->getAPI1CompatibleCameraDeviceIds();
+ filterAPI1SystemCameraLocked(mNormalDeviceIds);
}
void CameraService::addStates(const String8 id) {
std::string cameraId(id.c_str());
hardware::camera::common::V1_0::CameraResourceCost cost;
status_t res = mCameraProviderManager->getResourceCost(cameraId, &cost);
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
if (res != OK) {
ALOGE("Failed to query device resource cost: %s (%d)", strerror(-res), res);
return;
}
+ res = mCameraProviderManager->getSystemCameraKind(cameraId, &deviceKind);
+ if (res != OK) {
+ ALOGE("Failed to query device kind: %s (%d)", strerror(-res), res);
+ return;
+ }
std::set<String8> conflicting;
for (size_t i = 0; i < cost.conflictingDevices.size(); i++) {
conflicting.emplace(String8(cost.conflictingDevices[i].c_str()));
@@ -281,7 +328,7 @@
{
Mutex::Autolock lock(mCameraStatesLock);
mCameraStates.emplace(id, std::make_shared<CameraState>(id, cost.resourceCost,
- conflicting));
+ conflicting, deviceKind));
}
if (mFlashlight->hasFlashUnit(id)) {
@@ -444,15 +491,31 @@
broadcastTorchModeStatus(cameraId, newStatus);
}
+static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
+ return checkPermission(sSystemCameraPermission, callingPid, callingUid) &&
+ checkPermission(sCameraPermission, callingPid, callingUid);
+}
+
Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
ATRACE_CALL();
Mutex::Autolock l(mServiceLock);
+ bool hasSystemCameraPermissions =
+ hasPermissionsForSystemCamera(CameraThreadState::getCallingPid(),
+ CameraThreadState::getCallingUid());
switch (type) {
case CAMERA_TYPE_BACKWARD_COMPATIBLE:
- *numCameras = static_cast<int>(mNormalDeviceIds.size());
+ if (hasSystemCameraPermissions) {
+ *numCameras = static_cast<int>(mNormalDeviceIds.size());
+ } else {
+ *numCameras = static_cast<int>(mNormalDeviceIdsWithoutSystemCamera.size());
+ }
break;
case CAMERA_TYPE_ALL:
- *numCameras = mNumberOfCameras;
+ if (hasSystemCameraPermissions) {
+ *numCameras = mNumberOfCameras;
+ } else {
+ *numCameras = mNumberOfCamerasWithoutSystemCamera;
+ }
break;
default:
ALOGW("%s: Unknown camera type %d",
@@ -467,20 +530,31 @@
CameraInfo* cameraInfo) {
ATRACE_CALL();
Mutex::Autolock l(mServiceLock);
+ std::string cameraIdStr = cameraIdIntToStrLocked(cameraId);
+ if (shouldRejectSystemCameraConnection(String8(cameraIdStr.c_str()))) {
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera"
+ "characteristics for system only device %s: ", cameraIdStr.c_str());
+ }
if (!mInitialized) {
return STATUS_ERROR(ERROR_DISCONNECTED,
"Camera subsystem is not available");
}
-
- if (cameraId < 0 || cameraId >= mNumberOfCameras) {
+ bool hasSystemCameraPermissions =
+ hasPermissionsForSystemCamera(CameraThreadState::getCallingPid(),
+ CameraThreadState::getCallingUid());
+ int cameraIdBound = mNumberOfCamerasWithoutSystemCamera;
+ if (hasSystemCameraPermissions) {
+ cameraIdBound = mNumberOfCameras;
+ }
+ if (cameraId < 0 || cameraId >= cameraIdBound) {
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
"CameraId is not valid");
}
Status ret = Status::ok();
status_t err = mCameraProviderManager->getCameraInfo(
- cameraIdIntToStrLocked(cameraId), cameraInfo);
+ cameraIdStr.c_str(), cameraInfo);
if (err != OK) {
ret = STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
"Error retrieving camera info from device %d: %s (%d)", cameraId,
@@ -491,13 +565,20 @@
}
std::string CameraService::cameraIdIntToStrLocked(int cameraIdInt) {
- if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(mNormalDeviceIds.size())) {
+ const std::vector<std::string> *deviceIds = &mNormalDeviceIdsWithoutSystemCamera;
+ auto callingPid = CameraThreadState::getCallingPid();
+ auto callingUid = CameraThreadState::getCallingUid();
+ if (checkPermission(sSystemCameraPermission, callingPid, callingUid) ||
+ getpid() == callingPid) {
+ deviceIds = &mNormalDeviceIds;
+ }
+ if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(deviceIds->size())) {
ALOGE("%s: input id %d invalid: valid range (0, %zu)",
- __FUNCTION__, cameraIdInt, mNormalDeviceIds.size());
+ __FUNCTION__, cameraIdInt, deviceIds->size());
return std::string{};
}
- return mNormalDeviceIds[cameraIdInt];
+ return (*deviceIds)[cameraIdInt];
}
String8 CameraService::cameraIdIntToStr(int cameraIdInt) {
@@ -533,7 +614,12 @@
"characteristics for device %s: %s (%d)", String8(cameraId).string(),
strerror(-res), res);
}
-
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKind(String8(cameraId), &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, String8(cameraId).string());
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION, "Unable to retrieve camera kind "
+ "for device %s", String8(cameraId).string());
+ }
int callingPid = CameraThreadState::getCallingPid();
int callingUid = CameraThreadState::getCallingUid();
std::vector<int32_t> tagsRemoved;
@@ -541,7 +627,7 @@
// android.permission.CAMERA is required. If android.permission.SYSTEM_CAMERA was needed,
// it would've already been checked in shouldRejectSystemCameraConnection.
if ((callingPid != getpid()) &&
- (getSystemCameraKind(String8(cameraId)) != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
+ (deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
!checkPermission(sCameraPermission, callingPid, callingUid)) {
res = cameraInfo->removePermissionEntries(
mCameraProviderManager->getProviderTagIdLocked(String8(cameraId).string()),
@@ -988,11 +1074,19 @@
return STATUS_ERROR_FMT(ERROR_DISCONNECTED, "No camera device with ID \"%s\" is"
"available", cameraId.string());
}
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKind(cameraId, &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, cameraId.string());
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT, "No camera device with ID \"%s\""
+ "found while trying to query device kind", cameraId.string());
+
+ }
+
// If it's not calling from cameraserver, check the permission if the
// device isn't a system only camera (shouldRejectSystemCameraConnection already checks for
// android.permission.SYSTEM_CAMERA for system only camera devices).
if (callingPid != getpid() &&
- (getSystemCameraKind(cameraId) != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
+ (deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
!checkPermission(sCameraPermission, clientPid, clientUid)) {
ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
@@ -1346,14 +1440,8 @@
return ret;
}
-static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
- return checkPermission(sSystemCameraPermission, callingPid, callingUid) &&
- checkPermission(sCameraPermission, callingPid, callingUid);
-}
-
-bool CameraService::shouldSkipStatusUpdates(const String8& cameraId, bool isVendorListener,
- int clientPid, int clientUid) const {
- SystemCameraKind systemCameraKind = getSystemCameraKind(cameraId);
+bool CameraService::shouldSkipStatusUpdates(SystemCameraKind systemCameraKind,
+ bool isVendorListener, int clientPid, int clientUid) {
// If the client is not a vendor client, don't add listener if
// a) the camera is a publicly hidden secure camera OR
// b) the camera is a system only camera and the client doesn't
@@ -1379,7 +1467,11 @@
int cPid = CameraThreadState::getCallingPid();
int cUid = CameraThreadState::getCallingUid();
- SystemCameraKind systemCameraKind = getSystemCameraKind(cameraId);
+ SystemCameraKind systemCameraKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKind(cameraId, &systemCameraKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, ", __FUNCTION__, cameraId.c_str());
+ return true;
+ }
// (1) Cameraserver trying to connect, accept.
if (CameraThreadState::getCallingPid() == getpid()) {
@@ -1870,14 +1962,24 @@
{
Mutex::Autolock lock(mCameraStatesLock);
for (auto& i : mCameraStates) {
- if (shouldSkipStatusUpdates(i.first, isVendorListener, clientPid, clientUid)) {
- ALOGV("Cannot add public listener for hidden system-only %s for pid %d",
- i.first.c_str(), CameraThreadState::getCallingPid());
- continue;
- }
cameraStatuses->emplace_back(i.first, mapToInterface(i.second->getStatus()));
}
}
+ // Remove the camera statuses that should be hidden from the client; we do
+ // this after collecting the states in order to avoid holding
+ // mCameraStatesLock and mInterfaceLock (held in getSystemCameraKind()) at
+ // the same time.
+ cameraStatuses->erase(std::remove_if(cameraStatuses->begin(), cameraStatuses->end(),
+ [this, &isVendorListener, &clientPid, &clientUid](const hardware::CameraStatus& s) {
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKind(s.cameraId, &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping status update",
+ __FUNCTION__, s.cameraId.c_str());
+ return true;
+ }
+ return shouldSkipStatusUpdates(deviceKind, isVendorListener, clientPid,
+ clientUid);}), cameraStatuses->end());
+
/*
* Immediately signal current torch status to this listener only
@@ -2027,6 +2129,7 @@
mActiveClientManager.remove(i);
}
}
+ updateAudioRestrictionLocked();
}
bool CameraService::evictClientIdByRemote(const wp<IBinder>& remote) {
@@ -2400,6 +2503,7 @@
mClientPackageName(clientPackageName), mClientPid(clientPid), mClientUid(clientUid),
mServicePid(servicePid),
mDisconnected(false),
+ mAudioRestriction(hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE),
mRemoteBinder(remoteCallback)
{
if (sCameraService == nullptr) {
@@ -2503,6 +2607,35 @@
return level == API_2;
}
+status_t CameraService::BasicClient::setAudioRestriction(int32_t mode) {
+ {
+ Mutex::Autolock l(mAudioRestrictionLock);
+ mAudioRestriction = mode;
+ }
+ sCameraService->updateAudioRestriction();
+ return OK;
+}
+
+int32_t CameraService::BasicClient::getServiceAudioRestriction() const {
+ return sCameraService->updateAudioRestriction();
+}
+
+int32_t CameraService::BasicClient::getAudioRestriction() const {
+ Mutex::Autolock l(mAudioRestrictionLock);
+ return mAudioRestriction;
+}
+
+bool CameraService::BasicClient::isValidAudioRestriction(int32_t mode) {
+ switch (mode) {
+ case hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_NONE:
+ case hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_VIBRATION:
+ case hardware::camera2::ICameraDeviceUser::AUDIO_RESTRICTION_VIBRATION_SOUND:
+ return true;
+ default:
+ return false;
+ }
+}
+
status_t CameraService::BasicClient::startCameraOps() {
ATRACE_CALL();
@@ -2936,8 +3069,9 @@
// ----------------------------------------------------------------------------
CameraService::CameraState::CameraState(const String8& id, int cost,
- const std::set<String8>& conflicting) : mId(id),
- mStatus(StatusInternal::NOT_PRESENT), mCost(cost), mConflicting(conflicting) {}
+ const std::set<String8>& conflicting, SystemCameraKind systemCameraKind) : mId(id),
+ mStatus(StatusInternal::NOT_PRESENT), mCost(cost), mConflicting(conflicting),
+ mSystemCameraKind(systemCameraKind) {}
CameraService::CameraState::~CameraState() {}
@@ -2966,6 +3100,10 @@
return mId;
}
+SystemCameraKind CameraService::CameraState::getSystemCameraKind() const {
+ return mSystemCameraKind;
+}
+
// ----------------------------------------------------------------------------
// ClientEventListener
// ----------------------------------------------------------------------------
@@ -3118,6 +3256,8 @@
dprintf(fd, "\n== Service global info: ==\n\n");
dprintf(fd, "Number of camera devices: %d\n", mNumberOfCameras);
dprintf(fd, "Number of normal camera devices: %zu\n", mNormalDeviceIds.size());
+ dprintf(fd, "Number of public camera devices visible to API1: %zu\n",
+ mNormalDeviceIdsWithoutSystemCamera.size());
for (size_t i = 0; i < mNormalDeviceIds.size(); i++) {
dprintf(fd, " Device %zu maps to \"%s\"\n", i, mNormalDeviceIds[i].c_str());
}
@@ -3302,9 +3442,16 @@
return;
}
+ // Avoid calling getSystemCameraKind() with mStatusListenerLock held (b/141756275)
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKind(cameraId, &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, cameraId.string());
+ return;
+ }
+
// Update the status for this camera state, then send the onStatusChangedCallbacks to each
// of the listeners with both the mStatusStatus and mStatusListenerLock held
- state->updateStatus(status, cameraId, rejectSourceStates, [this]
+ state->updateStatus(status, cameraId, rejectSourceStates, [this, &deviceKind]
(const String8& cameraId, StatusInternal status) {
if (status != StatusInternal::ENUMERATING) {
@@ -3326,7 +3473,7 @@
Mutex::Autolock lock(mStatusListenerLock);
for (auto& listener : mListenerList) {
- if (shouldSkipStatusUpdates(cameraId, listener->isVendorListener(),
+ if (shouldSkipStatusUpdates(deviceKind, listener->isVendorListener(),
listener->getListenerPid(), listener->getListenerUid())) {
ALOGV("Skipping camera discovery callback for system-only camera %s",
cameraId.c_str());
@@ -3532,4 +3679,25 @@
" help print this message\n");
}
+int32_t CameraService::updateAudioRestriction() {
+ Mutex::Autolock lock(mServiceLock);
+ return updateAudioRestrictionLocked();
+}
+
+int32_t CameraService::updateAudioRestrictionLocked() {
+ int32_t mode = 0;
+ // Iterate through all active clients
+ for (const auto& i : mActiveClientManager.getAll()) {
+ const auto clientSp = i->getValue();
+ mode |= clientSp->getAudioRestriction();
+ }
+
+ bool modeChanged = (mAudioRestriction != mode);
+ mAudioRestriction = mode;
+ if (modeChanged) {
+ mAppOps.setCameraAudioRestriction(mode);
+ }
+ return mode;
+}
+
}; // namespace android
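updateAudioRestrictionLocked() above aggregates the per-client restriction modes with a bitwise OR, so the service-wide mode is the union of what every active client requested. A small standalone sketch of that behavior, assuming the ICameraDeviceUser constants keep their AIDL values (NONE = 0, VIBRATION = 1, VIBRATION_SOUND = 3; treat the exact values as an assumption):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // Mirrors the aggregation loop in updateAudioRestrictionLocked().
    static int32_t aggregate(std::initializer_list<int32_t> clientModes) {
        int32_t mode = 0;  // assumed AUDIO_RESTRICTION_NONE
        for (int32_t m : clientModes) {
            mode |= m;
        }
        return mode;
    }

    int main() {
        assert(aggregate({0, 1}) == 1);  // NONE + VIBRATION -> VIBRATION
        assert(aggregate({1, 3}) == 3);  // VIBRATION + VIBRATION_SOUND -> VIBRATION_SOUND
        assert(aggregate({}) == 0);      // no active clients -> NONE
        return 0;
    }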
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 4c179e7..829a3ee 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -258,6 +258,19 @@
// Block the client form using the camera
virtual void block();
+
+ // set audio restriction from client
+ // Will call into camera service and hold mServiceLock
+ virtual status_t setAudioRestriction(int32_t mode);
+
+ // Get current global audio restriction setting
+ // Will call into camera service and hold mServiceLock
+ virtual int32_t getServiceAudioRestriction() const;
+
+ // Get current audio restriction setting for this client
+ virtual int32_t getAudioRestriction() const;
+
+ static bool isValidAudioRestriction(int32_t mode);
protected:
BasicClient(const sp<CameraService>& cameraService,
const sp<IBinder>& remoteCallback,
@@ -286,6 +299,9 @@
const pid_t mServicePid;
bool mDisconnected;
+ mutable Mutex mAudioRestrictionLock;
+ int32_t mAudioRestriction;
+
// - The app-side Binder interface to receive callbacks from us
sp<IBinder> mRemoteBinder; // immutable after constructor
@@ -439,6 +455,9 @@
}; // class CameraClientManager
+ int32_t updateAudioRestriction();
+ int32_t updateAudioRestrictionLocked();
+
private:
typedef hardware::camera::common::V1_0::CameraDeviceStatus CameraDeviceStatus;
@@ -471,7 +490,8 @@
* Make a new CameraState and set the ID, cost, and conflicting devices using the values
* returned in the HAL's camera_info struct for each device.
*/
- CameraState(const String8& id, int cost, const std::set<String8>& conflicting);
+ CameraState(const String8& id, int cost, const std::set<String8>& conflicting,
+ SystemCameraKind deviceKind);
virtual ~CameraState();
/**
@@ -523,6 +543,11 @@
*/
String8 getId() const;
+ /**
+ * Return the kind (SystemCameraKind) of this camera device.
+ */
+ SystemCameraKind getSystemCameraKind() const;
+
private:
const String8 mId;
StatusInternal mStatus; // protected by mStatusLock
@@ -530,6 +555,7 @@
std::set<String8> mConflicting;
mutable Mutex mStatusLock;
CameraParameters mShimParams;
+ const SystemCameraKind mSystemCameraKind;
}; // class CameraState
// Observer for UID lifecycle enforcing that UIDs in idle
@@ -642,12 +668,21 @@
// Should a device status update be skipped for a particular camera device ? (this can happen
// under various conditions. For example if a camera device is advertised as
// system only or hidden secure camera, amongst possible others.
- bool shouldSkipStatusUpdates(const String8& cameraId, bool isVendorListener, int clientPid,
- int clientUid) const;
+ static bool shouldSkipStatusUpdates(SystemCameraKind systemCameraKind, bool isVendorListener,
+ int clientPid, int clientUid);
- inline SystemCameraKind getSystemCameraKind(const String8& cameraId) const {
- return mCameraProviderManager->getSystemCameraKind(cameraId.c_str());
- }
+ // Gets the kind of camera device (i.e. public, hidden secure, or system only).
+ // getSystemCameraKind() needs mInterfaceMutex, which might lead to deadlocks
+ // if held along with mStatusListenerLock (depending on lock ordering, b/141756275),
+ // so it is recommended not to call this function with mStatusListenerLock held.
+ status_t getSystemCameraKind(const String8& cameraId, SystemCameraKind *kind) const;
+
+ // Update the set of API1Compatible camera devices without including system
+ // cameras and secure cameras. This is used for hiding system only cameras
+ // from clients using camera1 api and not having android.permission.SYSTEM_CAMERA.
+ // This function expects @param normalDeviceIds, to have normalDeviceIds
+ // sorted in alpha-numeric order.
+ void filterAPI1SystemCameraLocked(const std::vector<std::string> &normalDeviceIds);
// Single implementation shared between the various connect calls
template<class CALLBACK, class CLIENT>
@@ -803,9 +838,14 @@
*/
void updateCameraNumAndIds();
+ // Number of camera devices (excluding hidden secure cameras)
int mNumberOfCameras;
+ // Number of camera devices (excluding hidden secure cameras and
+ // system cameras)
+ int mNumberOfCamerasWithoutSystemCamera;
std::vector<std::string> mNormalDeviceIds;
+ std::vector<std::string> mNormalDeviceIdsWithoutSystemCamera;
// sounds
sp<MediaPlayer> newMediaPlayer(const char *file);
@@ -967,6 +1007,13 @@
void broadcastTorchModeStatus(const String8& cameraId,
hardware::camera::common::V1_0::TorchModeStatus status);
+
+ // TODO: right now each BasicClient holds one AppOpsManager instance.
+ // We could refactor the code so that all clients share this instance.
+ AppOpsManager mAppOps;
+
+ // Aggregated audio restriction mode for all camera clients
+ int32_t mAudioRestriction;
};
} // namespace android
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 162b50f..c273881 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -959,6 +959,11 @@
case Parameters::RECORD:
case Parameters::PREVIEW:
syncWithDevice();
+ // Due to flush, a camera device sync is not a sufficient
+ // guarantee that the current client parameters are
+ // correctly applied. To resolve this, wait for the current
+ // request id to return in the results.
+ waitUntilCurrentRequestIdLocked();
res = stopStream();
if (res != OK) {
ALOGE("%s: Camera %d: Can't stop streaming: %s (%d)",
@@ -2253,6 +2258,58 @@
return OK;
}
+status_t Camera2Client::setAudioRestriction(int /*mode*/) {
+ // Empty implementation. setAudioRestriction is hidden interface and not
+ // supported by android.hardware.Camera API
+ return INVALID_OPERATION;
+}
+
+int32_t Camera2Client::getGlobalAudioRestriction() {
+ // Empty implementation. getAudioRestriction is a hidden interface and is not
+ // supported by the android.hardware.Camera API.
+ return INVALID_OPERATION;
+}
+
+status_t Camera2Client::waitUntilCurrentRequestIdLocked() {
+ int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
+ if (activeRequestId != 0) {
+ auto res = waitUntilRequestIdApplied(activeRequestId,
+ mDevice->getExpectedInFlightDuration());
+ if (res == TIMED_OUT) {
+ ALOGE("%s: Camera %d: Timed out waiting for current request id to return in results!",
+ __FUNCTION__, mCameraId);
+ return res;
+ } else if (res != OK) {
+ ALOGE("%s: Camera %d: Error while waiting for current request id to return in results!",
+ __FUNCTION__, mCameraId);
+ return res;
+ }
+ }
+
+ return OK;
+}
+
+status_t Camera2Client::waitUntilRequestIdApplied(int32_t requestId, nsecs_t timeout) {
+ Mutex::Autolock l(mLatestRequestMutex);
+ while (mLatestRequestId != requestId) {
+ nsecs_t startTime = systemTime();
+
+ auto res = mLatestRequestSignal.waitRelative(mLatestRequestMutex, timeout);
+ if (res != OK) return res;
+
+ timeout -= (systemTime() - startTime);
+ }
+
+ return OK;
+}
+
+void Camera2Client::notifyRequestId(int32_t requestId) {
+ Mutex::Autolock al(mLatestRequestMutex);
+
+ mLatestRequestId = requestId;
+ mLatestRequestSignal.signal();
+}
+
const char* Camera2Client::kAutofocusLabel = "autofocus";
const char* Camera2Client::kTakepictureLabel = "take_picture";
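waitUntilRequestIdApplied() above shrinks the relative timeout on every wakeup, so signals delivered for other request ids cannot stretch the overall wait. A standalone sketch of the same pattern using the standard library (the production code uses android::Mutex/Condition; the helper below is illustrative only):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Wait until done() is true, but never longer than 'timeout' in total,
    // even across spurious or unrelated wakeups.
    template <typename Pred>
    bool waitAtMostFor(std::mutex& m, std::condition_variable& cv,
                       std::chrono::nanoseconds timeout, Pred done) {
        std::unique_lock<std::mutex> lock(m);
        while (!done()) {
            const auto start = std::chrono::steady_clock::now();
            if (cv.wait_for(lock, timeout) == std::cv_status::timeout) {
                return false;  // overall budget exhausted
            }
            // Deduct the time already spent, as waitUntilRequestIdApplied() does.
            timeout -= std::chrono::steady_clock::now() - start;
            if (timeout <= std::chrono::nanoseconds::zero()) {
                return false;
            }
        }
        return true;
    }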
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index a9ea271..8a17b17 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -83,6 +83,8 @@
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
virtual status_t setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
+ virtual status_t setAudioRestriction(int mode);
+ virtual int32_t getGlobalAudioRestriction();
/**
* Interface used by CameraService
@@ -122,6 +124,8 @@
camera2::SharedParameters& getParameters();
+ void notifyRequestId(int32_t requestId);
+
int getPreviewStreamId() const;
int getCaptureStreamId() const;
int getCallbackStreamId() const;
@@ -227,6 +231,12 @@
status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
bool isZslEnabledInStillTemplate();
+
+ mutable Mutex mLatestRequestMutex;
+ Condition mLatestRequestSignal;
+ int32_t mLatestRequestId = -1;
+ status_t waitUntilRequestIdApplied(int32_t requestId, nsecs_t timeout);
+ status_t waitUntilCurrentRequestIdLocked();
};
}; // namespace android
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index d65ac7b..388a5dc 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -534,7 +534,7 @@
}
if (mHardware != nullptr) {
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
metadata->eType = kMetadataBufferTypeNativeHandleSource;
metadata->pHandle = handle;
mHardware->releaseRecordingFrame(dataPtr);
@@ -573,7 +573,7 @@
}
if (!disconnected) {
- VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
metadata->eType = kMetadataBufferTypeNativeHandleSource;
metadata->pHandle = handle;
frames.push_back(dataPtr);
@@ -916,8 +916,12 @@
ALOGE("%s: dataPtr does not contain VideoNativeHandleMetadata!", __FUNCTION__);
return;
}
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
VideoNativeHandleMetadata *metadata =
- (VideoNativeHandleMetadata*)(msg.dataPtr->pointer());
+ (VideoNativeHandleMetadata*)(msg.dataPtr->unsecurePointer());
if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
handle = metadata->pHandle;
}
@@ -1073,8 +1077,12 @@
// Check if dataPtr contains a VideoNativeHandleMetadata.
if (dataPtr->size() == sizeof(VideoNativeHandleMetadata)) {
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
VideoNativeHandleMetadata *metadata =
- (VideoNativeHandleMetadata*)(dataPtr->pointer());
+ (VideoNativeHandleMetadata*)(dataPtr->unsecurePointer());
if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
handle = metadata->pHandle;
}
@@ -1171,4 +1179,25 @@
return INVALID_OPERATION;
}
+status_t CameraClient::setAudioRestriction(int mode) {
+ if (!isValidAudioRestriction(mode)) {
+ ALOGE("%s: invalid audio restriction mode %d", __FUNCTION__, mode);
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock lock(mLock);
+ if (checkPidAndHardware() != NO_ERROR) {
+ return INVALID_OPERATION;
+ }
+ return BasicClient::setAudioRestriction(mode);
+}
+
+int32_t CameraClient::getGlobalAudioRestriction() {
+ Mutex::Autolock lock(mLock);
+ if (checkPidAndHardware() != NO_ERROR) {
+ return INVALID_OPERATION;
+ }
+ return BasicClient::getServiceAudioRestriction();
+}
+
}; // namespace android
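The unsecurePointer() TODOs above leave two options: document why the in-place cast is safe, or copy the data out of the shared IMemory before using it. A hedged sketch of the copy-out option (the helper name and placement are hypothetical; this patch itself keeps the cast):

    #include <cstring>                        // memcpy
    #include <binder/IMemory.h>               // android::IMemory
    #include <media/hardware/HardwareAPI.h>   // VideoNativeHandleMetadata

    // Hypothetical helper: snapshot the metadata so a remote writer cannot
    // change it between the size check and the field reads (TOCTOU concern
    // described in the unsecurePointer() declaration).
    static bool copyVideoMetadata(const android::sp<android::IMemory>& mem,
                                  VideoNativeHandleMetadata* out) {
        if (mem == nullptr || out == nullptr) return false;
        if (mem->size() != sizeof(*out)) return false;
        memcpy(out, mem->unsecurePointer(), sizeof(*out));
        return true;  // only the local copy should be read afterwards
    }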
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 9530b6c..b26b612 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -59,6 +59,8 @@
virtual String8 getParameters() const;
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
virtual status_t setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
+ virtual status_t setAudioRestriction(int mode);
+ virtual int32_t getGlobalAudioRestriction();
// Interface used by CameraService
CameraClient(const sp<CameraService>& cameraService,
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 683e84d..63e293a 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -86,6 +86,12 @@
process3aState(frame, client);
}
+ if (mCurrentRequestId != frame.mResultExtras.requestId) {
+ mCurrentRequestId = frame.mResultExtras.requestId;
+
+ client->notifyRequestId(mCurrentRequestId);
+ }
+
return FrameProcessorBase::processSingleFrame(frame, device);
}
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 8183c12..142b8cd 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -94,6 +94,7 @@
};
AlgState m3aState;
+ int32_t mCurrentRequestId = -1;
// frame number -> pending 3A states that not all data are received yet.
KeyedVector<int32_t, AlgState> mPending3AStates;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index c7a4f2b..08fb153 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1870,6 +1870,34 @@
return res;
}
+binder::Status CameraDeviceClient::setCameraAudioRestriction(int32_t mode) {
+ ATRACE_CALL();
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+ if (!isValidAudioRestriction(mode)) {
+ String8 msg = String8::format("Camera %s: invalid audio restriction mode %d",
+ mCameraIdStr.string(), mode);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+ BasicClient::setAudioRestriction(mode);
+ return binder::Status::ok();
+}
+
+binder::Status CameraDeviceClient::getGlobalAudioRestriction(/*out*/ int32_t* outMode) {
+ ATRACE_CALL();
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+ Mutex::Autolock icl(mBinderSerializationLock);
+ if (outMode != nullptr) {
+ *outMode = BasicClient::getServiceAudioRestriction();
+ }
+ return binder::Status::ok();
+}
+
status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
return BasicClient::dump(fd, args);
}
@@ -2004,6 +2032,15 @@
}
}
+ for (size_t i = 0; i < mCompositeStreamMap.size(); i++) {
+ auto ret = mCompositeStreamMap.valueAt(i)->deleteInternalStreams();
+ if (ret != OK) {
+ ALOGE("%s: Failed removing composite stream %s (%d)", __FUNCTION__,
+ strerror(-ret), ret);
+ }
+ }
+ mCompositeStreamMap.clear();
+
Camera2ClientBase::detachDevice();
}
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 1c5abb0..fe25010 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -152,6 +152,10 @@
virtual binder::Status finalizeOutputConfigurations(int32_t streamId,
const hardware::camera2::params::OutputConfiguration &outputConfiguration) override;
+ virtual binder::Status setCameraAudioRestriction(int32_t mode) override;
+
+ virtual binder::Status getGlobalAudioRestriction(/*out*/int32_t* outMode) override;
+
/**
* Interface used by CameraService
*/
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index 8ebaa2b..0b91016 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -247,7 +247,7 @@
return ret;
}
-status_t DepthCompositeStream::processInputFrame(const InputFrame &inputFrame) {
+status_t DepthCompositeStream::processInputFrame(nsecs_t ts, const InputFrame &inputFrame) {
status_t res;
sp<ANativeWindow> outputANW = mOutputSurface;
ANativeWindowBuffer *anb;
@@ -370,6 +370,13 @@
return NO_MEMORY;
}
+ res = native_window_set_buffers_timestamp(mOutputSurface.get(), ts);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)", __FUNCTION__,
+ getStreamId(), strerror(-res), res);
+ return res;
+ }
+
ALOGV("%s: Final jpeg size: %zu", __func__, finalJpegSize);
uint8_t* header = static_cast<uint8_t *> (dstBuffer) +
(gb->getWidth() - sizeof(struct camera3_jpeg_blob));
@@ -459,7 +466,7 @@
}
}
- auto res = processInputFrame(mPendingInputFrames[currentTs]);
+ auto res = processInputFrame(currentTs, mPendingInputFrames[currentTs]);
Mutex::Autolock l(mMutex);
if (res != OK) {
ALOGE("%s: Failed processing frame with timestamp: %" PRIu64 ": %s (%d)", __FUNCTION__,
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.h b/services/camera/libcameraservice/api2/DepthCompositeStream.h
index 975c59b..28a7826 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.h
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.h
@@ -97,7 +97,7 @@
size_t maxJpegSize, uint8_t jpegQuality,
std::vector<std::unique_ptr<Item>>* items /*out*/);
std::unique_ptr<ImagingModel> getImagingModel();
- status_t processInputFrame(const InputFrame &inputFrame);
+ status_t processInputFrame(nsecs_t ts, const InputFrame &inputFrame);
// Buffer/Results handling
void compilePendingInputLocked();
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 5a87134..26459f9 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -28,10 +28,9 @@
#include <utils/Log.h>
#include <utils/Trace.h>
-#include <media/ICrypto.h>
+#include <mediadrm/ICrypto.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/MediaDefs.h>
#include <media/stagefright/MediaCodecConstants.h>
@@ -61,12 +60,13 @@
mUseGrid(false),
mAppSegmentStreamId(-1),
mAppSegmentSurfaceId(-1),
- mAppSegmentBufferAcquired(false),
mMainImageStreamId(-1),
mMainImageSurfaceId(-1),
mYuvBufferAcquired(false),
mProducerListener(new ProducerListener()),
- mOutputBufferCounter(0),
+ mDequeuedOutputBufferCnt(0),
+ mLockedAppSegmentBufferCnt(0),
+ mCodecOutputCounter(0),
mGridTimestampUs(0) {
}
@@ -132,7 +132,7 @@
sp<IGraphicBufferProducer> producer;
sp<IGraphicBufferConsumer> consumer;
BufferQueue::createBufferQueue(&producer, &consumer);
- mAppSegmentConsumer = new CpuConsumer(consumer, 1);
+ mAppSegmentConsumer = new CpuConsumer(consumer, kMaxAcquiredAppSegment);
mAppSegmentConsumer->setFrameAvailableListener(this);
mAppSegmentConsumer->setName(String8("Camera3-HeicComposite-AppSegmentStream"));
mAppSegmentSurface = new Surface(producer);
@@ -231,6 +231,8 @@
if (bufferInfo.mError) return;
mCodecOutputBufferTimestamps.push(bufferInfo.mTimestamp);
+ ALOGV("%s: [%" PRId64 "]: Adding codecOutputBufferTimestamp (%zu timestamps in total)",
+ __FUNCTION__, bufferInfo.mTimestamp, mCodecOutputBufferTimestamps.size());
}
// We need to get the settings early to handle the case where the codec output
@@ -361,6 +363,8 @@
mCodecOutputBuffers.push_back(outputBufferInfo);
mInputReadyCondition.signal();
} else {
+ ALOGV("%s: Releasing output buffer: size %d flags: 0x%x ", __FUNCTION__,
+ outputBufferInfo.size, outputBufferInfo.flags);
mCodec->releaseOutputBuffer(outputBufferInfo.index);
}
} else {
@@ -414,8 +418,10 @@
mNumOutputTiles = 1;
}
- ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
mFormat = newFormat;
+
+ ALOGV("%s: mNumOutputTiles is %zu", __FUNCTION__, mNumOutputTiles);
+ mInputReadyCondition.signal();
}
void HeicCompositeStream::onHeicCodecError() {
@@ -459,9 +465,8 @@
// Cannot use SourceSurface buffer count since it could be codec's 512*512 tile
// buffer count.
- int maxProducerBuffers = 1;
if ((res = native_window_set_buffer_count(
- anwConsumer, maxProducerBuffers + maxConsumerBuffers)) != OK) {
+ anwConsumer, kMaxOutputSurfaceProducerCount + maxConsumerBuffers)) != OK) {
ALOGE("%s: Unable to set buffer count for stream %d", __FUNCTION__, mMainImageStreamId);
return res;
}
@@ -505,6 +510,8 @@
}
if (mSettingsByFrameNumber.find(resultExtras.frameNumber) != mSettingsByFrameNumber.end()) {
+ ALOGV("%s: [%" PRId64 "]: frameNumber %" PRId64, __FUNCTION__,
+ timestamp, resultExtras.frameNumber);
mFrameNumberMap.emplace(resultExtras.frameNumber, timestamp);
mSettingsByTimestamp[timestamp] = mSettingsByFrameNumber[resultExtras.frameNumber];
mSettingsByFrameNumber.erase(resultExtras.frameNumber);
@@ -520,12 +527,12 @@
mSettingsByTimestamp.erase(it);
}
- while (!mInputAppSegmentBuffers.empty() && !mAppSegmentBufferAcquired) {
+ while (!mInputAppSegmentBuffers.empty()) {
CpuConsumer::LockedBuffer imgBuffer;
auto it = mInputAppSegmentBuffers.begin();
auto res = mAppSegmentConsumer->lockNextBuffer(&imgBuffer);
if (res == NOT_ENOUGH_DATA) {
- // Canot not lock any more buffers.
+ // Can not lock any more buffers.
break;
} else if ((res != OK) || (*it != imgBuffer.timestamp)) {
if (res != OK) {
@@ -535,6 +542,7 @@
ALOGE("%s: Expecting JPEG_APP_SEGMENTS buffer with time stamp: %" PRId64
" received buffer with time stamp: %" PRId64, __FUNCTION__,
*it, imgBuffer.timestamp);
+ mAppSegmentConsumer->unlockBuffer(imgBuffer);
}
mPendingInputFrames[*it].error = true;
mInputAppSegmentBuffers.erase(it);
@@ -546,7 +554,7 @@
mAppSegmentConsumer->unlockBuffer(imgBuffer);
} else {
mPendingInputFrames[imgBuffer.timestamp].appSegmentBuffer = imgBuffer;
- mAppSegmentBufferAcquired = true;
+ mLockedAppSegmentBufferCnt++;
}
mInputAppSegmentBuffers.erase(it);
}
@@ -556,7 +564,7 @@
auto it = mInputYuvBuffers.begin();
auto res = mMainImageConsumer->lockNextBuffer(&imgBuffer);
if (res == NOT_ENOUGH_DATA) {
- // Canot not lock any more buffers.
+ // Cannot lock any more buffers.
break;
} else if (res != OK) {
ALOGE("%s: Error locking YUV_888 image buffer: %s (%d)", __FUNCTION__,
@@ -593,13 +601,15 @@
} else {
// Direct mapping between camera timestamp (in ns) and codec timestamp (in us).
bufferTime = mCodecOutputBufferTimestamps.front();
- mOutputBufferCounter++;
- if (mOutputBufferCounter == mNumOutputTiles) {
+ mCodecOutputCounter++;
+ if (mCodecOutputCounter == mNumOutputTiles) {
mCodecOutputBufferTimestamps.pop();
- mOutputBufferCounter = 0;
+ mCodecOutputCounter = 0;
}
mPendingInputFrames[bufferTime].codecOutputBuffers.push_back(*it);
+ ALOGV("%s: [%" PRId64 "]: Pushing codecOutputBuffers (time %" PRId64 " us)",
+ __FUNCTION__, bufferTime, it->timeUs);
}
mCodecOutputBuffers.erase(it);
}
@@ -607,6 +617,7 @@
while (!mFrameNumberMap.empty()) {
auto it = mFrameNumberMap.begin();
mPendingInputFrames[it->second].frameNumber = it->first;
+ ALOGV("%s: [%" PRId64 "]: frameNumber is %" PRId64, __FUNCTION__, it->second, it->first);
mFrameNumberMap.erase(it);
}
@@ -675,16 +686,29 @@
}
bool newInputAvailable = false;
- for (const auto& it : mPendingInputFrames) {
+ for (auto& it : mPendingInputFrames) {
+ // New input is considered to be available only if:
+ // 1. Input buffers are ready, or
+ // 2. App segment is ready and the muxer is created, or
+ // 3. A codec output tile is ready, and an output buffer is available.
+ // This makes sure that the muxer gets created only when an output tile is
+ // generated, because right now we only handle 1 HEIC output buffer at a
+ // time (max dequeued buffer count is 1).
bool appSegmentReady = (it.second.appSegmentBuffer.data != nullptr) &&
- !it.second.appSegmentWritten && it.second.result != nullptr;
+ !it.second.appSegmentWritten && it.second.result != nullptr &&
+ it.second.muxer != nullptr;
bool codecOutputReady = !it.second.codecOutputBuffers.empty();
bool codecInputReady = (it.second.yuvBuffer.data != nullptr) &&
(!it.second.codecInputBuffers.empty());
+ bool hasOutputBuffer = it.second.muxer != nullptr ||
+ (mDequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount);
if ((!it.second.error) &&
(it.first < *currentTs) &&
- (appSegmentReady || codecOutputReady || codecInputReady)) {
+ (appSegmentReady || (codecOutputReady && hasOutputBuffer) || codecInputReady)) {
*currentTs = it.first;
+ if (it.second.format == nullptr && mFormat != nullptr) {
+ it.second.format = mFormat->dup();
+ }
newInputAvailable = true;
break;
}
@@ -716,15 +740,17 @@
status_t res = OK;
bool appSegmentReady = inputFrame.appSegmentBuffer.data != nullptr &&
- !inputFrame.appSegmentWritten && inputFrame.result != nullptr;
+ !inputFrame.appSegmentWritten && inputFrame.result != nullptr &&
+ inputFrame.muxer != nullptr;
bool codecOutputReady = inputFrame.codecOutputBuffers.size() > 0;
bool codecInputReady = inputFrame.yuvBuffer.data != nullptr &&
- !inputFrame.codecInputBuffers.empty();
+ !inputFrame.codecInputBuffers.empty();
+ bool hasOutputBuffer = inputFrame.muxer != nullptr ||
+ (mDequeuedOutputBufferCnt < kMaxOutputSurfaceProducerCount);
- if (!appSegmentReady && !codecOutputReady && !codecInputReady) {
- ALOGW("%s: No valid appSegmentBuffer/codec input/outputBuffer available!", __FUNCTION__);
- return OK;
- }
+ ALOGV("%s: [%" PRId64 "]: appSegmentReady %d, codecOutputReady %d, codecInputReady %d,"
+ " dequeuedOutputBuffer %d", __FUNCTION__, timestamp, appSegmentReady,
+ codecOutputReady, codecInputReady, mDequeuedOutputBufferCnt);
// Handle inputs for Hevc tiling
if (codecInputReady) {
@@ -736,7 +762,13 @@
}
}
- // Initialize and start muxer if not yet done so
+ if (!(codecOutputReady && hasOutputBuffer) && !appSegmentReady) {
+ return OK;
+ }
+
+ // Initialize and start the muxer if that hasn't been done yet. In this case,
+ // codecOutputReady must be true. Otherwise, appSegmentReady is guaranteed
+ // to be false, and the function would have returned early.
if (inputFrame.muxer == nullptr) {
res = startMuxerForInputFrame(timestamp, inputFrame);
if (res != OK) {
@@ -747,7 +779,7 @@
}
// Write JPEG APP segments data to the muxer.
- if (appSegmentReady && inputFrame.muxer != nullptr) {
+ if (appSegmentReady) {
res = processAppSegment(timestamp, inputFrame);
if (res != OK) {
ALOGE("%s: Failed to process JPEG APP segments: %s (%d)", __FUNCTION__,
@@ -766,12 +798,18 @@
}
}
- if (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0) {
- res = processCompletedInputFrame(timestamp, inputFrame);
- if (res != OK) {
- ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
- strerror(-res), res);
- return res;
+ if (inputFrame.pendingOutputTiles == 0) {
+ if (inputFrame.appSegmentWritten) {
+ res = processCompletedInputFrame(timestamp, inputFrame);
+ if (res != OK) {
+ ALOGE("%s: Failed to process completed input frame: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ return res;
+ }
+ } else if (mLockedAppSegmentBufferCnt == kMaxAcquiredAppSegment) {
+ ALOGE("%s: Out-of-order app segment buffers reaches limit %u", __FUNCTION__,
+ kMaxAcquiredAppSegment);
+ return INVALID_OPERATION;
}
}
@@ -780,11 +818,6 @@
status_t HeicCompositeStream::startMuxerForInputFrame(nsecs_t timestamp, InputFrame &inputFrame) {
sp<ANativeWindow> outputANW = mOutputSurface;
- if (inputFrame.codecOutputBuffers.size() == 0) {
- // No single codec output buffer has been generated. Continue to
- // wait.
- return OK;
- }
auto res = outputANW->dequeueBuffer(mOutputSurface.get(), &inputFrame.anb, &inputFrame.fenceFd);
if (res != OK) {
@@ -792,6 +825,7 @@
res);
return res;
}
+ mDequeuedOutputBufferCnt++;
// Combine current thread id, stream id and timestamp to uniquely identify image.
std::ostringstream tempOutputFile;
@@ -828,7 +862,7 @@
}
}
- ssize_t trackId = inputFrame.muxer->addTrack(mFormat);
+ ssize_t trackId = inputFrame.muxer->addTrack(inputFrame.format);
if (trackId < 0) {
ALOGE("%s: Failed to addTrack to the muxer: %zd", __FUNCTION__, trackId);
return NO_INIT;
@@ -844,6 +878,8 @@
return res;
}
+ ALOGV("%s: [%" PRId64 "]: Muxer started for inputFrame", __FUNCTION__,
+ timestamp);
return OK;
}
@@ -852,9 +888,6 @@
auto appSegmentSize = findAppSegmentsSize(inputFrame.appSegmentBuffer.data,
inputFrame.appSegmentBuffer.width * inputFrame.appSegmentBuffer.height,
&app1Size);
- ALOGV("%s: appSegmentSize is %zu, width %d, height %d, app1Size %zu", __FUNCTION__,
- appSegmentSize, inputFrame.appSegmentBuffer.width,
- inputFrame.appSegmentBuffer.height, app1Size);
if (appSegmentSize == 0) {
ALOGE("%s: Failed to find JPEG APP segment size", __FUNCTION__);
return NO_INIT;
@@ -910,7 +943,16 @@
__FUNCTION__, strerror(-res), res);
return res;
}
+
+ ALOGV("%s: [%" PRId64 "]: appSegmentSize is %zu, width %d, height %d, app1Size %zu",
+ __FUNCTION__, timestamp, appSegmentSize, inputFrame.appSegmentBuffer.width,
+ inputFrame.appSegmentBuffer.height, app1Size);
+
inputFrame.appSegmentWritten = true;
+ // Release the buffer now so any pending input app segments can be processed
+ mAppSegmentConsumer->unlockBuffer(inputFrame.appSegmentBuffer);
+ inputFrame.appSegmentBuffer.data = nullptr;
+ mLockedAppSegmentBufferCnt--;
return OK;
}
@@ -934,8 +976,9 @@
mOutputWidth - tileX * mGridWidth : mGridWidth;
size_t height = (tileY == static_cast<size_t>(mGridRows) - 1) ?
mOutputHeight - tileY * mGridHeight : mGridHeight;
- ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu",
- __FUNCTION__, tileX, tileY, top, left, width, height);
+ ALOGV("%s: inputBuffer tileIndex [%zu, %zu], top %zu, left %zu, width %zu, height %zu,"
+ " timeUs %" PRId64, __FUNCTION__, tileX, tileY, top, left, width, height,
+ inputBuffer.timeUs);
res = copyOneYuvTile(buffer, inputFrame.yuvBuffer, top, left, width, height);
if (res != OK) {
@@ -990,6 +1033,9 @@
}
inputFrame.codecOutputBuffers.erase(inputFrame.codecOutputBuffers.begin());
+
+ ALOGV("%s: [%" PRId64 "]: Output buffer index %d",
+ __FUNCTION__, timestamp, it->index);
return OK;
}
@@ -1046,7 +1092,9 @@
return res;
}
inputFrame.anb = nullptr;
+ mDequeuedOutputBufferCnt--;
+ ALOGV("%s: [%" PRId64 "]", __FUNCTION__, timestamp);
ATRACE_ASYNC_END("HEIC capture", inputFrame.frameNumber);
return OK;
}
@@ -1060,7 +1108,6 @@
if (inputFrame->appSegmentBuffer.data != nullptr) {
mAppSegmentConsumer->unlockBuffer(inputFrame->appSegmentBuffer);
inputFrame->appSegmentBuffer.data = nullptr;
- mAppSegmentBufferAcquired = false;
}
while (!inputFrame->codecOutputBuffers.empty()) {
@@ -1098,11 +1145,13 @@
}
}
-void HeicCompositeStream::releaseInputFramesLocked(int64_t currentTs) {
+void HeicCompositeStream::releaseInputFramesLocked() {
auto it = mPendingInputFrames.begin();
while (it != mPendingInputFrames.end()) {
- if (it->first <= currentTs) {
- releaseInputFrameLocked(&it->second);
+ auto& inputFrame = it->second;
+ if (inputFrame.error ||
+ (inputFrame.appSegmentWritten && inputFrame.pendingOutputTiles == 0)) {
+ releaseInputFrameLocked(&inputFrame);
it = mPendingInputFrames.erase(it);
} else {
it++;
@@ -1210,7 +1259,7 @@
outputFormat->setInt32(KEY_I_FRAME_INTERVAL, 0);
outputFormat->setInt32(KEY_COLOR_FORMAT,
useGrid ? COLOR_FormatYUV420Flexible : COLOR_FormatSurface);
- outputFormat->setInt32(KEY_FRAME_RATE, gridRows * gridCols);
+ outputFormat->setInt32(KEY_FRAME_RATE, useGrid ? gridRows * gridCols : kNoGridOpRate);
// This only serves as a hint to encoder when encoding is not real-time.
outputFormat->setInt32(KEY_OPERATING_RATE, useGrid ? kGridOpRate : kNoGridOpRate);
@@ -1506,7 +1555,7 @@
// In case we landed in error state, return any pending buffers and
// halt all further processing.
compilePendingInputLocked();
- releaseInputFramesLocked(currentTs);
+ releaseInputFramesLocked();
return false;
}
@@ -1548,11 +1597,7 @@
mPendingInputFrames[currentTs].error = true;
}
- if (mPendingInputFrames[currentTs].error ||
- (mPendingInputFrames[currentTs].appSegmentWritten &&
- mPendingInputFrames[currentTs].pendingOutputTiles == 0)) {
- releaseInputFramesLocked(currentTs);
- }
+ releaseInputFramesLocked();
return true;
}
@@ -1671,8 +1716,13 @@
ALOGE("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
break;
}
-
- parent->onHeicFormatChanged(format);
+ // Here format is MediaCodec's internal copy of output format.
+ // Make a copy since onHeicFormatChanged() might modify it.
+ sp<AMessage> formatCopy;
+ if (format != nullptr) {
+ formatCopy = format->dup();
+ }
+ parent->onHeicFormatChanged(formatCopy);
break;
}
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.h b/services/camera/libcameraservice/api2/HeicCompositeStream.h
index 260c68e..04e7b83 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.h
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.h
@@ -25,6 +25,7 @@
#include <media/hardware/VideoAPI.h>
#include <media/MediaCodecBuffer.h>
#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaMuxer.h>
@@ -157,6 +158,7 @@
bool errorNotified;
int64_t frameNumber;
+ sp<AMessage> format;
sp<MediaMuxer> muxer;
int fenceFd;
int fileFd;
@@ -187,7 +189,7 @@
status_t processCompletedInputFrame(nsecs_t timestamp, InputFrame &inputFrame);
void releaseInputFrameLocked(InputFrame *inputFrame /*out*/);
- void releaseInputFramesLocked(int64_t currentTs);
+ void releaseInputFramesLocked();
size_t findAppSegmentsSize(const uint8_t* appSegmentBuffer, size_t maxSize,
size_t* app1SegmentSize);
@@ -205,11 +207,13 @@
static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS);
static const android_dataspace kHeifDataSpace =
static_cast<android_dataspace>(HAL_DATASPACE_HEIF);
+ // Use the limit of pipeline depth in the API spec as the maximum number of acquired
+ // app segment buffers.
+ static const uint32_t kMaxAcquiredAppSegment = 8;
int mAppSegmentStreamId, mAppSegmentSurfaceId;
sp<CpuConsumer> mAppSegmentConsumer;
sp<Surface> mAppSegmentSurface;
- bool mAppSegmentBufferAcquired;
size_t mAppSegmentMaxSize;
CameraMetadata mStaticInfo;
@@ -218,9 +222,10 @@
sp<CpuConsumer> mMainImageConsumer; // Only applicable for HEVC codec.
bool mYuvBufferAcquired; // Only applicable to HEVC codec
+ static const int32_t kMaxOutputSurfaceProducerCount = 1;
sp<Surface> mOutputSurface;
sp<ProducerListener> mProducerListener;
-
+ int32_t mDequeuedOutputBufferCnt;
// Map from frame number to JPEG setting of orientation+quality
std::map<int64_t, std::pair<int32_t, int32_t>> mSettingsByFrameNumber;
@@ -229,11 +234,12 @@
// Keep all incoming APP segment Blob buffer pending further processing.
std::vector<int64_t> mInputAppSegmentBuffers;
+ int32_t mLockedAppSegmentBufferCnt;
// Keep all incoming HEIC blob buffer pending further processing.
std::vector<CodecOutputBufferInfo> mCodecOutputBuffers;
std::queue<int64_t> mCodecOutputBufferTimestamps;
- size_t mOutputBufferCounter;
+ size_t mCodecOutputCounter;
// Keep all incoming Yuv buffer pending tiling and encoding (for HEVC YUV tiling only)
std::vector<int64_t> mInputYuvBuffers;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 98c1b5e..935bc37 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -383,6 +383,12 @@
* drop buffers for stream of streamId.
*/
virtual status_t dropStreamBuffers(bool /*dropping*/, int /*streamId*/) = 0;
+
+ /**
+ * Returns the maximum expected time it'll take for all currently in-flight
+ * requests to complete, based on their settings
+ */
+ virtual nsecs_t getExpectedInFlightDuration() = 0;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index c21bd69..7ed4c3d 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -104,13 +104,30 @@
return OK;
}
-int CameraProviderManager::getCameraCount() const {
+std::pair<int, int> CameraProviderManager::getCameraCount() const {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- int count = 0;
+ int systemCameraCount = 0;
+ int publicCameraCount = 0;
for (auto& provider : mProviders) {
- count += provider->mUniqueCameraIds.size();
+ for (auto &id : provider->mUniqueCameraIds) {
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKindLocked(id, &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, id.c_str());
+ continue;
+ }
+ switch(deviceKind) {
+ case SystemCameraKind::PUBLIC:
+ publicCameraCount++;
+ break;
+ case SystemCameraKind::SYSTEM_ONLY_CAMERA:
+ systemCameraCount++;
+ break;
+ default:
+ break;
+ }
+ }
}
- return count;
+ return std::make_pair(systemCameraCount, publicCameraCount);
}
std::vector<std::string> CameraProviderManager::getCameraDeviceIds() const {
@@ -124,21 +141,47 @@
return deviceIds;
}
+void CameraProviderManager::collectDeviceIdsLocked(const std::vector<std::string> deviceIds,
+ std::vector<std::string>& publicDeviceIds,
+ std::vector<std::string>& systemDeviceIds) const {
+ for (auto &deviceId : deviceIds) {
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKindLocked(deviceId, &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, deviceId.c_str());
+ continue;
+ }
+ if (deviceKind == SystemCameraKind::SYSTEM_ONLY_CAMERA) {
+ systemDeviceIds.push_back(deviceId);
+ } else {
+ publicDeviceIds.push_back(deviceId);
+ }
+ }
+}
+
std::vector<std::string> CameraProviderManager::getAPI1CompatibleCameraDeviceIds() const {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ std::vector<std::string> publicDeviceIds;
+ std::vector<std::string> systemDeviceIds;
std::vector<std::string> deviceIds;
for (auto& provider : mProviders) {
std::vector<std::string> providerDeviceIds = provider->mUniqueAPI1CompatibleCameraIds;
-
+ // Secure cameras should not be exposed through the Camera1 API
+ providerDeviceIds.erase(std::remove_if(providerDeviceIds.begin(), providerDeviceIds.end(),
+ [this](const std::string& s) {
+ SystemCameraKind deviceKind = SystemCameraKind::PUBLIC;
+ if (getSystemCameraKindLocked(s, &deviceKind) != OK) {
+ ALOGE("%s: Invalid camera id %s, skipping", __FUNCTION__, s.c_str());
+ return true;
+ }
+ return deviceKind == SystemCameraKind::HIDDEN_SECURE_CAMERA;}),
+ providerDeviceIds.end());
// API1 app doesn't handle logical and physical camera devices well. So
// for each camera facing, only take the first id advertised by HAL in
// all [logical, physical1, physical2, ...] id combos, and filter out the rest.
filterLogicalCameraIdsLocked(providerDeviceIds);
-
- deviceIds.insert(deviceIds.end(), providerDeviceIds.begin(), providerDeviceIds.end());
+ collectDeviceIdsLocked(providerDeviceIds, publicDeviceIds, systemDeviceIds);
}
-
- std::sort(deviceIds.begin(), deviceIds.end(),
+ auto sortFunc =
[](const std::string& a, const std::string& b) -> bool {
uint32_t aUint = 0, bUint = 0;
bool aIsUint = base::ParseUint(a, &aUint);
@@ -154,7 +197,13 @@
}
// Simple string compare if both id are not uint
return a < b;
- });
+ };
+ // Put device ids for system cameras at the end since they will be removed
+ // for processes that lack system camera permissions.
+ std::sort(publicDeviceIds.begin(), publicDeviceIds.end(), sortFunc);
+ std::sort(systemDeviceIds.begin(), systemDeviceIds.end(), sortFunc);
+ deviceIds.insert(deviceIds.end(), publicDeviceIds.begin(), publicDeviceIds.end());
+ deviceIds.insert(deviceIds.end(), systemDeviceIds.begin(), systemDeviceIds.end());
return deviceIds;
}
@@ -1054,23 +1103,45 @@
return deviceInfo->mIsLogicalCamera;
}
-SystemCameraKind CameraProviderManager::getSystemCameraKind(const std::string& id) {
+status_t CameraProviderManager::getSystemCameraKind(const std::string& id,
+ SystemCameraKind *kind) const {
std::lock_guard<std::mutex> lock(mInterfaceMutex);
-
- auto deviceInfo = findDeviceInfoLocked(id);
- if (deviceInfo == nullptr) {
- return SystemCameraKind::PUBLIC;
- }
- return deviceInfo->mSystemCameraKind;
+ return getSystemCameraKindLocked(id, kind);
}
-bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) {
+status_t CameraProviderManager::getSystemCameraKindLocked(const std::string& id,
+ SystemCameraKind *kind) const {
+ auto deviceInfo = findDeviceInfoLocked(id);
+ if (deviceInfo != nullptr) {
+ *kind = deviceInfo->mSystemCameraKind;
+ return OK;
+ }
+ // If this is a hidden physical camera, we should return what kind of
+ // camera the enclosing logical camera is.
+ auto isHiddenAndParent = isHiddenPhysicalCameraInternal(id);
+ if (isHiddenAndParent.first) {
+ LOG_ALWAYS_FATAL_IF(id == isHiddenAndParent.second->mId,
+ "%s: hidden physical camera id %s and enclosing logical camera id %s are the same",
+ __FUNCTION__, id.c_str(), isHiddenAndParent.second->mId.c_str());
+ return getSystemCameraKindLocked(isHiddenAndParent.second->mId, kind);
+ }
+ // Neither a hidden physical camera nor a logical camera
+ return NAME_NOT_FOUND;
+}
+
+bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) const {
+ return isHiddenPhysicalCameraInternal(cameraId).first;
+}
+
+std::pair<bool, CameraProviderManager::ProviderInfo::DeviceInfo *>
+CameraProviderManager::isHiddenPhysicalCameraInternal(const std::string& cameraId) const {
+ auto falseRet = std::make_pair(false, nullptr);
for (auto& provider : mProviders) {
for (auto& deviceInfo : provider->mDevices) {
if (deviceInfo->mId == cameraId) {
// cameraId is found in public camera IDs advertised by the
// provider.
- return false;
+ return falseRet;
}
}
}
@@ -1082,7 +1153,7 @@
if (res != OK) {
ALOGE("%s: Failed to getCameraCharacteristics for id %s", __FUNCTION__,
deviceInfo->mId.c_str());
- return false;
+ return falseRet;
}
std::vector<std::string> physicalIds;
@@ -1094,16 +1165,16 @@
if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_5) {
ALOGE("%s: Wrong deviceVersion %x for hiddenPhysicalCameraId %s",
__FUNCTION__, deviceVersion, cameraId.c_str());
- return false;
+ return falseRet;
} else {
- return true;
+ return std::make_pair(true, deviceInfo.get());
}
}
}
}
}
- return false;
+ return falseRet;
}
status_t CameraProviderManager::addProviderLocked(const std::string& newProvider) {
@@ -1116,7 +1187,7 @@
}
sp<provider::V2_4::ICameraProvider> interface;
- interface = mServiceProxy->getService(newProvider);
+ interface = mServiceProxy->tryGetService(newProvider);
if (interface == nullptr) {
ALOGE("%s: Camera provider HAL '%s' is not actually available", __FUNCTION__,
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 801e978..2ef1f6f 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -96,6 +96,10 @@
const std::string &serviceName,
const sp<hidl::manager::V1_0::IServiceNotification>
&notification) = 0;
+ // Will not wait for service to start if it's not already running
+ virtual sp<hardware::camera::provider::V2_4::ICameraProvider> tryGetService(
+ const std::string &serviceName) = 0;
+ // Will block for service if it exists but isn't running
virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
const std::string &serviceName) = 0;
virtual hardware::hidl_vec<hardware::hidl_string> listServices() = 0;
@@ -112,6 +116,10 @@
return hardware::camera::provider::V2_4::ICameraProvider::registerForNotifications(
serviceName, notification);
}
+ virtual sp<hardware::camera::provider::V2_4::ICameraProvider> tryGetService(
+ const std::string &serviceName) override {
+ return hardware::camera::provider::V2_4::ICameraProvider::tryGetService(serviceName);
+ }
virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
const std::string &serviceName) override {
return hardware::camera::provider::V2_4::ICameraProvider::getService(serviceName);
@@ -152,10 +160,10 @@
ServiceInteractionProxy *proxy = &sHardwareServiceInteractionProxy);
/**
- * Retrieve the total number of available cameras. This value may change dynamically as cameras
- * are added or removed.
+ * Retrieve the total number of available cameras.
+ * This value may change dynamically as cameras are added or removed.
*/
- int getCameraCount() const;
+ std::pair<int, int> getCameraCount() const;
std::vector<std::string> getCameraDeviceIds() const;
@@ -292,8 +300,8 @@
*/
bool isLogicalCamera(const std::string& id, std::vector<std::string>* physicalCameraIds);
- SystemCameraKind getSystemCameraKind(const std::string& id);
- bool isHiddenPhysicalCamera(const std::string& cameraId);
+ status_t getSystemCameraKind(const std::string& id, SystemCameraKind *kind) const;
+ bool isHiddenPhysicalCamera(const std::string& cameraId) const;
static const float kDepthARTolerance;
private:
@@ -615,6 +623,13 @@
status_t getCameraCharacteristicsLocked(const std::string &id,
CameraMetadata* characteristics) const;
void filterLogicalCameraIdsLocked(std::vector<std::string>& deviceIds) const;
+
+ status_t getSystemCameraKindLocked(const std::string& id, SystemCameraKind *kind) const;
+ std::pair<bool, ProviderInfo::DeviceInfo *> isHiddenPhysicalCameraInternal(const std::string& cameraId) const;
+
+ void collectDeviceIdsLocked(const std::vector<std::string> deviceIds,
+ std::vector<std::string>& normalDeviceIds,
+ std::vector<std::string>& systemCameraDeviceIds) const;
};
} // namespace android
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
index 522d521..62ef681 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
@@ -165,8 +165,12 @@
mem = mHidlMemPoolMap.at(data);
}
sp<CameraHeapMemory> heapMem(static_cast<CameraHeapMemory *>(mem->handle));
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
- heapMem->mBuffers[bufferIndex]->pointer();
+ heapMem->mBuffers[bufferIndex]->unsecurePointer();
md->pHandle = const_cast<native_handle_t*>(frameData.getNativeHandle());
sDataCbTimestamp(timestamp, (int32_t) msgType, mem, bufferIndex, this);
return hardware::Void();
@@ -192,8 +196,12 @@
hidl_msg.bufferIndex, mem->mNumBufs);
return hardware::Void();
}
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
- mem->mBuffers[hidl_msg.bufferIndex]->pointer();
+ mem->mBuffers[hidl_msg.bufferIndex]->unsecurePointer();
md->pHandle = const_cast<native_handle_t*>(hidl_msg.frameData.getNativeHandle());
msgs.push_back({hidl_msg.timestamp, mem->mBuffers[hidl_msg.bufferIndex]});
@@ -578,7 +586,11 @@
int bufferIndex = offset / size;
if (CC_LIKELY(mHidlDevice != nullptr)) {
if (size == sizeof(VideoNativeHandleMetadata)) {
- VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->unsecurePointer();
// Caching the handle here because md->pHandle will be subject to HAL's edit
native_handle_t* nh = md->pHandle;
hidl_handle frame = nh;
@@ -605,7 +617,11 @@
if (size == sizeof(VideoNativeHandleMetadata)) {
uint32_t heapId = heap->getHeapID();
uint32_t bufferIndex = offset / size;
- VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+ // TODO: Using unsecurePointer() has some associated security pitfalls
+ // (see declaration for details).
+ // Either document why it is safe in this case or address the
+ // issue (e.g. by copying).
+ VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->unsecurePointer();
// Caching the handle here because md->pHandle will be subject to HAL's edit
native_handle_t* nh = md->pHandle;
VideoFrameMessage msg;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 4227a3b..3188892 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2802,6 +2802,27 @@
mOperatingMode = operatingMode;
}
+ // When called from configureStreams, abort queued input buffers that don't belong to
+ // any pending request.
+ if (mInputStream != NULL && notifyRequestThread) {
+ while (true) {
+ camera3_stream_buffer_t inputBuffer;
+ status_t res = mInputStream->getInputBuffer(&inputBuffer,
+ /*respectHalLimit*/ false);
+ if (res != OK) {
+ // No more input buffers to acquire; the queue is drained.
+ break;
+ }
+
+ inputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
+ res = mInputStream->returnInputBuffer(inputBuffer);
+ if (res != OK) {
+ ALOGE("%s: %d: couldn't return input buffer while clearing input queue: "
+ "%s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
+ }
+ }
+ }
+
if (!mNeedConfig) {
ALOGV("%s: Skipping config, no stream changes", __FUNCTION__);
return OK;
@@ -3679,7 +3700,7 @@
// Did we get the (final) result metadata for this capture?
if (result->result != NULL && !isPartialResult) {
if (request.physicalCameraIds.size() != result->num_physcam_metadata) {
- SET_ERR("Requested physical Camera Ids %d not equal to number of metadata %d",
+ SET_ERR("Expected physical Camera metadata count %d not equal to actual count %d",
request.physicalCameraIds.size(), result->num_physcam_metadata);
return;
}
@@ -3873,12 +3894,14 @@
errorCode) {
if (physicalCameraId.size() > 0) {
String8 cameraId(physicalCameraId);
- if (r.physicalCameraIds.find(cameraId) == r.physicalCameraIds.end()) {
+ auto iter = r.physicalCameraIds.find(cameraId);
+ if (iter == r.physicalCameraIds.end()) {
ALOGE("%s: Reported result failure for physical camera device: %s "
" which is not part of the respective request!",
__FUNCTION__, cameraId.string());
break;
}
+ r.physicalCameraIds.erase(iter);
resultExtras.errorPhysicalCameraId = physicalCameraId;
} else {
logicalDeviceResultError = true;
@@ -5082,6 +5105,7 @@
ALOGW("%s: %d: couldn't get input buffer while clearing the request "
"list: %s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
} else {
+ inputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
res = (*it)->mInputStream->returnInputBuffer(inputBuffer);
if (res != OK) {
ALOGE("%s: %d: couldn't return input buffer while clearing the request "
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index cae34ce..2573b48 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -194,6 +194,8 @@
*/
status_t dropStreamBuffers(bool dropping, int streamId) override;
+ nsecs_t getExpectedInFlightDuration() override;
+
/**
* Helper functions to map between framework and HIDL values
*/
@@ -1111,12 +1113,6 @@
const SurfaceMap& outputSurfaces);
/**
- * Returns the maximum expected time it'll take for all currently in-flight
- * requests to complete, based on their settings
- */
- nsecs_t getExpectedInFlightDuration();
-
- /**
* Tracking for idle detection
*/
sp<camera3::StatusTracker> mStatusTracker;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index fc83684..cb59a76 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -71,7 +71,8 @@
res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false);
if (res != OK) {
- ALOGE("%s: Stream %d: Can't acquire next output buffer: %s (%d)",
+ // This may or may not be an error condition depending on the caller.
+ ALOGV("%s: Stream %d: Can't acquire next output buffer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index acb8b3c..e1d35e8 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -54,9 +54,8 @@
mState = STATE_ERROR;
}
- if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
- mBufferReleasedListener = new BufferReleasedListener(this);
- }
+ bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+ mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id,
@@ -87,9 +86,8 @@
mState = STATE_ERROR;
}
- if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
- mBufferReleasedListener = new BufferReleasedListener(this);
- }
+ bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+ mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id,
@@ -124,10 +122,8 @@
}
mConsumerName = String8("Deferred");
- if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
- mBufferReleasedListener = new BufferReleasedListener(this);
- }
-
+ bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+ mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
}
Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
@@ -151,9 +147,8 @@
mDropBuffers(false),
mDequeueBufferLatency(kDequeueLatencyBinSize) {
- if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
- mBufferReleasedListener = new BufferReleasedListener(this);
- }
+ bool needsReleaseNotify = setId > CAMERA3_STREAM_SET_ID_INVALID;
+ mBufferProducerListener = new BufferProducerListener(this, needsReleaseNotify);
// Subclasses expected to initialize mConsumer themselves
}
@@ -261,7 +256,7 @@
notifyBufferReleased(anwBuffer);
if (mUseBufferManager) {
// Return this buffer back to buffer manager.
- mBufferReleasedListener->onBufferReleased();
+ mBufferProducerListener->onBufferReleased();
}
} else {
if (mTraceFirstBuffer && (stream_type == CAMERA3_STREAM_OUTPUT)) {
@@ -387,8 +382,8 @@
// Configure consumer-side ANativeWindow interface. The listener may be used
// to notify buffer manager (if it is used) of the returned buffers.
res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
- /*listener*/mBufferReleasedListener,
- /*reportBufferRemoval*/true);
+ /*reportBufferRemoval*/true,
+ /*listener*/mBufferProducerListener);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mId);
@@ -790,7 +785,7 @@
return INVALID_OPERATION;
}
-void Camera3OutputStream::BufferReleasedListener::onBufferReleased() {
+void Camera3OutputStream::BufferProducerListener::onBufferReleased() {
sp<Camera3OutputStream> stream = mParent.promote();
if (stream == nullptr) {
ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
@@ -823,6 +818,25 @@
}
}
+void Camera3OutputStream::BufferProducerListener::onBuffersDiscarded(
+ const std::vector<sp<GraphicBuffer>>& buffers) {
+ sp<Camera3OutputStream> stream = mParent.promote();
+ if (stream == nullptr) {
+ ALOGV("%s: Parent camera3 output stream was destroyed", __FUNCTION__);
+ return;
+ }
+
+ if (buffers.size() > 0) {
+ Mutex::Autolock l(stream->mLock);
+ stream->onBuffersRemovedLocked(buffers);
+ if (stream->mUseBufferManager) {
+ stream->mBufferManager->onBuffersRemoved(stream->getId(),
+ stream->getStreamSetId(), buffers.size());
+ }
+ ALOGV("Stream %d: %zu Buffers discarded.", stream->getId(), buffers.size());
+ }
+}
+
void Camera3OutputStream::onBuffersRemovedLocked(
const std::vector<sp<GraphicBuffer>>& removedBuffers) {
sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 729c655..b4e49f9 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -146,18 +146,22 @@
*/
virtual status_t setConsumers(const std::vector<sp<Surface>>& consumers);
- class BufferReleasedListener : public BnProducerListener {
+ class BufferProducerListener : public SurfaceListener {
public:
- BufferReleasedListener(wp<Camera3OutputStream> parent) : mParent(parent) {}
+ BufferProducerListener(wp<Camera3OutputStream> parent, bool needsReleaseNotify)
+ : mParent(parent), mNeedsReleaseNotify(needsReleaseNotify) {}
- /**
- * Implementation of IProducerListener, used to notify this stream that the consumer
- * has returned a buffer and it is ready to return to Camera3BufferManager for reuse.
- */
- virtual void onBufferReleased();
+ /**
+ * Implementation of IProducerListener, used to notify this stream that the consumer
+ * has returned a buffer and it is ready to return to Camera3BufferManager for reuse.
+ */
+ virtual void onBufferReleased();
+ virtual bool needsReleaseNotify() { return mNeedsReleaseNotify; }
+ virtual void onBuffersDiscarded(const std::vector<sp<GraphicBuffer>>& buffers);
private:
- wp<Camera3OutputStream> mParent;
+ wp<Camera3OutputStream> mParent;
+ bool mNeedsReleaseNotify;
};
virtual status_t detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd);
@@ -262,10 +266,10 @@
sp<Camera3BufferManager> mBufferManager;
/**
- * Buffer released listener, used to notify the buffer manager that a buffer is released
- * from consumer side.
+ * Buffer producer listener, used to handle notifications when a buffer is released
+ * from the consumer side, or when a set of buffers is discarded by the consumer.
*/
- sp<BufferReleasedListener> mBufferReleasedListener;
+ sp<BufferProducerListener> mBufferProducerListener;
/**
* Flag indicating if the buffer manager is used to allocate the stream buffers
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index fd9b4b0..f707ef8 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -70,7 +70,7 @@
mFormatOverridden(false),
mOriginalFormat(format),
mDataSpaceOverridden(false),
- mOriginalDataSpace(HAL_DATASPACE_UNKNOWN),
+ mOriginalDataSpace(dataSpace),
mPhysicalCameraId(physicalCameraId),
mLastTimestamp(0) {
@@ -137,9 +137,6 @@
void Camera3Stream::setDataSpaceOverride(bool dataSpaceOverridden) {
mDataSpaceOverridden = dataSpaceOverridden;
- if (dataSpaceOverridden && mOriginalDataSpace == HAL_DATASPACE_UNKNOWN) {
- mOriginalDataSpace = camera3_stream::data_space;
- }
}
bool Camera3Stream::isDataSpaceOverridden() const {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 67afd0f..805df82 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -588,7 +588,7 @@
//Keep track of original dataSpace in case it gets overridden
bool mDataSpaceOverridden;
- android_dataspace mOriginalDataSpace;
+ const android_dataspace mOriginalDataSpace;
String8 mPhysicalCameraId;
nsecs_t mLastTimestamp;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
index 84c2ec7..80df7db 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.cpp
@@ -496,7 +496,7 @@
mInputSlots[bufferItem.mSlot].mFrameNumber = bufferItem.mFrameNumber;
} else {
SP_LOGE("%s: Invalid input graphic buffer!", __FUNCTION__);
- res = BAD_VALUE;
+ mOnFrameAvailableRes.store(BAD_VALUE);
return;
}
bufferId = bufferItem.mGraphicBuffer->getId();
@@ -541,6 +541,11 @@
mOnFrameAvailableRes.store(res);
}
+void Camera3StreamSplitter::onFrameReplaced(const BufferItem& item) {
+ ATRACE_CALL();
+ onFrameAvailable(item);
+}
+
void Camera3StreamSplitter::decrementBufRefCountLocked(uint64_t id, size_t surfaceId) {
ATRACE_CALL();
diff --git a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
index 960f7aa..4eb455a 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamSplitter.h
@@ -102,6 +102,13 @@
void onFrameAvailable(const BufferItem& item) override;
// From IConsumerListener
+ //
+ // Similar to onFrameAvailable, but the buffer item is replacing a buffer
+ // already in the buffer queue. This can happen when the buffer queue is in
+ // droppable mode.
+ void onFrameReplaced(const BufferItem& item) override;
+
+ // From IConsumerListener
// We don't care about released buffers because we detach each buffer as
// soon as we acquire it. See the comment for onBufferReleased below for
// some clarifying notes about the name.
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index b4e7c32..ec5e876 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -23,7 +23,6 @@
libcameraservice \
libhidlbase \
liblog \
- libhidltransport \
libcamera_client \
libcamera_metadata \
libutils \
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index 78d737d..084dc62 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -183,6 +183,7 @@
sp<TestICameraProvider> mTestCameraProvider;
TestInteractionProxy() {}
+
void setProvider(sp<TestICameraProvider> provider) {
mTestCameraProvider = provider;
}
@@ -199,13 +200,31 @@
return true;
}
+ virtual sp<hardware::camera::provider::V2_4::ICameraProvider> tryGetService(
+ const std::string &serviceName) override {
+ // If no provider has been given, act like the HAL isn't available and return null.
+ if (mTestCameraProvider == nullptr) return nullptr;
+ return getService(serviceName);
+ }
+
virtual sp<hardware::camera::provider::V2_4::ICameraProvider> getService(
const std::string &serviceName) override {
+ // If no provider has been given, fail; in reality, getService would
+ // block for HALs that don't start correctly, so we should never use
+ // getService when we don't have a valid HAL running
+ if (mTestCameraProvider == nullptr) {
+ ADD_FAILURE() << "getService called with no valid provider; would block indefinitely";
+ // Real getService would block, but that's bad in unit tests. So
+ // just record an error and return nullptr
+ return nullptr;
+ }
mLastRequestedServiceNames.push_back(serviceName);
return mTestCameraProvider;
}
virtual hardware::hidl_vec<hardware::hidl_string> listServices() override {
+ // Always provide a list even if there's no actual provider yet, to
+ // simulate stuck HAL situations as well
hardware::hidl_vec<hardware::hidl_string> ret = {"test/0"};
return ret;
}
@@ -438,3 +457,52 @@
<< "Unable to change device state";
}
+
+// Test that CameraProviderManager doesn't get stuck when the camera HAL isn't really working
+TEST(CameraProviderManagerTest, BadHalStartupTest) {
+
+ std::vector<hardware::hidl_string> deviceNames;
+ deviceNames.push_back("device@3.2/test/0");
+ deviceNames.push_back("device@1.0/test/0");
+ deviceNames.push_back("device@3.2/test/1");
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+ status_t res;
+
+ sp<CameraProviderManager> providerManager = new CameraProviderManager();
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+
+ // Not setting up provider in the service proxy yet, to test cases where a
+ // HAL isn't starting right
+ res = providerManager->initialize(statusListener, &serviceProxy);
+ ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+ // Now set up provider and trigger a registration
+ serviceProxy.setProvider(provider);
+ int numProviders = static_cast<int>(serviceProxy.listServices().size());
+
+ hardware::hidl_string testProviderFqInterfaceName =
+ "android.hardware.camera.provider@2.4::ICameraProvider";
+ hardware::hidl_string testProviderInstanceName = "test/0";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName,
+ testProviderInstanceName, false);
+
+ // Check that new provider is called once for all the init methods
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::SET_CALLBACK], numProviders) <<
+ "Only one call to setCallback per provider expected during register";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_VENDOR_TAGS], numProviders) <<
+ "Only one call to getVendorTags per provider expected during register";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::IS_SET_TORCH_MODE_SUPPORTED],
+ numProviders) <<
+ "Only one call to isSetTorchModeSupported per provider expected during init";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::GET_CAMERA_ID_LIST], numProviders) <<
+ "Only one call to getCameraIdList per provider expected during init";
+ EXPECT_EQ(provider->mCalledCounter[TestICameraProvider::NOTIFY_DEVICE_STATE], numProviders) <<
+ "Only one call to notifyDeviceState per provider expected during init";
+
+ ASSERT_EQ(serviceProxy.mLastRequestedServiceNames.back(), testProviderInstanceName) <<
+ "Incorrect instance requested from service manager";
+}
diff --git a/services/mediaanalytics/Android.bp b/services/mediaanalytics/Android.bp
index 72f4b52..dc72064 100644
--- a/services/mediaanalytics/Android.bp
+++ b/services/mediaanalytics/Android.bp
@@ -6,8 +6,32 @@
srcs: [
"main_mediametrics.cpp",
- "MediaAnalyticsService.cpp",
+ ],
+
+ shared_libs: [
+ "libbinder",
+ "liblog",
+ "libmediaanalyticsservice",
+ "libutils",
+ ],
+
+ init_rc: [
+ "mediametrics.rc",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+}
+
+cc_library_shared {
+ name: "libmediaanalyticsservice",
+
+ srcs: [
"iface_statsd.cpp",
+ "MediaAnalyticsService.cpp",
"statsd_audiopolicy.cpp",
"statsd_audiorecord.cpp",
"statsd_audiothread.cpp",
@@ -24,45 +48,25 @@
},
shared_libs: [
- "libcutils",
- "liblog",
- "libmedia",
- "libutils",
"libbinder",
- "libdl",
- "libgui",
- "libmedia",
- "libmediautils",
+ "liblog",
"libmediametrics",
- "libstagefright_foundation",
+ "libprotobuf-cpp-lite",
"libstatslog",
"libutils",
- "libprotobuf-cpp-lite",
],
static_libs: [
"libplatformprotos",
- "libregistermsext",
],
include_dirs: [
- "frameworks/av/media/libstagefright/include",
- "frameworks/av/media/libstagefright/rtsp",
- "frameworks/av/media/libstagefright/webm",
- "frameworks/av/include/media",
- "frameworks/av/include/camera",
- "frameworks/native/include/media/openmax",
- "frameworks/native/include/media/hardware",
- "external/tremolo/Tremolo",
+ "system/media/audio_utils/include",
],
- init_rc: ["mediametrics.rc"],
-
cflags: [
- "-Werror",
"-Wall",
- "-Wno-error=deprecated-declarations",
+ "-Werror",
+ "-Wextra",
],
- clang: true,
-
}
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index 0e7edfd..1ed8b74 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -14,66 +14,20 @@
* limitations under the License.
*/
-// Proxy for media player implementations
-
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaAnalyticsService"
#include <utils/Log.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <dirent.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#include <string.h>
-#include <pwd.h>
-
-#include <cutils/atomic.h>
-#include <cutils/properties.h> // for property_get
-
-#include <utils/misc.h>
-
-#include <android/content/pm/IPackageManagerNative.h>
-
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/MemoryHeapBase.h>
-#include <binder/MemoryBase.h>
-#include <gui/Surface.h>
-#include <utils/Errors.h> // for status_t
-#include <utils/List.h>
-#include <utils/String8.h>
-#include <utils/SystemClock.h>
-#include <utils/Timers.h>
-#include <utils/Vector.h>
-
-#include <media/IMediaHTTPService.h>
-#include <media/IRemoteDisplay.h>
-#include <media/IRemoteDisplayClient.h>
-#include <media/MediaPlayerInterface.h>
-#include <media/mediarecorder.h>
-#include <media/MediaMetadataRetrieverInterface.h>
-#include <media/Metadata.h>
-#include <media/AudioTrack.h>
-#include <media/MemoryLeakTrackUtil.h>
-#include <media/stagefright/MediaCodecList.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/Utils.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/ALooperRoster.h>
-#include <mediautils/BatteryNotifier.h>
-
-//#include <memunreachable/memunreachable.h>
-#include <system/audio.h>
-
-#include <private/android_filesystem_config.h>
-
#include "MediaAnalyticsService.h"
+#include <pwd.h> //getpwuid
+
+#include <audio_utils/clock.h> // clock conversions
+#include <android/content/pm/IPackageManagerNative.h> // package info
+#include <binder/IPCThreadState.h> // get calling uid
+#include <cutils/properties.h> // for property_get
+#include <private/android_filesystem_config.h> // UID
+
namespace android {
// individual records kept in memory: age or count
@@ -81,192 +35,143 @@
// count: hard limit of # records
// (0 for either of these disables that threshold)
//
-static constexpr nsecs_t kMaxRecordAgeNs = 28 * 3600 * (1000*1000*1000ll);
+static constexpr nsecs_t kMaxRecordAgeNs = 28 * 3600 * NANOS_PER_SECOND;
// 2019/6: average daily per device is currently 375-ish;
// setting this to 2000 is large enough to catch most devices
// we'll lose some data on very very media-active devices, but only for
// the gms collection; statsd will have already covered those for us.
// This also retains enough information to help with bugreports
-static constexpr int kMaxRecords = 2000;
+static constexpr size_t kMaxRecords = 2000;
// max we expire in a single call, to constrain how long we hold the
// mutex, which also constrains how long a client might wait.
-static constexpr int kMaxExpiredAtOnce = 50;
+static constexpr size_t kMaxExpiredAtOnce = 50;
// TODO: need to look at tuning kMaxRecords and friends for low-memory devices
-static const char *kServiceName = "media.metrics";
-
-void MediaAnalyticsService::instantiate() {
- defaultServiceManager()->addService(
- String16(kServiceName), new MediaAnalyticsService());
-}
-
MediaAnalyticsService::MediaAnalyticsService()
: mMaxRecords(kMaxRecords),
mMaxRecordAgeNs(kMaxRecordAgeNs),
mMaxRecordsExpiredAtOnce(kMaxExpiredAtOnce),
- mDumpProto(MediaAnalyticsItem::PROTO_V1),
- mDumpProtoDefault(MediaAnalyticsItem::PROTO_V1) {
-
- ALOGD("MediaAnalyticsService created");
-
- mItemsSubmitted = 0;
- mItemsFinalized = 0;
- mItemsDiscarded = 0;
- mItemsDiscardedExpire = 0;
- mItemsDiscardedCount = 0;
-
- mLastSessionID = 0;
- // recover any persistency we set up
- // etc
-}
-
-MediaAnalyticsService::~MediaAnalyticsService() {
- ALOGD("MediaAnalyticsService destroyed");
-
- while (mItems.size() > 0) {
- MediaAnalyticsItem * oitem = *(mItems.begin());
- mItems.erase(mItems.begin());
- delete oitem;
- mItemsDiscarded++;
- mItemsDiscardedCount++;
- }
-}
-
-
-MediaAnalyticsItem::SessionID_t MediaAnalyticsService::generateUniqueSessionID() {
- // generate a new sessionid
-
- Mutex::Autolock _l(mLock_ids);
- return (++mLastSessionID);
-}
-
-// caller surrenders ownership of 'item'
-MediaAnalyticsItem::SessionID_t MediaAnalyticsService::submit(MediaAnalyticsItem *item, bool forcenew)
+ mDumpProtoDefault(MediaAnalyticsItem::PROTO_V1)
{
- UNUSED(forcenew);
+ ALOGD("%s", __func__);
+}
- // fill in a sessionID if we do not yet have one
- if (item->getSessionID() <= MediaAnalyticsItem::SessionIDNone) {
- item->setSessionID(generateUniqueSessionID());
- }
+MediaAnalyticsService::~MediaAnalyticsService()
+{
+ ALOGD("%s", __func__);
+ // the class destructor clears anyhow, but we enforce clearing items first.
+ mItemsDiscarded += mItems.size();
+ mItems.clear();
+}
+status_t MediaAnalyticsService::submitInternal(MediaAnalyticsItem *item, bool release)
+{
// we control these, generally not trusting user input
nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
// round nsecs to seconds
- now = ((now + 500000000) / 1000000000) * 1000000000;
+ now = (now + NANOS_PER_SECOND / 2) / NANOS_PER_SECOND * NANOS_PER_SECOND;
+ // TODO: if we convert to boot time, do we need to round timestamp?
item->setTimestamp(now);
- int pid = IPCThreadState::self()->getCallingPid();
- int uid = IPCThreadState::self()->getCallingUid();
- int uid_given = item->getUid();
- int pid_given = item->getPid();
+ const int pid = IPCThreadState::self()->getCallingPid();
+ const int uid = IPCThreadState::self()->getCallingUid();
+ const int uid_given = item->getUid();
+ const int pid_given = item->getPid();
- // although we do make exceptions for some trusted client uids
- bool isTrusted = false;
-
- ALOGV("caller has uid=%d, embedded uid=%d", uid, uid_given);
-
- switch (uid) {
- case AID_MEDIA:
- case AID_MEDIA_CODEC:
- case AID_MEDIA_EX:
- case AID_MEDIA_DRM:
- // trusted source, only override default values
- isTrusted = true;
- if (uid_given == (-1)) {
- item->setUid(uid);
- }
- if (pid_given == (-1)) {
- item->setPid(pid);
- }
- break;
- default:
- isTrusted = false;
- item->setPid(pid);
+ ALOGV("%s: caller has uid=%d, embedded uid=%d", __func__, uid, uid_given);
+ bool isTrusted;
+ switch (uid) {
+ case AID_DRM:
+ case AID_MEDIA:
+ case AID_MEDIA_CODEC:
+ case AID_MEDIA_EX:
+ case AID_MEDIA_DRM:
+ // trusted source, only override default values
+ isTrusted = true;
+ if (uid_given == -1) {
item->setUid(uid);
- break;
+ }
+ if (pid_given == -1) {
+ item->setPid(pid);
+ }
+ break;
+ default:
+ isTrusted = false;
+ item->setPid(pid);
+ item->setUid(uid);
+ break;
}
// Overwrite package name and version if the caller was untrusted.
if (!isTrusted) {
- setPkgInfo(item, item->getUid(), true, true);
+ mUidInfo.setPkgInfo(item, item->getUid(), true, true);
} else if (item->getPkgName().empty()) {
- // empty, so fill out both parts
- setPkgInfo(item, item->getUid(), true, true);
+ // empty, so fill out both parts
+ mUidInfo.setPkgInfo(item, item->getUid(), true, true);
} else {
- // trusted, provided a package, do nothing
+ // trusted, provided a package, do nothing
}
- ALOGV("given uid %d; sanitized uid: %d sanitized pkg: %s "
- "sanitized pkg version: %" PRId64,
+ ALOGV("%s: given uid %d; sanitized uid: %d sanitized pkg: %s "
+ "sanitized pkg version: %lld",
+ __func__,
uid_given, item->getUid(),
item->getPkgName().c_str(),
- item->getPkgVersionCode());
+ (long long)item->getPkgVersionCode());
mItemsSubmitted++;
// validate the record; we discard if we don't like it
- if (contentValid(item, isTrusted) == false) {
- delete item;
- return MediaAnalyticsItem::SessionIDInvalid;
+ if (isContentValid(item, isTrusted) == false) {
+ if (release) delete item;
+ return PERMISSION_DENIED;
}
// XXX: if we have a sessionid in the new record, look to make
// sure it doesn't appear in the finalized list.
if (item->count() == 0) {
- ALOGV("dropping empty record...");
- delete item;
- item = NULL;
- return MediaAnalyticsItem::SessionIDInvalid;
+ ALOGV("%s: dropping empty record...", __func__);
+ if (release) delete item;
+ return BAD_VALUE;
}
- // save the new record
- //
- // send a copy to statsd
- dump2Statsd(item);
+ // send to statsd
+ extern bool dump2Statsd(MediaAnalyticsItem *item); // extern hook
+ (void)dump2Statsd(item); // failure should be logged in function.
- // and keep our copy for dumpsys
- MediaAnalyticsItem::SessionID_t id = item->getSessionID();
+ if (!release) item = item->dup();
saveItem(item);
- mItemsFinalized++;
-
- return id;
+ return NO_ERROR;
}
-
status_t MediaAnalyticsService::dump(int fd, const Vector<String16>& args)
{
- const size_t SIZE = 512;
- char buffer[SIZE];
String8 result;
if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
- snprintf(buffer, SIZE, "Permission Denial: "
+ result.appendFormat("Permission Denial: "
"can't dump MediaAnalyticsService from pid=%d, uid=%d\n",
IPCThreadState::self()->getCallingPid(),
IPCThreadState::self()->getCallingUid());
- result.append(buffer);
write(fd, result.string(), result.size());
return NO_ERROR;
}
// crack any parameters
- String16 protoOption("-proto");
+ const String16 protoOption("-proto");
int chosenProto = mDumpProtoDefault;
- String16 clearOption("-clear");
+ const String16 clearOption("-clear");
bool clear = false;
- String16 sinceOption("-since");
+ const String16 sinceOption("-since");
nsecs_t ts_since = 0;
- String16 helpOption("-help");
- String16 onlyOption("-only");
+ const String16 helpOption("-help");
+ const String16 onlyOption("-only");
std::string only;
- int n = args.size();
-
+ const int n = args.size();
for (int i = 0; i < n; i++) {
- String8 myarg(args[i]);
if (args[i] == clearOption) {
clear = true;
} else if (args[i] == protoOption) {
@@ -304,7 +209,7 @@
ts_since = 0;
}
// command line is milliseconds; internal units are nano-seconds
- ts_since *= 1000*1000;
+ ts_since *= NANOS_PER_MILLISECOND;
} else if (args[i] == onlyOption) {
i++;
if (i < n) {
@@ -312,6 +217,10 @@
only = value.string();
}
} else if (args[i] == helpOption) {
+ // TODO: consider function area dumping.
+ // dumpsys media.metrics audiotrack,codec
+ // or dumpsys media.metrics audiotrack codec
+
result.append("Recognized parameters:\n");
result.append("-help this help message\n");
result.append("-proto # dump using protocol #");
@@ -324,31 +233,18 @@
}
}
- Mutex::Autolock _l(mLock);
- // mutex between insertion and dumping the contents
+ {
+ std::lock_guard _l(mLock);
- mDumpProto = chosenProto;
+ result.appendFormat("Dump of the %s process:\n", kServiceName);
+ dumpHeaders_l(result, chosenProto, ts_since);
+ dumpRecent_l(result, chosenProto, ts_since, only.c_str());
- // we ALWAYS dump this piece
- snprintf(buffer, SIZE, "Dump of the %s process:\n", kServiceName);
- result.append(buffer);
-
- dumpHeaders(result, ts_since);
-
- dumpRecent(result, ts_since, only.c_str());
-
-
- if (clear) {
- // remove everything from the finalized queue
- while (mItems.size() > 0) {
- MediaAnalyticsItem * oitem = *(mItems.begin());
- mItems.erase(mItems.begin());
- delete oitem;
- mItemsDiscarded++;
+ if (clear) {
+ mItemsDiscarded += mItems.size();
+ mItems.clear();
+ // shall we clear the summary data too?
}
-
- // shall we clear the summary data too?
-
}
write(fd, result.string(), result.size());
@@ -356,275 +252,207 @@
}
// dump headers
-void MediaAnalyticsService::dumpHeaders(String8 &result, nsecs_t ts_since)
+void MediaAnalyticsService::dumpHeaders_l(String8 &result, int dumpProto, nsecs_t ts_since)
{
- const size_t SIZE = 512;
- char buffer[SIZE];
-
- snprintf(buffer, SIZE, "Protocol Version: %d\n", mDumpProto);
- result.append(buffer);
-
- int enabled = MediaAnalyticsItem::isEnabled();
- if (enabled) {
- snprintf(buffer, SIZE, "Metrics gathering: enabled\n");
+ result.appendFormat("Protocol Version: %d\n", dumpProto);
+ if (MediaAnalyticsItem::isEnabled()) {
+ result.append("Metrics gathering: enabled\n");
} else {
- snprintf(buffer, SIZE, "Metrics gathering: DISABLED via property\n");
+ result.append("Metrics gathering: DISABLED via property\n");
}
- result.append(buffer);
-
- snprintf(buffer, SIZE,
- "Since Boot: Submissions: %8" PRId64
- " Accepted: %8" PRId64 "\n",
- mItemsSubmitted, mItemsFinalized);
- result.append(buffer);
- snprintf(buffer, SIZE,
- "Records Discarded: %8" PRId64
- " (by Count: %" PRId64 " by Expiration: %" PRId64 ")\n",
- mItemsDiscarded, mItemsDiscardedCount, mItemsDiscardedExpire);
- result.append(buffer);
+ result.appendFormat(
+ "Since Boot: Submissions: %lld Accepted: %lld\n",
+ (long long)mItemsSubmitted.load(), (long long)mItemsFinalized);
+ result.appendFormat(
+ "Records Discarded: %lld (by Count: %lld by Expiration: %lld)\n",
+ (long long)mItemsDiscarded, (long long)mItemsDiscardedCount,
+ (long long)mItemsDiscardedExpire);
if (ts_since != 0) {
- snprintf(buffer, SIZE,
- "Emitting Queue entries more recent than: %" PRId64 "\n",
- (int64_t) ts_since);
- result.append(buffer);
+ result.appendFormat(
+ "Emitting Queue entries more recent than: %lld\n",
+ (long long)ts_since);
}
}
-// the recent, detailed queues
-void MediaAnalyticsService::dumpRecent(String8 &result, nsecs_t ts_since, const char * only)
+void MediaAnalyticsService::dumpRecent_l(
+ String8 &result, int dumpProto, nsecs_t ts_since, const char * only)
{
- const size_t SIZE = 512;
- char buffer[SIZE];
-
- if (only != NULL && *only == '\0') {
- only = NULL;
+ if (only != nullptr && *only == '\0') {
+ only = nullptr;
}
-
- // show the recently recorded records
- snprintf(buffer, sizeof(buffer), "\nFinalized Metrics (oldest first):\n");
- result.append(buffer);
- result.append(this->dumpQueue(ts_since, only));
+ result.append("\nFinalized Metrics (oldest first):\n");
+ dumpQueue_l(result, dumpProto, ts_since, only);
// show who is connected and injecting records?
// talk about # records fed to the 'readers'
// talk about # records we discarded, perhaps "discarded w/o reading" too
}
-// caller has locked mLock...
-String8 MediaAnalyticsService::dumpQueue() {
- return dumpQueue((nsecs_t) 0, NULL);
+void MediaAnalyticsService::dumpQueue_l(String8 &result, int dumpProto) {
+ dumpQueue_l(result, dumpProto, (nsecs_t) 0, nullptr /* only */);
}
-String8 MediaAnalyticsService::dumpQueue(nsecs_t ts_since, const char * only) {
- String8 result;
+void MediaAnalyticsService::dumpQueue_l(
+ String8 &result, int dumpProto, nsecs_t ts_since, const char * only) {
int slot = 0;
if (mItems.empty()) {
- result.append("empty\n");
+ result.append("empty\n");
} else {
- List<MediaAnalyticsItem *>::iterator it = mItems.begin();
- for (; it != mItems.end(); it++) {
- nsecs_t when = (*it)->getTimestamp();
+ for (const auto &item : mItems) {
+ nsecs_t when = item->getTimestamp();
if (when < ts_since) {
continue;
}
- if (only != NULL &&
- strcmp(only, (*it)->getKey().c_str()) != 0) {
- ALOGV("Omit '%s', it's not '%s'", (*it)->getKey().c_str(), only);
+ // TODO: Only should be a set<string>
+ if (only != nullptr &&
+ item->getKey() /* std::string */ != only) {
+ ALOGV("%s: omit '%s', it's not '%s'",
+ __func__, item->getKey().c_str(), only);
continue;
}
- std::string entry = (*it)->toString(mDumpProto);
- result.appendFormat("%5d: %s\n", slot, entry.c_str());
+ result.appendFormat("%5d: %s\n",
+ slot, item->toString(dumpProto).c_str());
slot++;
}
}
-
- return result;
}
//
// Our Cheap in-core, non-persistent records management.
-
-// we hold mLock when we get here
// if item != NULL, it's the item we just inserted
// true == more items eligible to be recovered
bool MediaAnalyticsService::expirations_l(MediaAnalyticsItem *item)
{
bool more = false;
- int handled = 0;
- // keep removing old records the front until we're in-bounds (count)
- // since we invoke this with each insertion, it should be 0/1 iterations.
- if (mMaxRecords > 0) {
- while (mItems.size() > (size_t) mMaxRecords) {
- MediaAnalyticsItem * oitem = *(mItems.begin());
- if (oitem == item) {
- break;
- }
- if (handled >= mMaxRecordsExpiredAtOnce) {
- // unlikely in this loop
- more = true;
- break;
- }
- handled++;
- mItems.erase(mItems.begin());
- delete oitem;
- mItemsDiscarded++;
- mItemsDiscardedCount++;
+ // check queue size
+ size_t overlimit = 0;
+ if (mMaxRecords > 0 && mItems.size() > mMaxRecords) {
+ overlimit = mItems.size() - mMaxRecords;
+ if (overlimit > mMaxRecordsExpiredAtOnce) {
+ more = true;
+ overlimit = mMaxRecordsExpiredAtOnce;
}
}
- // keep removing old records the front until we're in-bounds (age)
- // limited to mMaxRecordsExpiredAtOnce items per invocation.
- if (mMaxRecordAgeNs > 0) {
- nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
- while (mItems.size() > 0) {
- MediaAnalyticsItem * oitem = *(mItems.begin());
+ // check queue times
+ size_t expired = 0;
+ if (!more && mMaxRecordAgeNs > 0) {
+ const nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+ // we check one at a time; a skip search would be more efficient.
+ size_t i = overlimit;
+ for (; i < mItems.size(); ++i) {
+ auto &oitem = mItems[i];
nsecs_t when = oitem->getTimestamp();
- if (oitem == item) {
+ if (oitem.get() == item) {
break;
}
- // careful about timejumps too
- if ((now > when) && (now-when) <= mMaxRecordAgeNs) {
- // this (and the rest) are recent enough to keep
- break;
+ if (now > when && (now - when) <= mMaxRecordAgeNs) {
+ break; // TODO: if we use BOOTTIME, should be monotonic.
}
- if (handled >= mMaxRecordsExpiredAtOnce) {
+ if (i >= mMaxRecordsExpiredAtOnce) {
// this represents "one too many"; tell caller there are
// more to be reclaimed.
more = true;
break;
}
- handled++;
- mItems.erase(mItems.begin());
- delete oitem;
- mItemsDiscarded++;
- mItemsDiscardedExpire++;
}
+ expired = i - overlimit;
}
- // we only indicate whether there's more to clean;
- // caller chooses whether to schedule further cleanup.
+ if (const size_t toErase = overlimit + expired;
+ toErase > 0) {
+ mItemsDiscardedCount += overlimit;
+ mItemsDiscardedExpire += expired;
+ mItemsDiscarded += toErase;
+ mItems.erase(mItems.begin(), mItems.begin() + toErase); // erase from front
+ }
return more;
}
-// process expirations in bite sized chunks, allowing new insertions through
-// runs in a pthread specifically started for this (which then exits)
-bool MediaAnalyticsService::processExpirations()
+void MediaAnalyticsService::processExpirations()
{
bool more;
do {
sleep(1);
- {
- Mutex::Autolock _l(mLock);
- more = expirations_l(NULL);
- if (!more) {
- break;
- }
- }
+ std::lock_guard _l(mLock);
+ more = expirations_l(nullptr);
} while (more);
- return true; // value is for std::future thread synchronization
}
-// insert appropriately into queue
-void MediaAnalyticsService::saveItem(MediaAnalyticsItem * item)
+void MediaAnalyticsService::saveItem(MediaAnalyticsItem *item)
{
-
- Mutex::Autolock _l(mLock);
- // mutex between insertion and dumping the contents
-
- // we want to dump 'in FIFO order', so insert at the end
- mItems.push_back(item);
-
- // clean old stuff from the queue
- bool more = expirations_l(item);
-
- // consider scheduling some asynchronous cleaning, if not running
- if (more) {
- if (!mExpireFuture.valid()
- || mExpireFuture.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
-
- mExpireFuture = std::async(std::launch::async, [this]()
- {return this->processExpirations();});
- }
+ std::lock_guard _l(mLock);
+ // we assume the items are roughly in time order.
+ mItems.emplace_back(item);
+ ++mItemsFinalized;
+ if (expirations_l(item)
+ && (!mExpireFuture.valid()
+ || mExpireFuture.wait_for(std::chrono::seconds(0)) == std::future_status::ready)) {
+ mExpireFuture = std::async(std::launch::async, [this] { processExpirations(); });
}
}
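
The scheduling trick in saveItem() above is worth calling out: a new asynchronous cleaner is launched only when no previous std::future is pending or one has already finished. A minimal standalone sketch of that pattern, with placeholder class and method names rather than the service's API:

#include <chrono>
#include <future>

class Cleaner {
public:
    // Launch a new async cleanup only if the previous one is absent or done.
    void maybeScheduleCleanup() {
        if (!mFuture.valid()
                || mFuture.wait_for(std::chrono::seconds(0)) == std::future_status::ready) {
            mFuture = std::async(std::launch::async, [this] { cleanup(); });
        }
    }

private:
    void cleanup() { /* prune expired records here */ }
    std::future<void> mFuture;  // in the service this is guarded by mLock
};
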
-static std::string allowedKeys[] =
+/* static */
+bool MediaAnalyticsService::isContentValid(const MediaAnalyticsItem *item, bool isTrusted)
{
- "audiopolicy",
- "audiorecord",
- "audiothread",
- "audiotrack",
- "codec",
- "extractor",
- "nuplayer",
-};
-
-static const int nAllowedKeys = sizeof(allowedKeys) / sizeof(allowedKeys[0]);
-
-// are the contents good
-bool MediaAnalyticsService::contentValid(MediaAnalyticsItem *item, bool isTrusted) {
-
+ if (isTrusted) return true;
// untrusted uids can only send us a limited set of keys
- if (isTrusted == false) {
- // restrict to a specific set of keys
- std::string key = item->getKey();
-
- size_t i;
- for(i = 0; i < nAllowedKeys; i++) {
- if (key == allowedKeys[i]) {
- break;
- }
- }
- if (i == nAllowedKeys) {
- ALOGD("Ignoring (key): %s", item->toString().c_str());
- return false;
+ const std::string &key = item->getKey();
+ for (const char *allowedKey : {
+ "audiopolicy",
+ "audiorecord",
+ "audiothread",
+ "audiotrack",
+ "codec",
+ "extractor",
+ "nuplayer",
+ }) {
+ if (key == allowedKey) {
+ return true;
}
}
-
- // internal consistency
-
- return true;
-}
-
-// are we rate limited, normally false
-bool MediaAnalyticsService::rateLimited(MediaAnalyticsItem *) {
-
+ ALOGD("%s: invalid key: %s", __func__, item->toString().c_str());
return false;
}
-// how long we hold package info before we re-fetch it
-#define PKG_EXPIRATION_NS (30*60*1000000000ll) // 30 minutes, in nsecs
+// are we rate limited, normally false
+bool MediaAnalyticsService::isRateLimited(MediaAnalyticsItem *) const
+{
+ return false;
+}
+
+// How long we hold package info before we re-fetch it
+constexpr nsecs_t PKG_EXPIRATION_NS = 30 * 60 * NANOS_PER_SECOND; // 30 minutes
// give me the package name, perhaps going to find it
// manages its own mutex operations internally
-void MediaAnalyticsService::setPkgInfo(MediaAnalyticsItem *item, uid_t uid, bool setName, bool setVersion)
+void MediaAnalyticsService::UidInfo::setPkgInfo(
+ MediaAnalyticsItem *item, uid_t uid, bool setName, bool setVersion)
{
- ALOGV("asking for packagename to go with uid=%d", uid);
+ ALOGV("%s: uid=%d", __func__, uid);
if (!setName && !setVersion) {
- // setting nothing? strange
- return;
+ return; // setting nothing? strange
}
- nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
- struct UidToPkgMap mapping;
- mapping.uid = (uid_t)(-1);
-
+ const nsecs_t now = systemTime(SYSTEM_TIME_REALTIME);
+ struct UidToPkgInfo mapping;
{
- Mutex::Autolock _l(mLock_mappings);
- int i = mPkgMappings.indexOfKey(uid);
- if (i >= 0) {
- mapping = mPkgMappings.valueAt(i);
- ALOGV("Expiration? uid %d expiration %" PRId64 " now %" PRId64,
- uid, mapping.expiration, now);
+ std::lock_guard _l(mUidInfoLock);
+ auto it = mPkgMappings.find(uid);
+ if (it != mPkgMappings.end()) {
+ mapping = it->second;
+ ALOGV("%s: uid %d expiration %lld now %lld",
+ __func__, uid, (long long)mapping.expiration, (long long)now);
if (mapping.expiration <= now) {
// purge the stale entry and fall into re-fetching
- ALOGV("entry for uid %d expired, now= %" PRId64 "", uid, now);
- mPkgMappings.removeItemsAt(i);
- mapping.uid = (uid_t)(-1);
+ ALOGV("%s: entry for uid %d expired, now %lld",
+ __func__, uid, (long long)now);
+ mPkgMappings.erase(it);
+ mapping.uid = (uid_t)-1; // this is always fully overwritten
}
}
}
@@ -632,115 +460,103 @@
// if we did not find it
if (mapping.uid == (uid_t)(-1)) {
std::string pkg;
- std::string installer = "";
+ std::string installer;
int64_t versionCode = 0;
- struct passwd *pw = getpwuid(uid);
+ const struct passwd *pw = getpwuid(uid);
if (pw) {
pkg = pw->pw_name;
}
- // find the proper value
-
- sp<IBinder> binder = NULL;
sp<IServiceManager> sm = defaultServiceManager();
- if (sm == NULL) {
- ALOGE("defaultServiceManager failed");
+ sp<content::pm::IPackageManagerNative> package_mgr;
+ if (sm.get() == nullptr) {
+ ALOGE("%s: Cannot find service manager", __func__);
} else {
- binder = sm->getService(String16("package_native"));
- if (binder == NULL) {
- ALOGE("getService package_native failed");
+ sp<IBinder> binder = sm->getService(String16("package_native"));
+ if (binder.get() == nullptr) {
+ ALOGE("%s: Cannot find package_native", __func__);
+ } else {
+ package_mgr = interface_cast<content::pm::IPackageManagerNative>(binder);
}
}
- if (binder != NULL) {
- sp<content::pm::IPackageManagerNative> package_mgr =
- interface_cast<content::pm::IPackageManagerNative>(binder);
- binder::Status status;
-
+ if (package_mgr != nullptr) {
std::vector<int> uids;
std::vector<std::string> names;
-
uids.push_back(uid);
-
- status = package_mgr->getNamesForUids(uids, &names);
+ binder::Status status = package_mgr->getNamesForUids(uids, &names);
if (!status.isOk()) {
- ALOGE("package_native::getNamesForUids failed: %s",
- status.exceptionMessage().c_str());
- } else {
- if (!names[0].empty()) {
- pkg = names[0].c_str();
- }
+ ALOGE("%s: getNamesForUids failed: %s",
+ __func__, status.exceptionMessage().c_str());
+ }
+ if (!names[0].empty()) {
+ pkg = names[0].c_str();
+ }
+ }
+
+ // strip any leading "shared:" strings that came back
+ if (pkg.compare(0, 7, "shared:") == 0) {
+ pkg.erase(0, 7);
+ }
+ // determine how pkg was installed and the versionCode
+ if (pkg.empty()) {
+ pkg = std::to_string(uid); // no name for us to manage
+ } else if (strchr(pkg.c_str(), '.') == NULL) {
+ // not of form 'com.whatever...'; assume internal and ok
+ } else if (strncmp(pkg.c_str(), "android.", 8) == 0) {
+ // android.* packages are assumed fine
+ } else if (package_mgr.get() != nullptr) {
+ String16 pkgName16(pkg.c_str());
+ binder::Status status = package_mgr->getInstallerForPackage(pkgName16, &installer);
+ if (!status.isOk()) {
+ ALOGE("%s: getInstallerForPackage failed: %s",
+ __func__, status.exceptionMessage().c_str());
}
- // strip any leading "shared:" strings that came back
- if (pkg.compare(0, 7, "shared:") == 0) {
- pkg.erase(0, 7);
- }
-
- // determine how pkg was installed and the versionCode
- //
- if (pkg.empty()) {
- // no name for us to manage
- } else if (strchr(pkg.c_str(), '.') == NULL) {
- // not of form 'com.whatever...'; assume internal and ok
- } else if (strncmp(pkg.c_str(), "android.", 8) == 0) {
- // android.* packages are assumed fine
- } else {
- String16 pkgName16(pkg.c_str());
- status = package_mgr->getInstallerForPackage(pkgName16, &installer);
+ // skip if we didn't get an installer
+ if (status.isOk()) {
+ status = package_mgr->getVersionCodeForPackage(pkgName16, &versionCode);
if (!status.isOk()) {
- ALOGE("package_native::getInstallerForPackage failed: %s",
- status.exceptionMessage().c_str());
- }
-
- // skip if we didn't get an installer
- if (status.isOk()) {
- status = package_mgr->getVersionCodeForPackage(pkgName16, &versionCode);
- if (!status.isOk()) {
- ALOGE("package_native::getVersionCodeForPackage failed: %s",
- status.exceptionMessage().c_str());
- }
- }
-
-
- ALOGV("package '%s' installed by '%s' versioncode %" PRId64 " / %" PRIx64,
- pkg.c_str(), installer.c_str(), versionCode, versionCode);
-
- if (strncmp(installer.c_str(), "com.android.", 12) == 0) {
- // from play store, we keep info
- } else if (strncmp(installer.c_str(), "com.google.", 11) == 0) {
- // some google source, we keep info
- } else if (strcmp(installer.c_str(), "preload") == 0) {
- // preloads, we keep the info
- } else if (installer.c_str()[0] == '\0') {
- // sideload (no installer); do not report
- pkg = "";
- versionCode = 0;
- } else {
- // unknown installer; do not report
- pkg = "";
- versionCode = 0;
+ ALOGE("%s: getVersionCodeForPackage failed: %s",
+ __func__, status.exceptionMessage().c_str());
}
}
+
+ ALOGV("%s: package '%s' installed by '%s' versioncode %lld",
+ __func__, pkg.c_str(), installer.c_str(), (long long)versionCode);
+
+ if (strncmp(installer.c_str(), "com.android.", 12) == 0) {
+ // from play store, we keep info
+ } else if (strncmp(installer.c_str(), "com.google.", 11) == 0) {
+ // some google source, we keep info
+ } else if (strcmp(installer.c_str(), "preload") == 0) {
+ // preloads, we keep the info
+ } else if (installer.c_str()[0] == '\0') {
+ // sideload (no installer); report UID only
+ pkg = std::to_string(uid);
+ versionCode = 0;
+ } else {
+ // unknown installer; report UID only
+ pkg = std::to_string(uid);
+ versionCode = 0;
+ }
+ } else {
+ // not validated by package_mgr; just send the uid.
+ pkg = std::to_string(uid);
}
// add it to the map, to save a subsequent lookup
- if (!pkg.empty()) {
- Mutex::Autolock _l(mLock_mappings);
- ALOGV("Adding uid %d pkg '%s'", uid, pkg.c_str());
- ssize_t i = mPkgMappings.indexOfKey(uid);
- if (i < 0) {
- mapping.uid = uid;
- mapping.pkg = pkg;
- mapping.installer = installer.c_str();
- mapping.versionCode = versionCode;
- mapping.expiration = now + PKG_EXPIRATION_NS;
- ALOGV("expiration for uid %d set to %" PRId64 "", uid, mapping.expiration);
-
- mPkgMappings.add(uid, mapping);
- }
- }
+ std::lock_guard _l(mUidInfoLock);
+ // always overwrite
+ mapping.uid = uid;
+ mapping.pkg = std::move(pkg);
+ mapping.installer = std::move(installer);
+ mapping.versionCode = versionCode;
+ mapping.expiration = now + PKG_EXPIRATION_NS;
+ ALOGV("%s: adding uid %d pkg '%s' expiration: %lld",
+ __func__, uid, mapping.pkg.c_str(), (long long)mapping.expiration);
+ mPkgMappings[uid] = mapping;
}
if (mapping.uid != (uid_t)(-1)) {
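
For readers skimming the rewritten expirations_l() above: it now computes how many records to drop from the front of the queue, first by count and then by age, erases them in a single call, and reports whether more work remains. A standalone sketch of that pruning logic, using placeholder types and omitting the statistics bookkeeping:

#include <cstdint>
#include <deque>

struct Record { int64_t timestampNs; };

// Returns true if more records remain eligible for pruning, so the caller
// can reschedule. A limit of 0 disables the corresponding check.
bool pruneFront(std::deque<Record> &items, size_t maxRecords,
        int64_t maxAgeNs, int64_t nowNs, size_t maxAtOnce) {
    bool more = false;
    size_t overlimit = 0;
    if (maxRecords > 0 && items.size() > maxRecords) {
        overlimit = items.size() - maxRecords;
        if (overlimit > maxAtOnce) {  // cap the work done per call
            more = true;
            overlimit = maxAtOnce;
        }
    }
    size_t expired = 0;
    if (!more && maxAgeNs > 0) {
        size_t i = overlimit;
        for (; i < items.size(); ++i) {
            const int64_t when = items[i].timestampNs;
            if (nowNs > when && nowNs - when <= maxAgeNs) {
                break;  // this record (and the rest) are recent enough to keep
            }
            if (i >= maxAtOnce) {
                more = true;  // one too many; leave the rest for later
                break;
            }
        }
        expired = i - overlimit;
    }
    items.erase(items.begin(), items.begin() + overlimit + expired);
    return more;
}
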
diff --git a/services/mediaanalytics/MediaAnalyticsService.h b/services/mediaanalytics/MediaAnalyticsService.h
index 6c9cbaa..eb7d725 100644
--- a/services/mediaanalytics/MediaAnalyticsService.h
+++ b/services/mediaanalytics/MediaAnalyticsService.h
@@ -14,109 +14,110 @@
* limitations under the License.
*/
+#pragma once
-#ifndef ANDROID_MEDIAANALYTICSSERVICE_H
-#define ANDROID_MEDIAANALYTICSSERVICE_H
-
-#include <arpa/inet.h>
-
-#include <utils/threads.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/String8.h>
-#include <utils/List.h>
-
+#include <atomic>
+#include <deque>
#include <future>
+#include <mutex>
+#include <unordered_map>
+// IMediaAnalyticsService must include Vector, String16, Errors
#include <media/IMediaAnalyticsService.h>
+#include <utils/String8.h>
namespace android {
class MediaAnalyticsService : public BnMediaAnalyticsService
{
+public:
+ MediaAnalyticsService();
+ ~MediaAnalyticsService() override;
- public:
+ /**
+ * Submits the indicated record to the mediaanalytics service.
+ *
+ * \param item the item to submit.
+ * \return status failure, which is negative on binder transaction failure.
+ * As the transaction is one-way, remote failures will not be reported.
+ */
+ status_t submit(MediaAnalyticsItem *item) override {
+ return submitInternal(item, false /* release */);
+ }
- // on this side, caller surrenders ownership
- virtual int64_t submit(MediaAnalyticsItem *item, bool forcenew);
+ status_t dump(int fd, const Vector<String16>& args) override;
- static void instantiate();
- virtual status_t dump(int fd, const Vector<String16>& args);
+ static constexpr const char * const kServiceName = "media.metrics";
- MediaAnalyticsService();
- virtual ~MediaAnalyticsService();
+protected:
- bool processExpirations();
+ // Internal call where release is true if ownership of item is transferred
+ // to the service (that is, the service will eventually delete the item).
+ status_t submitInternal(MediaAnalyticsItem *item, bool release) override;
- private:
- MediaAnalyticsItem::SessionID_t generateUniqueSessionID();
+private:
+ void processExpirations();
+ // input validation after arrival from client
+ static bool isContentValid(const MediaAnalyticsItem *item, bool isTrusted);
+ bool isRateLimited(MediaAnalyticsItem *) const;
+ void saveItem(MediaAnalyticsItem *);
- // statistics about our analytics
- int64_t mItemsSubmitted;
- int64_t mItemsFinalized;
- int64_t mItemsDiscarded;
- int64_t mItemsDiscardedExpire;
- int64_t mItemsDiscardedCount;
- MediaAnalyticsItem::SessionID_t mLastSessionID;
+ // The following methods are GUARDED_BY(mLock)
+ bool expirations_l(MediaAnalyticsItem *);
- // partitioned a bit so we don't over serialize
- mutable Mutex mLock;
- mutable Mutex mLock_ids;
- mutable Mutex mLock_mappings;
+ // support for generating output
+ void dumpQueue_l(String8 &result, int dumpProto);
+ void dumpQueue_l(String8 &result, int dumpProto, nsecs_t, const char *only);
+ void dumpHeaders_l(String8 &result, int dumpProto, nsecs_t ts_since);
+ void dumpSummaries_l(String8 &result, int dumpProto, nsecs_t ts_since, const char * only);
+ void dumpRecent_l(String8 &result, int dumpProto, nsecs_t ts_since, const char * only);
+
+ // The following variables are accessed without mLock
// limit how many records we'll retain
// by count (in each queue (open, finalized))
- int32_t mMaxRecords;
- // by time (none older than this long agan
- nsecs_t mMaxRecordAgeNs;
+ const size_t mMaxRecords;
+ // by time (none older than this)
+ const nsecs_t mMaxRecordAgeNs;
// max to expire per expirations_l() invocation
- int32_t mMaxRecordsExpiredAtOnce;
- //
- // # of sets of summaries
- int32_t mMaxRecordSets;
- // nsecs until we start a new record set
- nsecs_t mNewSetInterval;
+ const size_t mMaxRecordsExpiredAtOnce;
+ const int mDumpProtoDefault;
- // input validation after arrival from client
- bool contentValid(MediaAnalyticsItem *item, bool isTrusted);
- bool rateLimited(MediaAnalyticsItem *);
+ class UidInfo {
+ public:
+ void setPkgInfo(MediaAnalyticsItem *item, uid_t uid, bool setName, bool setVersion);
- // (oldest at front) so it prints nicely for dumpsys
- List<MediaAnalyticsItem *> mItems;
- void saveItem(MediaAnalyticsItem *);
+ private:
+ std::mutex mUidInfoLock;
- bool expirations_l(MediaAnalyticsItem *);
- std::future<bool> mExpireFuture;
+ struct UidToPkgInfo {
+ uid_t uid = -1;
+ std::string pkg;
+ std::string installer;
+ int64_t versionCode = 0;
+ nsecs_t expiration = 0; // TODO: remove expiration.
+ };
- // support for generating output
- int mDumpProto;
- int mDumpProtoDefault;
- String8 dumpQueue();
- String8 dumpQueue(nsecs_t, const char *only);
+ // TODO: use concurrent hashmap with striped lock.
+ std::unordered_map<uid_t, struct UidToPkgInfo> mPkgMappings; // GUARDED_BY(mUidInfoLock)
+ } mUidInfo; // mUidInfo can be accessed without lock (locked internally)
- void dumpHeaders(String8 &result, nsecs_t ts_since);
- void dumpSummaries(String8 &result, nsecs_t ts_since, const char * only);
- void dumpRecent(String8 &result, nsecs_t ts_since, const char * only);
+ std::atomic<int64_t> mItemsSubmitted{}; // accessed outside of lock.
- // mapping uids to package names
- struct UidToPkgMap {
- uid_t uid;
- std::string pkg;
- std::string installer;
- int64_t versionCode;
- nsecs_t expiration;
- };
+ std::mutex mLock;
+ // statistics about our analytics
+ int64_t mItemsFinalized = 0; // GUARDED_BY(mLock)
+ int64_t mItemsDiscarded = 0; // GUARDED_BY(mLock)
+ int64_t mItemsDiscardedExpire = 0; // GUARDED_BY(mLock)
+ int64_t mItemsDiscardedCount = 0; // GUARDED_BY(mLock)
- KeyedVector<uid_t,struct UidToPkgMap> mPkgMappings;
- void setPkgInfo(MediaAnalyticsItem *item, uid_t uid, bool setName, bool setVersion);
+ // If we have a worker thread to garbage collect
+ std::future<void> mExpireFuture; // GUARDED_BY(mLock)
+ // Our item queue, generally (oldest at front)
+ // TODO: Make separate class, use segmented queue, write lock only end.
+ // Note: Another analytics module might have ownership of an item longer than the log.
+ std::deque<std::shared_ptr<const MediaAnalyticsItem>> mItems; // GUARDED_BY(mLock)
};
-// hook to send things off to the statsd subsystem
-extern bool dump2Statsd(MediaAnalyticsItem *item);
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
-#endif // ANDROID_MEDIAANALYTICSSERVICE_H
+} // namespace android
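
The per-uid package-name cache declared above (UidInfo with mPkgMappings) follows a common expiring-cache shape: look up under a mutex, evict stale entries, and always overwrite on store. A self-contained sketch of that shape, with hypothetical names rather than the service's:

#include <cstdint>
#include <mutex>
#include <string>
#include <unordered_map>

struct PkgInfo {
    std::string pkg;
    int64_t versionCode = 0;
    int64_t expirationNs = 0;
};

class UidPkgCache {
public:
    // Returns true and fills *out on a fresh hit; stale entries are evicted.
    bool lookup(uint32_t uid, int64_t nowNs, PkgInfo *out) {
        std::lock_guard<std::mutex> guard(mLock);
        auto it = mMap.find(uid);
        if (it == mMap.end()) return false;
        if (it->second.expirationNs <= nowNs) {
            mMap.erase(it);  // purge the stale entry; caller re-fetches
            return false;
        }
        *out = it->second;
        return true;
    }

    void store(uint32_t uid, PkgInfo info, int64_t nowNs, int64_t ttlNs) {
        std::lock_guard<std::mutex> guard(mLock);
        info.expirationNs = nowNs + ttlNs;
        mMap[uid] = std::move(info);  // always overwrite
    }

private:
    std::mutex mLock;
    std::unordered_map<uint32_t, PkgInfo> mMap;
};
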
diff --git a/services/mediaanalytics/OWNERS b/services/mediaanalytics/OWNERS
index 9af258b..e37a1f8 100644
--- a/services/mediaanalytics/OWNERS
+++ b/services/mediaanalytics/OWNERS
@@ -1 +1,2 @@
essick@google.com
+hunga@google.com
diff --git a/services/mediaanalytics/iface_statsd.cpp b/services/mediaanalytics/iface_statsd.cpp
index 6845f06..e02c9cf 100644
--- a/services/mediaanalytics/iface_statsd.cpp
+++ b/services/mediaanalytics/iface_statsd.cpp
@@ -52,7 +52,7 @@
};
// keep this sorted, so we can do binary searches
-struct statsd_hooks statsd_handlers[] =
+static constexpr struct statsd_hooks statsd_handlers[] =
{
{ "audiopolicy", statsd_audiopolicy },
{ "audiorecord", statsd_audiorecord },
@@ -60,6 +60,7 @@
{ "audiotrack", statsd_audiotrack },
{ "codec", statsd_codec},
{ "drm.vendor.Google.WidevineCDM", statsd_widevineCDM },
+ { "drmmanager", statsd_drmmanager },
{ "extractor", statsd_extractor },
{ "mediadrm", statsd_mediadrm },
{ "nuplayer", statsd_nuplayer },
@@ -67,7 +68,6 @@
{ "recorder", statsd_recorder },
};
-
// give me a record, i'll look at the type and upload appropriately
bool dump2Statsd(MediaAnalyticsItem *item) {
if (item == NULL) return false;
@@ -80,10 +80,9 @@
return false;
}
- int i;
- for(i = 0;i < sizeof(statsd_handlers) / sizeof(statsd_handlers[0]) ; i++) {
- if (key == statsd_handlers[i].key) {
- return (*statsd_handlers[i].handler)(item);
+ for (const auto &statsd_handler : statsd_handlers) {
+ if (key == statsd_handler.key) {
+ return statsd_handler.handler(item);
}
}
return false;
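
dump2Statsd() above dispatches on the item key through a small constant table, now iterated with a range-for. A minimal sketch of that table-driven dispatch with stand-in types (the real handlers take a MediaAnalyticsItem*):

#include <string>

struct Item { std::string key; };
using Handler = bool (*)(const Item &);

static bool handleCodec(const Item &) { return true; }      // stand-in handler
static bool handleExtractor(const Item &) { return true; }  // stand-in handler

static constexpr struct { const char *key; Handler handler; } kHandlers[] = {
    { "codec", handleCodec },
    { "extractor", handleExtractor },
};

bool dispatch(const Item &item) {
    for (const auto &entry : kHandlers) {
        if (item.key == entry.key) {
            return entry.handler(item);
        }
    }
    return false;  // unknown key: nothing uploaded
}
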
diff --git a/services/mediaanalytics/iface_statsd.h b/services/mediaanalytics/iface_statsd.h
index f85d303..014929b 100644
--- a/services/mediaanalytics/iface_statsd.h
+++ b/services/mediaanalytics/iface_statsd.h
@@ -30,5 +30,6 @@
extern bool statsd_mediadrm(MediaAnalyticsItem *);
extern bool statsd_widevineCDM(MediaAnalyticsItem *);
+extern bool statsd_drmmanager(MediaAnalyticsItem *);
} // namespace android
diff --git a/services/mediaanalytics/main_mediametrics.cpp b/services/mediaanalytics/main_mediametrics.cpp
index 8020a03..6833fe2 100644
--- a/services/mediaanalytics/main_mediametrics.cpp
+++ b/services/mediaanalytics/main_mediametrics.cpp
@@ -16,33 +16,33 @@
#define LOG_TAG "mediametrics"
//#define LOG_NDEBUG 0
-
-#include <binder/IPCThreadState.h>
-#include <binder/ProcessState.h>
-#include <binder/IServiceManager.h>
#include <utils/Log.h>
-//#include "RegisterExtensions.h"
-// from LOCAL_C_INCLUDES
#include "MediaAnalyticsService.h"
-using namespace android;
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/ProcessState.h>
+
int main(int argc __unused, char **argv __unused)
{
+ using namespace android;
+
signal(SIGPIPE, SIG_IGN);
// to match the service name
// we're replacing "/system/bin/mediametrics" with "media.metrics"
// we add a ".", but discard the path components: we finish with a shorter string
- strcpy(argv[0], "media.metrics");
+ strcpy(argv[0], MediaAnalyticsService::kServiceName);
- sp<ProcessState> proc(ProcessState::self());
- sp<IServiceManager> sm(defaultServiceManager());
- ALOGI("ServiceManager: %p", sm.get());
+ defaultServiceManager()->addService(
+ String16(MediaAnalyticsService::kServiceName), new MediaAnalyticsService());
- MediaAnalyticsService::instantiate();
-
- ProcessState::self()->startThreadPool();
+ sp<ProcessState> processState(ProcessState::self());
+ // processState->setThreadPoolMaxThreadCount(8);
+ processState->startThreadPool();
IPCThreadState::self()->joinThreadPool();
+
+ return EXIT_SUCCESS;
}
diff --git a/services/mediaanalytics/statsd_audiopolicy.cpp b/services/mediaanalytics/statsd_audiopolicy.cpp
index 06c4dde..95cb274 100644
--- a/services/mediaanalytics/statsd_audiopolicy.cpp
+++ b/services/mediaanalytics/statsd_audiopolicy.cpp
@@ -60,14 +60,14 @@
metrics_proto.set_status(status);
}
//string char kAudioPolicyRqstSrc[] = "android.media.audiopolicy.rqst.src";
- char *rqst_src = NULL;
- if (item->getCString("android.media.audiopolicy.rqst.src", &rqst_src)) {
- metrics_proto.set_request_source(rqst_src);
+ std::string rqst_src;
+ if (item->getString("android.media.audiopolicy.rqst.src", &rqst_src)) {
+ metrics_proto.set_request_source(std::move(rqst_src));
}
//string char kAudioPolicyRqstPkg[] = "android.media.audiopolicy.rqst.pkg";
- char *rqst_pkg = NULL;
- if (item->getCString("android.media.audiopolicy.rqst.pkg", &rqst_pkg)) {
- metrics_proto.set_request_package(rqst_pkg);
+ std::string rqst_pkg;
+ if (item->getString("android.media.audiopolicy.rqst.pkg", &rqst_pkg)) {
+ metrics_proto.set_request_package(std::move(rqst_pkg));
}
//int32 char kAudioPolicyRqstSession[] = "android.media.audiopolicy.rqst.session";
int32_t rqst_session = -1;
@@ -75,20 +75,20 @@
metrics_proto.set_request_session(rqst_session);
}
//string char kAudioPolicyRqstDevice[] = "android.media.audiopolicy.rqst.device";
- char *rqst_device = NULL;
- if (item->getCString("android.media.audiopolicy.rqst.device", &rqst_device)) {
- metrics_proto.set_request_device(rqst_device);
+ std::string rqst_device;
+ if (item->getString("android.media.audiopolicy.rqst.device", &rqst_device)) {
+ metrics_proto.set_request_device(std::move(rqst_device));
}
//string char kAudioPolicyActiveSrc[] = "android.media.audiopolicy.active.src";
- char *active_src = NULL;
- if (item->getCString("android.media.audiopolicy.active.src", &active_src)) {
- metrics_proto.set_active_source(active_src);
+ std::string active_src;
+ if (item->getString("android.media.audiopolicy.active.src", &active_src)) {
+ metrics_proto.set_active_source(std::move(active_src));
}
//string char kAudioPolicyActivePkg[] = "android.media.audiopolicy.active.pkg";
- char *active_pkg = NULL;
- if (item->getCString("android.media.audiopolicy.active.pkg", &active_pkg)) {
- metrics_proto.set_active_package(active_pkg);
+ std::string active_pkg;
+ if (item->getString("android.media.audiopolicy.active.pkg", &active_pkg)) {
+ metrics_proto.set_active_package(std::move(active_pkg));
}
//int32 char kAudioPolicyActiveSession[] = "android.media.audiopolicy.active.session";
int32_t active_session = -1;
@@ -96,9 +96,9 @@
metrics_proto.set_active_session(active_session);
}
//string char kAudioPolicyActiveDevice[] = "android.media.audiopolicy.active.device";
- char *active_device = NULL;
- if (item->getCString("android.media.audiopolicy.active.device", &active_device)) {
- metrics_proto.set_active_device(active_device);
+ std::string active_device;
+ if (item->getString("android.media.audiopolicy.active.device", &active_device)) {
+ metrics_proto.set_active_device(std::move(active_device));
}
@@ -119,14 +119,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(rqst_src);
- free(rqst_pkg);
- free(rqst_device);
- free(active_src);
- free(active_pkg);
- free(active_device);
-
return true;
}
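
This file and the statsd_* handlers that follow all make the same mechanical change: getCString() results that had to be free()d are replaced by getString() into a std::string, which is then moved into the proto setter. A small before/after sketch of the pattern, with placeholder getter and proto names:

#include <cstdlib>
#include <cstring>
#include <string>
#include <utility>

struct Proto {
    std::string source;
    void set_source(std::string s) { source = std::move(s); }
};

// Old-style accessor: caller owns the returned buffer and must free() it.
static bool getCString(const char *value, char **out) { *out = strdup(value); return true; }
// New-style accessor: the value lands in a std::string; nothing to free.
static bool getString(const char *value, std::string *out) { *out = value; return true; }

static void fillOld(Proto &proto) {
    char *source = nullptr;
    if (getCString("mic", &source)) proto.set_source(source);
    free(source);  // easy to miss, especially on early returns
}

static void fillNew(Proto &proto) {
    std::string source;
    if (getString("mic", &source)) proto.set_source(std::move(source));
}  // no manual cleanup needed
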
diff --git a/services/mediaanalytics/statsd_audiorecord.cpp b/services/mediaanalytics/statsd_audiorecord.cpp
index c9edb27..7c7a62c 100644
--- a/services/mediaanalytics/statsd_audiorecord.cpp
+++ b/services/mediaanalytics/statsd_audiorecord.cpp
@@ -54,14 +54,14 @@
// flesh out the protobuf we'll hand off with our data
//
- char *encoding = NULL;
- if (item->getCString("android.media.audiorecord.encoding", &encoding)) {
- metrics_proto.set_encoding(encoding);
+ std::string encoding;
+ if (item->getString("android.media.audiorecord.encoding", &encoding)) {
+ metrics_proto.set_encoding(std::move(encoding));
}
- char *source = NULL;
- if (item->getCString("android.media.audiorecord.source", &source)) {
- metrics_proto.set_source(source);
+ std::string source;
+ if (item->getString("android.media.audiorecord.source", &source)) {
+ metrics_proto.set_source(std::move(source));
}
int32_t latency = -1;
@@ -101,11 +101,11 @@
metrics_proto.set_error_code(errcode);
}
- char *errfunc = NULL;
- if (item->getCString("android.media.audiorecord.errfunc", &errfunc)) {
- metrics_proto.set_error_function(errfunc);
- } else if (item->getCString("android.media.audiorecord.lastError.at", &errfunc)) {
- metrics_proto.set_error_function(errfunc);
+ std::string errfunc;
+ if (item->getString("android.media.audiorecord.errfunc", &errfunc)) {
+ metrics_proto.set_error_function(std::move(errfunc));
+ } else if (item->getString("android.media.audiorecord.lastError.at", &errfunc)) {
+ metrics_proto.set_error_function(std::move(errfunc));
}
// portId (int32)
@@ -119,9 +119,9 @@
metrics_proto.set_frame_count(frameCount);
}
// attributes (string)
- char *attributes = NULL;
- if (item->getCString("android.media.audiorecord.attributes", &attributes)) {
- metrics_proto.set_attributes(attributes);
+ std::string attributes;
+ if (item->getString("android.media.audiorecord.attributes", &attributes)) {
+ metrics_proto.set_attributes(std::move(attributes));
}
// channelMask (int64)
int64_t channelMask = -1;
@@ -152,12 +152,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(encoding);
- free(source);
- free(errfunc);
- free(attributes);
-
return true;
}
diff --git a/services/mediaanalytics/statsd_audiothread.cpp b/services/mediaanalytics/statsd_audiothread.cpp
index 8232424..e9d6b17 100644
--- a/services/mediaanalytics/statsd_audiothread.cpp
+++ b/services/mediaanalytics/statsd_audiothread.cpp
@@ -56,9 +56,9 @@
// flesh out the protobuf we'll hand off with our data
//
- char *mytype = NULL;
- if (item->getCString(MM_PREFIX "type", &mytype)) {
- metrics_proto.set_type(mytype);
+ std::string mytype;
+ if (item->getString(MM_PREFIX "type", &mytype)) {
+ metrics_proto.set_type(std::move(mytype));
}
int32_t framecount = -1;
if (item->getInt32(MM_PREFIX "framecount", &framecount)) {
@@ -68,17 +68,17 @@
if (item->getInt32(MM_PREFIX "samplerate", &samplerate)) {
metrics_proto.set_samplerate(samplerate);
}
- char *workhist = NULL;
- if (item->getCString(MM_PREFIX "workMs.hist", &workhist)) {
- metrics_proto.set_work_millis_hist(workhist);
+ std::string workhist;
+ if (item->getString(MM_PREFIX "workMs.hist", &workhist)) {
+ metrics_proto.set_work_millis_hist(std::move(workhist));
}
- char *latencyhist = NULL;
- if (item->getCString(MM_PREFIX "latencyMs.hist", &latencyhist)) {
- metrics_proto.set_latency_millis_hist(latencyhist);
+ std::string latencyhist;
+ if (item->getString(MM_PREFIX "latencyMs.hist", &latencyhist)) {
+ metrics_proto.set_latency_millis_hist(std::move(latencyhist));
}
- char *warmuphist = NULL;
- if (item->getCString(MM_PREFIX "warmupMs.hist", &warmuphist)) {
- metrics_proto.set_warmup_millis_hist(warmuphist);
+ std::string warmuphist;
+ if (item->getString(MM_PREFIX "warmupMs.hist", &warmuphist)) {
+ metrics_proto.set_warmup_millis_hist(std::move(warmuphist));
}
int64_t underruns = -1;
if (item->getInt64(MM_PREFIX "underruns", &underruns)) {
@@ -108,9 +108,9 @@
metrics_proto.set_port_id(port_id);
}
// item->setCString(MM_PREFIX "type", threadTypeToString(mType));
- char *type = NULL;
- if (item->getCString(MM_PREFIX "type", &type)) {
- metrics_proto.set_type(type);
+ std::string type;
+ if (item->getString(MM_PREFIX "type", &type)) {
+ metrics_proto.set_type(std::move(type));
}
// item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
int32_t sample_rate = -1;
@@ -123,9 +123,9 @@
metrics_proto.set_channel_mask(channel_mask);
}
// item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
- char *encoding = NULL;
- if (item->getCString(MM_PREFIX "encoding", &encoding)) {
- metrics_proto.set_encoding(encoding);
+ std::string encoding;
+ if (item->getString(MM_PREFIX "encoding", &encoding)) {
+ metrics_proto.set_encoding(std::move(encoding));
}
// item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
int32_t frame_count = -1;
@@ -133,14 +133,14 @@
metrics_proto.set_frame_count(frame_count);
}
// item->setCString(MM_PREFIX "outDevice", toString(mOutDevice).c_str());
- char *outDevice = NULL;
- if (item->getCString(MM_PREFIX "outDevice", &outDevice)) {
- metrics_proto.set_output_device(outDevice);
+ std::string outDevice;
+ if (item->getString(MM_PREFIX "outDevice", &outDevice)) {
+ metrics_proto.set_output_device(std::move(outDevice));
}
// item->setCString(MM_PREFIX "inDevice", toString(mInDevice).c_str());
- char *inDevice = NULL;
- if (item->getCString(MM_PREFIX "inDevice", &inDevice)) {
- metrics_proto.set_input_device(inDevice);
+ std::string inDevice;
+ if (item->getString(MM_PREFIX "inDevice", &inDevice)) {
+ metrics_proto.set_input_device(std::move(inDevice));
}
// item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
double iojitters_ms_mean = -1;
@@ -201,16 +201,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(mytype);
- free(workhist);
- free(latencyhist);
- free(warmuphist);
- free(type);
- free(encoding);
- free(inDevice);
- free(outDevice);
-
return true;
}
diff --git a/services/mediaanalytics/statsd_audiotrack.cpp b/services/mediaanalytics/statsd_audiotrack.cpp
index f250ced..57cda99 100644
--- a/services/mediaanalytics/statsd_audiotrack.cpp
+++ b/services/mediaanalytics/statsd_audiotrack.cpp
@@ -57,23 +57,23 @@
// static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
// optional string streamType;
- char *streamtype = NULL;
- if (item->getCString("android.media.audiotrack.streamtype", &streamtype)) {
- metrics_proto.set_stream_type(streamtype);
+ std::string streamtype;
+ if (item->getString("android.media.audiotrack.streamtype", &streamtype)) {
+ metrics_proto.set_stream_type(std::move(streamtype));
}
// static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
// optional string contentType;
- char *contenttype = NULL;
- if (item->getCString("android.media.audiotrack.type", &contenttype)) {
- metrics_proto.set_content_type(contenttype);
+ std::string contenttype;
+ if (item->getString("android.media.audiotrack.type", &contenttype)) {
+ metrics_proto.set_content_type(std::move(contenttype));
}
// static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
// optional string trackUsage;
- char *trackusage = NULL;
- if (item->getCString("android.media.audiotrack.usage", &trackusage)) {
- metrics_proto.set_track_usage(trackusage);
+ std::string trackusage;
+ if (item->getString("android.media.audiotrack.usage", &trackusage)) {
+ metrics_proto.set_track_usage(std::move(trackusage));
}
// static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
@@ -111,9 +111,9 @@
metrics_proto.set_port_id(port_id);
}
// encoding (string)
- char *encoding = NULL;
- if (item->getCString("android.media.audiotrack.encoding", &encoding)) {
- metrics_proto.set_encoding(encoding);
+ std::string encoding;
+ if (item->getString("android.media.audiotrack.encoding", &encoding)) {
+ metrics_proto.set_encoding(std::move(encoding));
}
// frameCount (int32)
int32_t frame_count = -1;
@@ -121,9 +121,9 @@
metrics_proto.set_frame_count(frame_count);
}
// attributes (string)
- char *attributes = NULL;
- if (item->getCString("android.media.audiotrack.attributes", &attributes)) {
- metrics_proto.set_attributes(attributes);
+ std::string attributes;
+ if (item->getString("android.media.audiotrack.attributes", &attributes)) {
+ metrics_proto.set_attributes(std::move(attributes));
}
std::string serialized;
@@ -143,13 +143,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(streamtype);
- free(contenttype);
- free(trackusage);
- free(encoding);
- free(attributes);
-
return true;
}
diff --git a/services/mediaanalytics/statsd_codec.cpp b/services/mediaanalytics/statsd_codec.cpp
index dc8e4ef..bf82e50 100644
--- a/services/mediaanalytics/statsd_codec.cpp
+++ b/services/mediaanalytics/statsd_codec.cpp
@@ -55,19 +55,19 @@
// flesh out the protobuf we'll hand off with our data
//
// android.media.mediacodec.codec string
- char *codec = NULL;
- if (item->getCString("android.media.mediacodec.codec", &codec)) {
- metrics_proto.set_codec(codec);
+ std::string codec;
+ if (item->getString("android.media.mediacodec.codec", &codec)) {
+ metrics_proto.set_codec(std::move(codec));
}
// android.media.mediacodec.mime string
- char *mime = NULL;
- if (item->getCString("android.media.mediacodec.mime", &mime)) {
- metrics_proto.set_mime(mime);
+ std::string mime;
+ if (item->getString("android.media.mediacodec.mime", &mime)) {
+ metrics_proto.set_mime(std::move(mime));
}
// android.media.mediacodec.mode string
- char *mode = NULL;
- if ( item->getCString("android.media.mediacodec.mode", &mode)) {
- metrics_proto.set_mode(mode);
+ std::string mode;
+ if ( item->getString("android.media.mediacodec.mode", &mode)) {
+ metrics_proto.set_mode(std::move(mode));
}
// android.media.mediacodec.encoder int32
int32_t encoder = -1;
@@ -125,9 +125,9 @@
metrics_proto.set_error_code(errcode);
}
// android.media.mediacodec.errstate string
- char *errstate = NULL;
- if ( item->getCString("android.media.mediacodec.errstate", &errstate)) {
- metrics_proto.set_error_state(errstate);
+ std::string errstate;
+ if ( item->getString("android.media.mediacodec.errstate", &errstate)) {
+ metrics_proto.set_error_state(std::move(errstate));
}
// android.media.mediacodec.latency.max int64
int64_t latency_max = -1;
@@ -173,12 +173,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(codec);
- free(mime);
- free(mode);
- free(errstate);
-
return true;
}
diff --git a/services/mediaanalytics/statsd_drm.cpp b/services/mediaanalytics/statsd_drm.cpp
index 902483a..845383d 100644
--- a/services/mediaanalytics/statsd_drm.cpp
+++ b/services/mediaanalytics/statsd_drm.cpp
@@ -104,4 +104,38 @@
return true;
}
+// drmmanager
+bool statsd_drmmanager(MediaAnalyticsItem *item)
+{
+ if (item == NULL) return false;
+
+ nsecs_t timestamp = item->getTimestamp();
+ std::string pkgName = item->getPkgName();
+ int64_t pkgVersionCode = item->getPkgVersionCode();
+ int64_t mediaApexVersion = 0;
+
+ char *plugin_id = NULL;
+ (void) item->getCString("plugin_id", &plugin_id);
+ char *description = NULL;
+ (void) item->getCString("description", &description);
+ int32_t method_id = -1;
+ (void) item->getInt32("method_id", &method_id);
+ char *mime_types = NULL;
+ (void) item->getCString("mime_types", &mime_types);
+
+ if (enabled_statsd) {
+ android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
+ timestamp, pkgName.c_str(), pkgVersionCode,
+ mediaApexVersion,
+ plugin_id, description,
+ method_id, mime_types);
+ } else {
+ ALOGV("NOT sending: drmmanager data");
+ }
+
+ free(plugin_id);
+ free(description);
+ free(mime_types);
+ return true;
+}
} // namespace android
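
Worth noting: the newly added statsd_drmmanager() still uses the getCString()/free() style that the other handlers in this change migrate away from. Below is a sketch of the same body using the std::string accessors instead; it assumes the surrounding includes and statics of statsd_drm.cpp and is not what the patch ships.

bool statsd_drmmanager_sketch(MediaAnalyticsItem *item)
{
    if (item == nullptr) return false;

    const nsecs_t timestamp = item->getTimestamp();
    const std::string pkgName = item->getPkgName();
    const int64_t pkgVersionCode = item->getPkgVersionCode();
    const int64_t mediaApexVersion = 0;

    std::string plugin_id, description, mime_types;
    int32_t method_id = -1;
    (void)item->getString("plugin_id", &plugin_id);
    (void)item->getString("description", &description);
    (void)item->getInt32("method_id", &method_id);
    (void)item->getString("mime_types", &mime_types);

    if (enabled_statsd) {
        android::util::stats_write(android::util::MEDIAMETRICS_DRMMANAGER_REPORTED,
                timestamp, pkgName.c_str(), pkgVersionCode, mediaApexVersion,
                plugin_id.c_str(), description.c_str(), method_id, mime_types.c_str());
    } else {
        ALOGV("NOT sending: drmmanager data");
    }
    return true;  // no free() calls required
}
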
diff --git a/services/mediaanalytics/statsd_extractor.cpp b/services/mediaanalytics/statsd_extractor.cpp
index 395c912..d84930c 100644
--- a/services/mediaanalytics/statsd_extractor.cpp
+++ b/services/mediaanalytics/statsd_extractor.cpp
@@ -56,14 +56,14 @@
//
// android.media.mediaextractor.fmt string
- char *fmt = NULL;
- if (item->getCString("android.media.mediaextractor.fmt", &fmt)) {
- metrics_proto.set_format(fmt);
+ std::string fmt;
+ if (item->getString("android.media.mediaextractor.fmt", &fmt)) {
+ metrics_proto.set_format(std::move(fmt));
}
// android.media.mediaextractor.mime string
- char *mime = NULL;
- if (item->getCString("android.media.mediaextractor.mime", &mime)) {
- metrics_proto.set_mime(mime);
+ std::string mime;
+ if (item->getString("android.media.mediaextractor.mime", &mime)) {
+ metrics_proto.set_mime(std::move(mime));
}
// android.media.mediaextractor.ntrk int32
int32_t ntrk = -1;
@@ -88,10 +88,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(fmt);
- free(mime);
-
return true;
}
diff --git a/services/mediaanalytics/statsd_nuplayer.cpp b/services/mediaanalytics/statsd_nuplayer.cpp
index 5ec118a..e6e0f2c 100644
--- a/services/mediaanalytics/statsd_nuplayer.cpp
+++ b/services/mediaanalytics/statsd_nuplayer.cpp
@@ -62,13 +62,13 @@
// differentiate between nuplayer and nuplayer2
metrics_proto.set_whichplayer(item->getKey().c_str());
- char *video_mime = NULL;
- if (item->getCString("android.media.mediaplayer.video.mime", &video_mime)) {
- metrics_proto.set_video_mime(video_mime);
+ std::string video_mime;
+ if (item->getString("android.media.mediaplayer.video.mime", &video_mime)) {
+ metrics_proto.set_video_mime(std::move(video_mime));
}
- char *video_codec = NULL;
- if (item->getCString("android.media.mediaplayer.video.codec", &video_codec)) {
- metrics_proto.set_video_codec(video_codec);
+ std::string video_codec;
+ if (item->getString("android.media.mediaplayer.video.codec", &video_codec)) {
+ metrics_proto.set_video_codec(std::move(video_codec));
}
int32_t width = -1;
@@ -97,13 +97,13 @@
metrics_proto.set_framerate(fps);
}
- char *audio_mime = NULL;
- if (item->getCString("android.media.mediaplayer.audio.mime", &audio_mime)) {
- metrics_proto.set_audio_mime(audio_mime);
+ std::string audio_mime;
+ if (item->getString("android.media.mediaplayer.audio.mime", &audio_mime)) {
+ metrics_proto.set_audio_mime(std::move(audio_mime));
}
- char *audio_codec = NULL;
- if (item->getCString("android.media.mediaplayer.audio.codec", &audio_codec)) {
- metrics_proto.set_audio_codec(audio_codec);
+ std::string audio_codec;
+ if (item->getString("android.media.mediaplayer.audio.codec", &audio_codec)) {
+ metrics_proto.set_audio_codec(std::move(audio_codec));
}
int64_t duration_ms = -1;
@@ -123,14 +123,14 @@
if (item->getInt32("android.media.mediaplayer.errcode", &error_code)) {
metrics_proto.set_error_code(error_code);
}
- char *error_state = NULL;
- if (item->getCString("android.media.mediaplayer.errstate", &error_state)) {
- metrics_proto.set_error_state(error_state);
+ std::string error_state;
+ if (item->getString("android.media.mediaplayer.errstate", &error_state)) {
+ metrics_proto.set_error_state(std::move(error_state));
}
- char *data_source_type = NULL;
- if (item->getCString("android.media.mediaplayer.dataSource", &data_source_type)) {
- metrics_proto.set_data_source_type(data_source_type);
+ std::string data_source_type;
+ if (item->getString("android.media.mediaplayer.dataSource", &data_source_type)) {
+ metrics_proto.set_data_source_type(std::move(data_source_type));
}
int64_t rebufferingMs = -1;
@@ -164,14 +164,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(video_mime);
- free(video_codec);
- free(audio_mime);
- free(audio_codec);
- free(error_state);
- free(data_source_type);
-
return true;
}
diff --git a/services/mediaanalytics/statsd_recorder.cpp b/services/mediaanalytics/statsd_recorder.cpp
index 4d981b4..d286f00 100644
--- a/services/mediaanalytics/statsd_recorder.cpp
+++ b/services/mediaanalytics/statsd_recorder.cpp
@@ -56,14 +56,14 @@
//
// string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
- char *audio_mime = NULL;
- if (item->getCString("android.media.mediarecorder.audio.mime", &audio_mime)) {
- metrics_proto.set_audio_mime(audio_mime);
+ std::string audio_mime;
+ if (item->getString("android.media.mediarecorder.audio.mime", &audio_mime)) {
+ metrics_proto.set_audio_mime(std::move(audio_mime));
}
// string kRecorderVideoMime = "android.media.mediarecorder.video.mime";
- char *video_mime = NULL;
- if (item->getCString("android.media.mediarecorder.video.mime", &video_mime)) {
- metrics_proto.set_video_mime(video_mime);
+ std::string video_mime;
+ if (item->getString("android.media.mediarecorder.video.mime", &video_mime)) {
+ metrics_proto.set_video_mime(std::move(video_mime));
}
// int32 kRecorderVideoProfile = "android.media.mediarecorder.video-encoder-profile";
int32_t videoProfile = -1;
@@ -183,10 +183,6 @@
ALOGV("NOT sending: private data (len=%zu)", strlen(serialized.c_str()));
}
- // must free the strings that we were given
- free(audio_mime);
- free(video_mime);
-
return true;
}
diff --git a/services/mediaanalytics/tests/Android.bp b/services/mediaanalytics/tests/Android.bp
new file mode 100644
index 0000000..7bca1c4
--- /dev/null
+++ b/services/mediaanalytics/tests/Android.bp
@@ -0,0 +1,25 @@
+cc_test {
+ name: "mediametrics_tests",
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+
+ include_dirs: [
+ "frameworks/av/services/mediaanalytics",
+ ],
+
+ shared_libs: [
+ "libbinder",
+ "liblog",
+ "libmediaanalyticsservice",
+ "libmediametrics",
+ "libutils",
+ ],
+
+ srcs: [
+ "mediametrics_tests.cpp",
+ ],
+}
diff --git a/services/mediaanalytics/tests/build_and_run_all_unit_tests.sh b/services/mediaanalytics/tests/build_and_run_all_unit_tests.sh
new file mode 100755
index 0000000..2511c30
--- /dev/null
+++ b/services/mediaanalytics/tests/build_and_run_all_unit_tests.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Run tests in this directory.
+#
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+ echo "Android build environment not set"
+ exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+mm
+
+echo "waiting for device"
+
+adb root && adb wait-for-device remount
+
+echo "========================================"
+
+echo "testing mediametrics"
+adb push $OUT/data/nativetest/mediametrics_tests/mediametrics_tests /system/bin
+adb shell /system/bin/mediametrics_tests
diff --git a/services/mediaanalytics/tests/mediametrics_tests.cpp b/services/mediaanalytics/tests/mediametrics_tests.cpp
new file mode 100644
index 0000000..7a6f5a4
--- /dev/null
+++ b/services/mediaanalytics/tests/mediametrics_tests.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "mediametrics_tests"
+#include <utils/Log.h>
+
+#include "MediaAnalyticsService.h"
+
+#include <stdio.h>
+
+#include <gtest/gtest.h>
+#include <media/MediaAnalyticsItem.h>
+
+using namespace android;
+
+TEST(mediametrics_tests, instantiate) {
+ sp mediaMetrics = new MediaAnalyticsService();
+ status_t status;
+
+ // random keys ignored when empty
+ std::unique_ptr<MediaAnalyticsItem> random_key(MediaAnalyticsItem::create("random_key"));
+ status = mediaMetrics->submit(random_key.get());
+ ASSERT_EQ(PERMISSION_DENIED, status);
+
+ // random keys ignored with data
+ random_key->setInt32("foo", 10);
+ status = mediaMetrics->submit(random_key.get());
+ ASSERT_EQ(PERMISSION_DENIED, status);
+
+ // known keys ignored if empty
+ std::unique_ptr<MediaAnalyticsItem> audiotrack_key(MediaAnalyticsItem::create("audiotrack"));
+ status = mediaMetrics->submit(audiotrack_key.get());
+ ASSERT_EQ(BAD_VALUE, status);
+
+ // known keys not ignored if not empty
+ audiotrack_key->addInt32("foo", 10);
+ status = mediaMetrics->submit(audiotrack_key.get());
+ ASSERT_EQ(NO_ERROR, status);
+
+
+ /*
+ // fluent style that goes directly to mediametrics
+ ASSERT_EQ(true, MediaAnalyticsItem("audiorecord")
+ .setInt32("value", 2)
+ .addInt32("bar", 1)
+ .addInt32("value", 3)
+ .selfrecord());
+ */
+
+ mediaMetrics->dump(fileno(stdout), {} /* args */);
+}
+
+TEST(mediametrics_tests, item_manipulation) {
+ MediaAnalyticsItem item("audiorecord");
+
+ item.setInt32("value", 2).addInt32("bar", 3).addInt32("value", 4);
+
+ int32_t i32;
+ ASSERT_TRUE(item.getInt32("value", &i32));
+ ASSERT_EQ(6, i32);
+
+ ASSERT_TRUE(item.getInt32("bar", &i32));
+ ASSERT_EQ(3, i32);
+
+ item.setInt64("big", INT64_MAX).setInt64("smaller", INT64_MAX - 1).addInt64("smaller", -2);
+
+ int64_t i64;
+ ASSERT_TRUE(item.getInt64("big", &i64));
+ ASSERT_EQ(INT64_MAX, i64);
+
+ ASSERT_TRUE(item.getInt64("smaller", &i64));
+ ASSERT_EQ(INT64_MAX - 3, i64);
+
+ item.setDouble("precise", 10.5).setDouble("small", 0.125).addDouble("precise", 0.25);
+
+ double d;
+ ASSERT_TRUE(item.getDouble("precise", &d));
+ ASSERT_EQ(10.75, d);
+
+ ASSERT_TRUE(item.getDouble("small", &d));
+ ASSERT_EQ(0.125, d);
+
+ char *s;
+ item.setCString("name", "Frank").setCString("mother", "June").setCString("mother", "July");
+ ASSERT_TRUE(item.getCString("name", &s));
+ ASSERT_EQ(0, strcmp(s, "Frank"));
+ free(s);
+
+ ASSERT_TRUE(item.getCString("mother", &s));
+ ASSERT_EQ(0, strcmp(s, "July")); // "July" overwrites "June"
+ free(s);
+
+ item.setRate("burgersPerHour", 5, 2);
+ int64_t b, h;
+ ASSERT_TRUE(item.getRate("burgersPerHour", &b, &h, &d));
+ ASSERT_EQ(5, b);
+ ASSERT_EQ(2, h);
+ ASSERT_EQ(2.5, d);
+
+ item.addRate("burgersPerHour", 4, 2);
+ ASSERT_TRUE(item.getRate("burgersPerHour", &b, &h, &d));
+ ASSERT_EQ(9, b);
+ ASSERT_EQ(4, h);
+ ASSERT_EQ(2.25, d);
+
+ printf("item: %s\n", item.toString().c_str());
+ fflush(stdout);
+
+ sp mediaMetrics = new MediaAnalyticsService();
+ status_t status = mediaMetrics->submit(&item);
+ ASSERT_EQ(NO_ERROR, status);
+ mediaMetrics->dump(fileno(stdout), {} /* args */);
+}
+
+TEST(mediametrics_tests, superbig_item) {
+ MediaAnalyticsItem item("TheBigOne");
+ constexpr size_t count = 10000;
+
+ for (size_t i = 0; i < count; ++i) {
+ item.setInt32(std::to_string(i).c_str(), i);
+ }
+ for (size_t i = 0; i < count; ++i) {
+ int32_t i32;
+ ASSERT_TRUE(item.getInt32(std::to_string(i).c_str(), &i32));
+ ASSERT_EQ((int32_t)i, i32);
+ }
+}
+
+TEST(mediametrics_tests, superbig_item_removal) {
+ MediaAnalyticsItem item("TheOddBigOne");
+ constexpr size_t count = 10000;
+
+ for (size_t i = 0; i < count; ++i) {
+ item.setInt32(std::to_string(i).c_str(), i);
+ }
+ for (size_t i = 0; i < count; i += 2) {
+ item.filter(std::to_string(i).c_str()); // filter out all the evens.
+ }
+ for (size_t i = 0; i < count; ++i) {
+ int32_t i32;
+ if (i & 1) { // check to see that only the odds are left.
+ ASSERT_TRUE(item.getInt32(std::to_string(i).c_str(), &i32));
+ ASSERT_EQ((int32_t)i, i32);
+ } else {
+ ASSERT_FALSE(item.getInt32(std::to_string(i).c_str(), &i32));
+ }
+ }
+}
+
+TEST(mediametrics_tests, item_transmutation) {
+ MediaAnalyticsItem item("Alchemist's Stone");
+
+ item.setInt64("convert", 123);
+ int64_t i64;
+ ASSERT_TRUE(item.getInt64("convert", &i64));
+ ASSERT_EQ(123, i64);
+
+ item.addInt32("convert", 2); // changes type of 'convert' from i64 to i32 (and re-init).
+ ASSERT_FALSE(item.getInt64("convert", &i64)); // should be false, no value in i64.
+
+ int32_t i32;
+ ASSERT_TRUE(item.getInt32("convert", &i32)); // check it is i32 and 2 (123 is discarded).
+ ASSERT_EQ(2, i32);
+}
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index 99a6d6b..36042a4 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -10,8 +10,6 @@
"libavservices_minijail",
"libbase",
"libhidlbase",
- "libhidltransport",
- "libhwbinder",
"liblog",
"libmedia_codecserviceregistrant",
],
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index ecc8408..d878d72 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -39,9 +39,7 @@
libbase \
libavservices_minijail_vendor \
libcutils \
- libhwbinder \
libhidlbase \
- libhidltransport \
libstagefright_omx \
libstagefright_xmlparser \
android.hardware.media.omx@1.0 \
diff --git a/services/mediacodec/registrant/Android.bp b/services/mediacodec/registrant/Android.bp
index e3893e5..fa5bc4a 100644
--- a/services/mediacodec/registrant/Android.bp
+++ b/services/mediacodec/registrant/Android.bp
@@ -42,8 +42,8 @@
"libcodec2_soft_opusenc",
"libcodec2_soft_vp8dec",
"libcodec2_soft_vp9dec",
- "libcodec2_soft_av1dec",
- "libcodec2_soft_gav1dec",
+ // "libcodec2_soft_av1dec_aom", // replaced by the gav1 implementation
+ "libcodec2_soft_av1dec_gav1",
"libcodec2_soft_vp8enc",
"libcodec2_soft_vp9enc",
"libcodec2_soft_rawdec",
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 227a29d..72d42ae 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -20,14 +20,18 @@
MediaDrmService.cpp \
main_mediadrmserver.cpp
+LOCAL_HEADER_LIBRARIES:= \
+ libmedia_headers \
+ libmediadrm_headers
+
LOCAL_SHARED_LIBRARIES:= \
libbinder \
liblog \
+ libmedia \
libmediadrm \
libutils \
libhidlbase \
libhidlmemory \
- libhidltransport \
android.hardware.drm@1.0 \
android.hardware.drm@1.1 \
android.hardware.drm@1.2
diff --git a/services/mediaextractor/Android.bp b/services/mediaextractor/Android.bp
index 98cc69f..828e89a 100644
--- a/services/mediaextractor/Android.bp
+++ b/services/mediaextractor/Android.bp
@@ -8,6 +8,7 @@
srcs: ["MediaExtractorService.cpp"],
shared_libs: [
+ "libdatasource",
"libmedia",
"libstagefright",
"libbinder",
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index 36e084b..6239fb2 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -20,8 +20,8 @@
#include <utils/Vector.h>
+#include <datasource/DataSourceFactory.h>
#include <media/DataSource.h>
-#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/RemoteDataSource.h>
@@ -55,7 +55,7 @@
sp<IDataSource> MediaExtractorService::makeIDataSource(int fd, int64_t offset, int64_t length)
{
- sp<DataSource> source = DataSourceFactory::CreateFromFd(fd, offset, length);
+ sp<DataSource> source = DataSourceFactory::getInstance()->CreateFromFd(fd, offset, length);
return CreateIDataSourceFromDataSource(source);
}
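
The change above tracks DataSourceFactory moving from libstagefright into libdatasource and from a static creator to an instance accessor; a minimal caller-side sketch of the new pattern, assuming only the header and call visible in this hunk:

    #include <datasource/DataSourceFactory.h>

    // Old: DataSourceFactory::CreateFromFd(fd, offset, length);
    // New: go through the factory instance.
    sp<DataSource> source =
            DataSourceFactory::getInstance()->CreateFromFd(fd, offset, length);
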
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
index bee5d25..74b63d5 100644
--- a/services/medialog/Android.bp
+++ b/services/medialog/Android.bp
@@ -6,6 +6,10 @@
"MediaLogService.cpp",
],
+ header_libs: [
+ "libmedia_headers",
+ ],
+
shared_libs: [
"libaudioutils",
"libbinder",
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index f3339a0..d468406 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -23,4 +23,6 @@
"-Wall",
],
+ export_include_dirs: ["."],
+
}
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 5a52b3d..ae832c7 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -23,6 +23,7 @@
#include <binder/IServiceManager.h>
#include <cutils/sched_policy.h>
#include <dirent.h>
+#include <media/MediaResourcePolicy.h>
#include <media/stagefright/ProcessInfo.h>
#include <mediautils/BatteryNotifier.h>
#include <mediautils/SchedulingPolicyService.h>
@@ -37,7 +38,7 @@
namespace android {
-namespace {
+namespace media {
class DeathNotifier : public IBinder::DeathRecipient {
public:
@@ -60,20 +61,18 @@
int64_t mClientId;
};
-} // namespace
-
template <typename T>
-static String8 getString(const Vector<T> &items) {
+static String8 getString(const std::vector<T> &items) {
String8 itemsStr;
for (size_t i = 0; i < items.size(); ++i) {
- itemsStr.appendFormat("%s ", items[i].toString().string());
+ itemsStr.appendFormat("%s ", toString(items[i]).string());
}
return itemsStr;
}
static bool hasResourceType(MediaResource::Type type, const ResourceList& resources) {
for (auto it = resources.begin(); it != resources.end(); it++) {
- if (it->second.mType == type) {
+ if (it->second.type == type) {
return true;
}
}
@@ -121,15 +120,15 @@
return infos.editValueAt(index);
}
-static void notifyResourceGranted(int pid, const Vector<MediaResource> &resources) {
+static void notifyResourceGranted(int pid, const std::vector<MediaResourceParcel> &resources) {
static const char* const kServiceName = "media_resource_monitor";
sp<IBinder> binder = defaultServiceManager()->checkService(String16(kServiceName));
if (binder != NULL) {
sp<IMediaResourceMonitor> service = interface_cast<IMediaResourceMonitor>(binder);
for (size_t i = 0; i < resources.size(); ++i) {
- if (resources[i].mSubType == MediaResource::kAudioCodec) {
+ if (resources[i].subType == MediaResource::SubType::kAudioCodec) {
service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_AUDIO_CODEC);
- } else if (resources[i].mSubType == MediaResource::kVideoCodec) {
+ } else if (resources[i].subType == MediaResource::SubType::kVideoCodec) {
service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_VIDEO_CODEC);
}
}
@@ -182,13 +181,18 @@
snprintf(buffer, SIZE, " Id: %lld\n", (long long)infos[j].clientId);
result.append(buffer);
- snprintf(buffer, SIZE, " Name: %s\n", infos[j].client->getName().string());
+ std::string clientName;
+ Status status = infos[j].client->getName(&clientName);
+ if (!status.isOk()) {
+ clientName = "<unknown client>";
+ }
+ snprintf(buffer, SIZE, " Name: %s\n", clientName.c_str());
result.append(buffer);
const ResourceList &resources = infos[j].resources;
result.append(" Resources:\n");
for (auto it = resources.begin(); it != resources.end(); it++) {
- snprintf(buffer, SIZE, " %s\n", it->second.toString().string());
+ snprintf(buffer, SIZE, " %s\n", toString(it->second).string());
result.append(buffer);
}
}
@@ -200,74 +204,120 @@
return OK;
}
-ResourceManagerService::ResourceManagerService()
- : ResourceManagerService(new ProcessInfo()) {}
+struct SystemCallbackImpl :
+ public ResourceManagerService::SystemCallbackInterface {
+ SystemCallbackImpl() {}
-ResourceManagerService::ResourceManagerService(sp<ProcessInfoInterface> processInfo)
+ virtual void noteStartVideo(int uid) override {
+ BatteryNotifier::getInstance().noteStartVideo(uid);
+ }
+ virtual void noteStopVideo(int uid) override {
+ BatteryNotifier::getInstance().noteStopVideo(uid);
+ }
+ virtual void noteResetVideo() override {
+ BatteryNotifier::getInstance().noteResetVideo();
+ }
+ virtual bool requestCpusetBoost(
+ bool enable, const sp<IInterface> &client) override {
+ return android::requestCpusetBoost(enable, client);
+ }
+
+protected:
+ virtual ~SystemCallbackImpl() {}
+
+private:
+ DISALLOW_EVIL_CONSTRUCTORS(SystemCallbackImpl);
+};
+
+ResourceManagerService::ResourceManagerService()
+ : ResourceManagerService(new ProcessInfo(), new SystemCallbackImpl()) {}
+
+ResourceManagerService::ResourceManagerService(
+ const sp<ProcessInfoInterface> &processInfo,
+ const sp<SystemCallbackInterface> &systemResource)
: mProcessInfo(processInfo),
+ mSystemCB(systemResource),
mServiceLog(new ServiceLog()),
mSupportsMultipleSecureCodecs(true),
mSupportsSecureWithNonSecureCodec(true),
mCpuBoostCount(0) {
- BatteryNotifier::getInstance().noteResetVideo();
+ mSystemCB->noteResetVideo();
}
ResourceManagerService::~ResourceManagerService() {}
-void ResourceManagerService::config(const Vector<MediaResourcePolicy> &policies) {
+Status ResourceManagerService::config(const std::vector<MediaResourcePolicyParcel>& policies) {
String8 log = String8::format("config(%s)", getString(policies).string());
mServiceLog->add(log);
Mutex::Autolock lock(mLock);
for (size_t i = 0; i < policies.size(); ++i) {
- String8 type = policies[i].mType;
- String8 value = policies[i].mValue;
- if (type == kPolicySupportsMultipleSecureCodecs) {
+ const std::string &type = policies[i].type;
+ const std::string &value = policies[i].value;
+ if (type == MediaResourcePolicy::kPolicySupportsMultipleSecureCodecs()) {
mSupportsMultipleSecureCodecs = (value == "true");
- } else if (type == kPolicySupportsSecureWithNonSecureCodec) {
+ } else if (type == MediaResourcePolicy::kPolicySupportsSecureWithNonSecureCodec()) {
mSupportsSecureWithNonSecureCodec = (value == "true");
}
}
+ return Status::ok();
}
void ResourceManagerService::onFirstAdded(
- const MediaResource& resource, const ResourceInfo& clientInfo) {
+ const MediaResourceParcel& resource, const ResourceInfo& clientInfo) {
// first time added
- if (resource.mType == MediaResource::kCpuBoost
- && resource.mSubType == MediaResource::kUnspecifiedSubType) {
+ if (resource.type == MediaResource::Type::kCpuBoost
+ && resource.subType == MediaResource::SubType::kUnspecifiedSubType) {
// Request it on every new instance of kCpuBoost, as the media.codec
// could have died; if we only do it the first time, subsequent instances
// never get the boost.
- if (requestCpusetBoost(true, this) != OK) {
+ if (mSystemCB->requestCpusetBoost(true, this) != OK) {
ALOGW("couldn't request cpuset boost");
}
mCpuBoostCount++;
- } else if (resource.mType == MediaResource::kBattery
- && resource.mSubType == MediaResource::kVideoCodec) {
- BatteryNotifier::getInstance().noteStartVideo(clientInfo.uid);
+ } else if (resource.type == MediaResource::Type::kBattery
+ && resource.subType == MediaResource::SubType::kVideoCodec) {
+ mSystemCB->noteStartVideo(clientInfo.uid);
}
}
void ResourceManagerService::onLastRemoved(
- const MediaResource& resource, const ResourceInfo& clientInfo) {
- if (resource.mType == MediaResource::kCpuBoost
- && resource.mSubType == MediaResource::kUnspecifiedSubType
+ const MediaResourceParcel& resource, const ResourceInfo& clientInfo) {
+ if (resource.type == MediaResource::Type::kCpuBoost
+ && resource.subType == MediaResource::SubType::kUnspecifiedSubType
&& mCpuBoostCount > 0) {
if (--mCpuBoostCount == 0) {
- requestCpusetBoost(false, this);
+ mSystemCB->requestCpusetBoost(false, this);
}
- } else if (resource.mType == MediaResource::kBattery
- && resource.mSubType == MediaResource::kVideoCodec) {
- BatteryNotifier::getInstance().noteStopVideo(clientInfo.uid);
+ } else if (resource.type == MediaResource::Type::kBattery
+ && resource.subType == MediaResource::SubType::kVideoCodec) {
+ mSystemCB->noteStopVideo(clientInfo.uid);
}
}
-void ResourceManagerService::addResource(
- int pid,
- int uid,
+void ResourceManagerService::mergeResources(
+ MediaResourceParcel& r1, const MediaResourceParcel& r2) {
+ // The resource entry on record is maintained to be in [0,INT64_MAX].
+ // Clamp if merging in the new resource value causes it to go out of bound.
+    // Note that the new resource value could be negative (e.g. for DrmSession,
+    // the value goes lower the more often the session is used). During reclaim,
+ // the session with the highest value (lowest usage) would be closed.
+ if (r2.value < INT64_MAX - r1.value) {
+ r1.value += r2.value;
+ if (r1.value < 0) {
+ r1.value = 0;
+ }
+ } else {
+ r1.value = INT64_MAX;
+ }
+}
+
+Status ResourceManagerService::addResource(
+ int32_t pid,
+ int32_t uid,
int64_t clientId,
- const sp<IResourceManagerClient> client,
- const Vector<MediaResource> &resources) {
+ const sp<IResourceManagerClient>& client,
+ const std::vector<MediaResourceParcel>& resources) {
String8 log = String8::format("addResource(pid %d, clientId %lld, resources %s)",
pid, (long long) clientId, getString(resources).string());
mServiceLog->add(log);
@@ -275,29 +325,43 @@
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(pid)) {
ALOGE("Rejected addResource call with invalid pid.");
- return;
+ return Status::fromServiceSpecificError(BAD_VALUE);
}
ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
ResourceInfo& info = getResourceInfoForEdit(uid, clientId, client, infos);
for (size_t i = 0; i < resources.size(); ++i) {
- const auto resType = std::make_pair(resources[i].mType, resources[i].mSubType);
+ const auto &res = resources[i];
+ const auto resType = std::tuple(res.type, res.subType, res.id);
+
+ if (res.value < 0 && res.type != MediaResource::Type::kDrmSession) {
+ ALOGW("Ignoring request to remove negative value of non-drm resource");
+ continue;
+ }
if (info.resources.find(resType) == info.resources.end()) {
- onFirstAdded(resources[i], info);
- info.resources[resType] = resources[i];
+ if (res.value <= 0) {
+ // We can't init a new entry with negative value, although it's allowed
+ // to merge in negative values after the initial add.
+ ALOGW("Ignoring request to add new resource entry with value <= 0");
+ continue;
+ }
+ onFirstAdded(res, info);
+ info.resources[resType] = res;
} else {
- info.resources[resType].mValue += resources[i].mValue;
+ mergeResources(info.resources[resType], res);
}
}
- if (info.deathNotifier == nullptr) {
+ if (info.deathNotifier == nullptr && client != nullptr) {
info.deathNotifier = new DeathNotifier(this, pid, clientId);
IInterface::asBinder(client)->linkToDeath(info.deathNotifier);
}
notifyResourceGranted(pid, resources);
+ return Status::ok();
}
-void ResourceManagerService::removeResource(int pid, int64_t clientId,
- const Vector<MediaResource> &resources) {
+Status ResourceManagerService::removeResource(
+ int32_t pid, int64_t clientId,
+ const std::vector<MediaResourceParcel>& resources) {
String8 log = String8::format("removeResource(pid %d, clientId %lld, resources %s)",
pid, (long long) clientId, getString(resources).string());
mServiceLog->add(log);
@@ -305,43 +369,51 @@
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(pid)) {
ALOGE("Rejected removeResource call with invalid pid.");
- return;
+ return Status::fromServiceSpecificError(BAD_VALUE);
}
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId);
- return;
+ return Status::ok();
}
ResourceInfos &infos = mMap.editValueAt(index);
index = infos.indexOfKey(clientId);
if (index < 0) {
ALOGV("removeResource: didn't find clientId %lld", (long long) clientId);
- return;
+ return Status::ok();
}
ResourceInfo &info = infos.editValueAt(index);
for (size_t i = 0; i < resources.size(); ++i) {
- const auto resType = std::make_pair(resources[i].mType, resources[i].mSubType);
+ const auto &res = resources[i];
+ const auto resType = std::tuple(res.type, res.subType, res.id);
+
+ if (res.value < 0) {
+ ALOGW("Ignoring request to remove negative value of resource");
+ continue;
+ }
// ignore if we don't have it
if (info.resources.find(resType) != info.resources.end()) {
- MediaResource &resource = info.resources[resType];
- if (resource.mValue > resources[i].mValue) {
- resource.mValue -= resources[i].mValue;
+ MediaResourceParcel &resource = info.resources[resType];
+ if (resource.value > res.value) {
+ resource.value -= res.value;
} else {
- onLastRemoved(resources[i], info);
+ onLastRemoved(res, info);
info.resources.erase(resType);
}
}
}
+ return Status::ok();
}
-void ResourceManagerService::removeClient(int pid, int64_t clientId) {
+Status ResourceManagerService::removeClient(int32_t pid, int64_t clientId) {
removeResource(pid, clientId, true);
+ return Status::ok();
}
-void ResourceManagerService::removeResource(int pid, int64_t clientId, bool checkValid) {
+Status ResourceManagerService::removeResource(int pid, int64_t clientId, bool checkValid) {
String8 log = String8::format(
"removeResource(pid %d, clientId %lld)",
pid, (long long) clientId);
@@ -350,19 +422,19 @@
Mutex::Autolock lock(mLock);
if (checkValid && !mProcessInfo->isValidPid(pid)) {
ALOGE("Rejected removeResource call with invalid pid.");
- return;
+ return Status::fromServiceSpecificError(BAD_VALUE);
}
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId);
- return;
+ return Status::ok();
}
ResourceInfos &infos = mMap.editValueAt(index);
index = infos.indexOfKey(clientId);
if (index < 0) {
ALOGV("removeResource: didn't find clientId %lld", (long long) clientId);
- return;
+ return Status::ok();
}
const ResourceInfo &info = infos[index];
@@ -373,66 +445,79 @@
IInterface::asBinder(info.client)->unlinkToDeath(info.deathNotifier);
infos.removeItemsAt(index);
+ return Status::ok();
}
void ResourceManagerService::getClientForResource_l(
- int callingPid, const MediaResource *res, Vector<sp<IResourceManagerClient>> *clients) {
+ int callingPid, const MediaResourceParcel *res, Vector<sp<IResourceManagerClient>> *clients) {
if (res == NULL) {
return;
}
sp<IResourceManagerClient> client;
- if (getLowestPriorityBiggestClient_l(callingPid, res->mType, &client)) {
+ if (getLowestPriorityBiggestClient_l(callingPid, res->type, &client)) {
clients->push_back(client);
}
}
-bool ResourceManagerService::reclaimResource(
- int callingPid, const Vector<MediaResource> &resources) {
+Status ResourceManagerService::reclaimResource(
+ int32_t callingPid,
+ const std::vector<MediaResourceParcel>& resources,
+ bool* _aidl_return) {
String8 log = String8::format("reclaimResource(callingPid %d, resources %s)",
callingPid, getString(resources).string());
mServiceLog->add(log);
+ *_aidl_return = false;
Vector<sp<IResourceManagerClient>> clients;
{
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isValidPid(callingPid)) {
ALOGE("Rejected reclaimResource call with invalid callingPid.");
- return false;
+ return Status::fromServiceSpecificError(BAD_VALUE);
}
- const MediaResource *secureCodec = NULL;
- const MediaResource *nonSecureCodec = NULL;
- const MediaResource *graphicMemory = NULL;
+ const MediaResourceParcel *secureCodec = NULL;
+ const MediaResourceParcel *nonSecureCodec = NULL;
+ const MediaResourceParcel *graphicMemory = NULL;
+ const MediaResourceParcel *drmSession = NULL;
for (size_t i = 0; i < resources.size(); ++i) {
- MediaResource::Type type = resources[i].mType;
- if (resources[i].mType == MediaResource::kSecureCodec) {
+ MediaResource::Type type = resources[i].type;
+ if (resources[i].type == MediaResource::Type::kSecureCodec) {
secureCodec = &resources[i];
- } else if (type == MediaResource::kNonSecureCodec) {
+ } else if (type == MediaResource::Type::kNonSecureCodec) {
nonSecureCodec = &resources[i];
- } else if (type == MediaResource::kGraphicMemory) {
+ } else if (type == MediaResource::Type::kGraphicMemory) {
graphicMemory = &resources[i];
+ } else if (type == MediaResource::Type::kDrmSession) {
+ drmSession = &resources[i];
}
}
// first pass to handle secure/non-secure codec conflict
if (secureCodec != NULL) {
if (!mSupportsMultipleSecureCodecs) {
- if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
- return false;
+ if (!getAllClients_l(callingPid, MediaResource::Type::kSecureCodec, &clients)) {
+ return Status::ok();
}
}
if (!mSupportsSecureWithNonSecureCodec) {
- if (!getAllClients_l(callingPid, MediaResource::kNonSecureCodec, &clients)) {
- return false;
+ if (!getAllClients_l(callingPid, MediaResource::Type::kNonSecureCodec, &clients)) {
+ return Status::ok();
}
}
}
if (nonSecureCodec != NULL) {
if (!mSupportsSecureWithNonSecureCodec) {
- if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
- return false;
+ if (!getAllClients_l(callingPid, MediaResource::Type::kSecureCodec, &clients)) {
+ return Status::ok();
}
}
}
+ if (drmSession != NULL) {
+ getClientForResource_l(callingPid, drmSession, &clients);
+ if (clients.size() == 0) {
+ return Status::ok();
+ }
+ }
if (clients.size() == 0) {
// if no secure/non-secure codec conflict, run second pass to handle other resources.
@@ -448,32 +533,35 @@
if (clients.size() == 0) {
// if we are here, run the fourth pass to free one codec with the different type.
if (secureCodec != NULL) {
- MediaResource temp(MediaResource::kNonSecureCodec, 1);
+ MediaResource temp(MediaResource::Type::kNonSecureCodec, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
if (nonSecureCodec != NULL) {
- MediaResource temp(MediaResource::kSecureCodec, 1);
+ MediaResource temp(MediaResource::Type::kSecureCodec, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
}
}
if (clients.size() == 0) {
- return false;
+ return Status::ok();
}
sp<IResourceManagerClient> failedClient;
for (size_t i = 0; i < clients.size(); ++i) {
log = String8::format("reclaimResource from client %p", clients[i].get());
mServiceLog->add(log);
- if (!clients[i]->reclaimResource()) {
+ bool success;
+ Status status = clients[i]->reclaimResource(&success);
+ if (!status.isOk() || !success) {
failedClient = clients[i];
break;
}
}
if (failedClient == NULL) {
- return true;
+ *_aidl_return = true;
+ return Status::ok();
}
{
@@ -498,7 +586,7 @@
}
}
- return false;
+ return Status::ok();
}
bool ResourceManagerService::getAllClients_l(
@@ -613,10 +701,10 @@
for (size_t i = 0; i < infos.size(); ++i) {
const ResourceList &resources = infos[i].resources;
for (auto it = resources.begin(); it != resources.end(); it++) {
- const MediaResource &resource = it->second;
- if (resource.mType == type) {
- if (resource.mValue > largestValue) {
- largestValue = resource.mValue;
+ const MediaResourceParcel &resource = it->second;
+ if (resource.type == type) {
+ if (resource.value > largestValue) {
+ largestValue = resource.value;
clientTemp = infos[i].client;
}
}
@@ -632,4 +720,5 @@
return true;
}
+} // namespace media
} // namespace android
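
The saturation rule implemented by mergeResources() above can be read in isolation as follows; this is a standalone sketch of the same [0, INT64_MAX] clamping, not the service code itself:

    #include <cstdint>
    #include <limits>

    // Sketch: merge a (possibly negative) delta into a stored value while keeping
    // the result in [0, INT64_MAX]. Negative deltas (e.g. DrmSession usage) can only
    // drive the value down to 0; large positive deltas saturate instead of overflowing.
    static int64_t mergeValue(int64_t current, int64_t delta) {
        if (delta < std::numeric_limits<int64_t>::max() - current) {
            current += delta;
            if (current < 0) current = 0;
        } else {
            current = std::numeric_limits<int64_t>::max();
        }
        return current;
    }
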
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index b9147ff..b5b9f86 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -15,25 +15,32 @@
** limitations under the License.
*/
-#ifndef ANDROID_RESOURCEMANAGERSERVICE_H
-#define ANDROID_RESOURCEMANAGERSERVICE_H
+#ifndef ANDROID_MEDIA_RESOURCEMANAGERSERVICE_H
+#define ANDROID_MEDIA_RESOURCEMANAGERSERVICE_H
+#include <android/media/BnResourceManagerService.h>
#include <arpa/inet.h>
#include <binder/BinderService.h>
+#include <media/MediaResource.h>
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <utils/String8.h>
#include <utils/threads.h>
#include <utils/Vector.h>
-#include <media/IResourceManagerService.h>
-
namespace android {
class ServiceLog;
struct ProcessInfoInterface;
-typedef std::map<std::pair<MediaResource::Type, MediaResource::SubType>, MediaResource> ResourceList;
+namespace media {
+
+using android::binder::Status;
+
+typedef std::map<std::tuple<
+ MediaResource::Type, MediaResource::SubType, std::vector<uint8_t>>,
+ MediaResourceParcel> ResourceList;
+
struct ResourceInfo {
int64_t clientId;
uid_t uid;
@@ -51,34 +58,49 @@
public BnResourceManagerService
{
public:
+ struct SystemCallbackInterface : public RefBase {
+ virtual void noteStartVideo(int uid) = 0;
+ virtual void noteStopVideo(int uid) = 0;
+ virtual void noteResetVideo() = 0;
+ virtual bool requestCpusetBoost(
+ bool enable, const sp<IInterface> &client) = 0;
+ };
+
static char const *getServiceName() { return "media.resource_manager"; }
virtual status_t dump(int fd, const Vector<String16>& args);
ResourceManagerService();
- explicit ResourceManagerService(sp<ProcessInfoInterface> processInfo);
+ explicit ResourceManagerService(
+ const sp<ProcessInfoInterface> &processInfo,
+ const sp<SystemCallbackInterface> &systemResource);
// IResourceManagerService interface
- virtual void config(const Vector<MediaResourcePolicy> &policies);
+ Status config(const std::vector<MediaResourcePolicyParcel>& policies) override;
- virtual void addResource(
- int pid,
- int uid,
+ Status addResource(
+ int32_t pid,
+ int32_t uid,
int64_t clientId,
- const sp<IResourceManagerClient> client,
- const Vector<MediaResource> &resources);
+ const sp<IResourceManagerClient>& client,
+ const std::vector<MediaResourceParcel>& resources) override;
- virtual void removeResource(int pid, int64_t clientId,
- const Vector<MediaResource> &resources);
+ Status removeResource(
+ int32_t pid,
+ int64_t clientId,
+ const std::vector<MediaResourceParcel>& resources) override;
- virtual void removeClient(int pid, int64_t clientId);
+ Status removeClient(int32_t pid, int64_t clientId) override;
// Tries to reclaim resource from processes with lower priority than the calling process
// according to the requested resources.
// Returns true if any resource has been reclaimed, otherwise returns false.
- virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources);
+ Status reclaimResource(
+ int32_t callingPid,
+ const std::vector<MediaResourceParcel>& resources,
+ bool* _aidl_return) override;
- void removeResource(int pid, int64_t clientId, bool checkValid);
+ Status removeResource(int pid, int64_t clientId, bool checkValid);
protected:
virtual ~ResourceManagerService();
@@ -110,14 +132,18 @@
// A helper function basically calls getLowestPriorityBiggestClient_l and add the result client
// to the given Vector.
- void getClientForResource_l(
- int callingPid, const MediaResource *res, Vector<sp<IResourceManagerClient>> *clients);
+ void getClientForResource_l(int callingPid,
+ const MediaResourceParcel *res, Vector<sp<IResourceManagerClient>> *clients);
- void onFirstAdded(const MediaResource& res, const ResourceInfo& clientInfo);
- void onLastRemoved(const MediaResource& res, const ResourceInfo& clientInfo);
+ void onFirstAdded(const MediaResourceParcel& res, const ResourceInfo& clientInfo);
+ void onLastRemoved(const MediaResourceParcel& res, const ResourceInfo& clientInfo);
+
+ // Merge r2 into r1
+ void mergeResources(MediaResourceParcel& r1, const MediaResourceParcel& r2);
mutable Mutex mLock;
sp<ProcessInfoInterface> mProcessInfo;
+ sp<SystemCallbackInterface> mSystemCB;
sp<ServiceLog> mServiceLog;
PidResourceInfosMap mMap;
bool mSupportsMultipleSecureCodecs;
@@ -126,7 +152,7 @@
};
// ----------------------------------------------------------------------------
+} // namespace media
+} // namespace android
-}; // namespace android
-
-#endif // ANDROID_RESOURCEMANAGERSERVICE_H
+#endif // ANDROID_MEDIA_RESOURCEMANAGERSERVICE_H
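
As context for the tests below, ResourceList entries are now keyed by the (type, subType, id) tuple rather than a (type, subType) pair, so resources differing only in subType or id are tracked separately. A hedged illustration of that keying, assuming only the MediaResourceParcel fields already used in this patch:

    // Illustration only: building the map key the same way addResource() does.
    MediaResourceParcel res;
    res.type = MediaResource::Type::kSecureCodec;
    res.subType = MediaResource::SubType::kVideoCodec;
    res.value = 1;

    ResourceList list;
    list[std::tuple(res.type, res.subType, res.id)] = res;
    // A kSecureCodec entry with kUnspecifiedSubType would land under a different key.
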
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index be592f5..203baf5 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -21,13 +21,16 @@
#include <gtest/gtest.h>
#include "ResourceManagerService.h"
-#include <media/IResourceManagerService.h>
+#include <android/media/BnResourceManagerClient.h>
#include <media/MediaResource.h>
#include <media/MediaResourcePolicy.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/ProcessInfoInterface.h>
namespace android {
+namespace media {
+
+using ::android::binder::Status;
static int64_t getId(const sp<IResourceManagerClient>& client) {
return (int64_t) client.get();
@@ -52,19 +55,77 @@
DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
};
+struct TestSystemCallback :
+ public ResourceManagerService::SystemCallbackInterface {
+ TestSystemCallback() :
+ mLastEvent({EventType::INVALID, 0}), mEventCount(0) {}
+
+ enum EventType {
+ INVALID = -1,
+ VIDEO_ON = 0,
+ VIDEO_OFF = 1,
+ VIDEO_RESET = 2,
+ CPUSET_ENABLE = 3,
+ CPUSET_DISABLE = 4,
+ };
+
+ struct EventEntry {
+ EventType type;
+ int arg;
+ };
+
+ virtual void noteStartVideo(int uid) override {
+ mLastEvent = {EventType::VIDEO_ON, uid};
+ mEventCount++;
+ }
+
+ virtual void noteStopVideo(int uid) override {
+ mLastEvent = {EventType::VIDEO_OFF, uid};
+ mEventCount++;
+ }
+
+ virtual void noteResetVideo() override {
+ mLastEvent = {EventType::VIDEO_RESET, 0};
+ mEventCount++;
+ }
+
+ virtual bool requestCpusetBoost(
+ bool enable, const sp<IInterface> &/*client*/) override {
+ mLastEvent = {enable ? EventType::CPUSET_ENABLE : EventType::CPUSET_DISABLE, 0};
+ mEventCount++;
+ return true;
+ }
+
+ size_t eventCount() { return mEventCount; }
+ EventType lastEventType() { return mLastEvent.type; }
+ EventEntry lastEvent() { return mLastEvent; }
+
+protected:
+ virtual ~TestSystemCallback() {}
+
+private:
+ EventEntry mLastEvent;
+ size_t mEventCount;
+
+ DISALLOW_EVIL_CONSTRUCTORS(TestSystemCallback);
+};
+
+
struct TestClient : public BnResourceManagerClient {
TestClient(int pid, sp<ResourceManagerService> service)
: mReclaimed(false), mPid(pid), mService(service) {}
- virtual bool reclaimResource() {
+ Status reclaimResource(bool* _aidl_return) override {
sp<IResourceManagerClient> client(this);
mService->removeClient(mPid, (int64_t) client.get());
mReclaimed = true;
- return true;
+ *_aidl_return = true;
+ return Status::ok();
}
- virtual String8 getName() {
- return String8("test_client");
+ Status getName(::std::string* _aidl_return) override {
+ *_aidl_return = "test_client";
+ return Status::ok();
}
bool reclaimed() const {
@@ -95,23 +156,37 @@
static const int kMidPriorityPid = 25;
static const int kHighPriorityPid = 10;
+using EventType = TestSystemCallback::EventType;
+using EventEntry = TestSystemCallback::EventEntry;
+bool operator== (const EventEntry& lhs, const EventEntry& rhs) {
+ return lhs.type == rhs.type && lhs.arg == rhs.arg;
+}
+
+#define CHECK_STATUS_TRUE(condition) \
+ EXPECT_TRUE((condition).isOk() && (result))
+
+#define CHECK_STATUS_FALSE(condition) \
+ EXPECT_TRUE((condition).isOk() && !(result))
+
class ResourceManagerServiceTest : public ::testing::Test {
public:
ResourceManagerServiceTest()
- : mService(new ResourceManagerService(new TestProcessInfo)),
+ : mSystemCB(new TestSystemCallback()),
+ mService(new ResourceManagerService(new TestProcessInfo, mSystemCB)),
mTestClient1(new TestClient(kTestPid1, mService)),
mTestClient2(new TestClient(kTestPid2, mService)),
mTestClient3(new TestClient(kTestPid2, mService)) {
}
protected:
- static bool isEqualResources(const Vector<MediaResource> &resources1,
+ static bool isEqualResources(const std::vector<MediaResourceParcel> &resources1,
const ResourceList &resources2) {
// convert resource1 to ResourceList
ResourceList r1;
for (size_t i = 0; i < resources1.size(); ++i) {
- const auto resType = std::make_pair(resources1[i].mType, resources1[i].mSubType);
- r1[resType] = resources1[i];
+ const auto &res = resources1[i];
+ const auto resType = std::tuple(res.type, res.subType, res.id);
+ r1[resType] = res;
}
return r1 == resources2;
}
@@ -119,7 +194,7 @@
static void expectEqResourceInfo(const ResourceInfo &info,
int uid,
sp<IResourceManagerClient> client,
- const Vector<MediaResource> &resources) {
+ const std::vector<MediaResourceParcel> &resources) {
EXPECT_EQ(uid, info.uid);
EXPECT_EQ(client, info.client);
EXPECT_TRUE(isEqualResources(resources, info.resources));
@@ -155,25 +230,25 @@
// ---------------------------------------------------------------------------------
void addResource() {
// kTestPid1 mTestClient1
- Vector<MediaResource> resources1;
- resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ std::vector<MediaResourceParcel> resources1;
+ resources1.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
- resources1.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
- Vector<MediaResource> resources11;
- resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
+ resources1.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
+ std::vector<MediaResourceParcel> resources11;
+ resources11.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
// kTestPid2 mTestClient2
- Vector<MediaResource> resources2;
- resources2.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
- resources2.push_back(MediaResource(MediaResource::kGraphicMemory, 300));
+ std::vector<MediaResourceParcel> resources2;
+ resources2.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
+ resources2.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 300));
mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
// kTestPid2 mTestClient3
- Vector<MediaResource> resources3;
+ std::vector<MediaResourceParcel> resources3;
mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources3);
- resources3.push_back(MediaResource(MediaResource::kSecureCodec, 1));
- resources3.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
+ resources3.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
+ resources3.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 100));
mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources3);
const PidResourceInfosMap &map = mService->mMap;
@@ -192,32 +267,92 @@
expectEqResourceInfo(infos2.valueFor(getId(mTestClient3)), kTestUid2, mTestClient3, resources3);
}
+ void testCombineResourceWithNegativeValues() {
+ // kTestPid1 mTestClient1
+ std::vector<MediaResourceParcel> resources1;
+ resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, -100));
+ resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, -100));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+
+ // Expected result:
+ // 1) the client should have been added;
+ // 2) both resource entries should have been rejected, resource list should be empty.
+ const PidResourceInfosMap &map = mService->mMap;
+ EXPECT_EQ(1u, map.size());
+ ssize_t index1 = map.indexOfKey(kTestPid1);
+ ASSERT_GE(index1, 0);
+ const ResourceInfos &infos1 = map[index1];
+ EXPECT_EQ(1u, infos1.size());
+ std::vector<MediaResourceParcel> expected;
+ expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+
+ resources1.clear();
+ resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, INT64_MAX));
+ resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, INT64_MAX));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ resources1.clear();
+ resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, 10));
+ resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 10));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+
+ // Expected result:
+ // Both values should saturate to INT64_MAX
+ expected.push_back(MediaResource(MediaResource::Type::kDrmSession, INT64_MAX));
+ expected.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, INT64_MAX));
+ expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+
+ resources1.clear();
+ resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, -10));
+ resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, -10));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+
+ // Expected result:
+ // 1) DrmSession resource should allow negative value addition, and value should drop accordingly
+ // 2) Non-drm session resource should ignore negative value addition.
+ expected.push_back(MediaResource(MediaResource::Type::kDrmSession, INT64_MAX - 10));
+ expected.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, INT64_MAX));
+ expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+
+ resources1.clear();
+ resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, INT64_MIN));
+        resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, INT64_MIN));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+
+ // Expected result:
+ // 1) DrmSession resource value should drop to 0, but the entry shouldn't be removed.
+ // 2) Non-drm session resource should ignore negative value addition.
+ expected.clear();
+ expected.push_back(MediaResource(MediaResource::Type::kDrmSession, 0));
+ expected.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, INT64_MAX));
+ expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+ }
+
void testConfig() {
EXPECT_TRUE(mService->mSupportsMultipleSecureCodecs);
EXPECT_TRUE(mService->mSupportsSecureWithNonSecureCodec);
- Vector<MediaResourcePolicy> policies1;
+ std::vector<MediaResourcePolicyParcel> policies1;
policies1.push_back(
MediaResourcePolicy(
- String8(kPolicySupportsMultipleSecureCodecs),
- String8("true")));
+ IResourceManagerService::kPolicySupportsMultipleSecureCodecs(),
+ "true"));
policies1.push_back(
MediaResourcePolicy(
- String8(kPolicySupportsSecureWithNonSecureCodec),
- String8("false")));
+ IResourceManagerService::kPolicySupportsSecureWithNonSecureCodec(),
+ "false"));
mService->config(policies1);
EXPECT_TRUE(mService->mSupportsMultipleSecureCodecs);
EXPECT_FALSE(mService->mSupportsSecureWithNonSecureCodec);
- Vector<MediaResourcePolicy> policies2;
+ std::vector<MediaResourcePolicyParcel> policies2;
policies2.push_back(
MediaResourcePolicy(
- String8(kPolicySupportsMultipleSecureCodecs),
- String8("false")));
+ IResourceManagerService::kPolicySupportsMultipleSecureCodecs(),
+ "false"));
policies2.push_back(
MediaResourcePolicy(
- String8(kPolicySupportsSecureWithNonSecureCodec),
- String8("true")));
+ IResourceManagerService::kPolicySupportsSecureWithNonSecureCodec(),
+ "true"));
mService->config(policies2);
EXPECT_FALSE(mService->mSupportsMultipleSecureCodecs);
EXPECT_TRUE(mService->mSupportsSecureWithNonSecureCodec);
@@ -225,12 +360,12 @@
void testCombineResource() {
// kTestPid1 mTestClient1
- Vector<MediaResource> resources1;
- resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ std::vector<MediaResourceParcel> resources1;
+ resources1.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
- Vector<MediaResource> resources11;
- resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
+ std::vector<MediaResourceParcel> resources11;
+ resources11.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
const PidResourceInfosMap &map = mService->mMap;
@@ -241,35 +376,35 @@
EXPECT_EQ(1u, infos1.size());
// test adding existing types to combine values
- resources1.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
+ resources1.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 100));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
- Vector<MediaResource> expected;
- expected.push_back(MediaResource(MediaResource::kSecureCodec, 2));
- expected.push_back(MediaResource(MediaResource::kGraphicMemory, 300));
+ std::vector<MediaResourceParcel> expected;
+ expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 2));
+ expected.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 300));
expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
// test adding new types (including types that differs only in subType)
- resources11.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
- resources11.push_back(MediaResource(MediaResource::kSecureCodec, MediaResource::kVideoCodec, 1));
+ resources11.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
+ resources11.push_back(MediaResource(MediaResource::Type::kSecureCodec, MediaResource::SubType::kVideoCodec, 1));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
expected.clear();
- expected.push_back(MediaResource(MediaResource::kSecureCodec, 2));
- expected.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
- expected.push_back(MediaResource(MediaResource::kSecureCodec, MediaResource::kVideoCodec, 1));
- expected.push_back(MediaResource(MediaResource::kGraphicMemory, 500));
+ expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 2));
+ expected.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
+ expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, MediaResource::SubType::kVideoCodec, 1));
+ expected.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 500));
expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
}
void testRemoveResource() {
// kTestPid1 mTestClient1
- Vector<MediaResource> resources1;
- resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ std::vector<MediaResourceParcel> resources1;
+ resources1.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
- Vector<MediaResource> resources11;
- resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
+ std::vector<MediaResourceParcel> resources11;
+ resources11.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
const PidResourceInfosMap &map = mService->mMap;
@@ -280,20 +415,26 @@
EXPECT_EQ(1u, infos1.size());
// test partial removal
- resources11.editItemAt(0).mValue = 100;
+ resources11[0].value = 100;
mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
- Vector<MediaResource> expected;
- expected.push_back(MediaResource(MediaResource::kSecureCodec, 1));
- expected.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
+ std::vector<MediaResourceParcel> expected;
+ expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
+ expected.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 100));
+ expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
+
+ // test removal request with negative value, should be ignored
+ resources11[0].value = -10000;
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
+
expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
// test complete removal with overshoot value
- resources11.editItemAt(0).mValue = 1000;
+ resources11[0].value = 1000;
mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
expected.clear();
- expected.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
}
@@ -316,7 +457,7 @@
void testGetAllClients() {
addResource();
- MediaResource::Type type = MediaResource::kSecureCodec;
+ MediaResource::Type type = MediaResource::Type::kSecureCodec;
Vector<sp<IResourceManagerClient> > clients;
EXPECT_FALSE(mService->getAllClients_l(kLowPriorityPid, type, &clients));
// some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l
@@ -331,9 +472,10 @@
}
void testReclaimResourceSecure() {
- Vector<MediaResource> resources;
- resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
- resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
+ bool result;
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
+ resources.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 150));
// ### secure codec can't coexist and secure codec can coexist with non-secure codec ###
{
@@ -342,19 +484,19 @@
mService->mSupportsSecureWithNonSecureCodec = true;
// priority too low
- EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
- EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
// reclaim all secure codecs
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(true /* c1 */, false /* c2 */, true /* c3 */);
// call again should reclaim one largest graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
// nothing left
- EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
}
// ### secure codecs can't coexist and secure codec can't coexist with non-secure codec ###
@@ -364,15 +506,15 @@
mService->mSupportsSecureWithNonSecureCodec = false;
// priority too low
- EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
- EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
// reclaim all secure and non-secure codecs
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(true /* c1 */, true /* c2 */, true /* c3 */);
// nothing left
- EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
}
@@ -383,23 +525,23 @@
mService->mSupportsSecureWithNonSecureCodec = false;
// priority too low
- EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
- EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
// reclaim all non-secure codecs
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
// call again should reclaim one largest graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
// call again should reclaim another largest graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
// nothing left
- EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
}
// ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
@@ -409,22 +551,22 @@
mService->mSupportsSecureWithNonSecureCodec = true;
// priority too low
- EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
// one largest graphic memory from lowest process got reclaimed
verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
// call again should reclaim another graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
// call again should reclaim another graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
// nothing left
- EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
}
// ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
@@ -433,27 +575,28 @@
mService->mSupportsMultipleSecureCodecs = true;
mService->mSupportsSecureWithNonSecureCodec = true;
- Vector<MediaResource> resources;
- resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
// secure codec from lowest process got reclaimed
verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
// call again should reclaim another secure codec from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
// no more secure codec, non-secure codec will be reclaimed.
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
}
}
void testReclaimResourceNonSecure() {
- Vector<MediaResource> resources;
- resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
- resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
+ bool result;
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
+ resources.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 150));
// ### secure codec can't coexist with non-secure codec ###
{
@@ -461,19 +604,19 @@
mService->mSupportsSecureWithNonSecureCodec = false;
// priority too low
- EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
- EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
// reclaim all secure codecs
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(true /* c1 */, false /* c2 */, true /* c3 */);
// call again should reclaim one graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
// nothing left
- EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
}
@@ -483,22 +626,22 @@
mService->mSupportsSecureWithNonSecureCodec = true;
// priority too low
- EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
// one largest graphic memory from lowest process got reclaimed
verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
// call again should reclaim another graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
// call again should reclaim another graphic memory from lowest process
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
// nothing left
- EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
}
// ### secure codec can coexist with non-secure codec ###
@@ -506,15 +649,15 @@
addResource();
mService->mSupportsSecureWithNonSecureCodec = true;
- Vector<MediaResource> resources;
- resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+ std::vector<MediaResourceParcel> resources;
+ resources.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
// one non secure codec from lowest process got reclaimed
verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
// no more non-secure codec, secure codec from lowest priority process will be reclaimed
- EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+ CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
// clean up client 3 which still left
@@ -523,7 +666,7 @@
}
void testGetLowestPriorityBiggestClient() {
- MediaResource::Type type = MediaResource::kGraphicMemory;
+ MediaResource::Type type = MediaResource::Type::kGraphicMemory;
sp<IResourceManagerClient> client;
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
@@ -532,8 +675,8 @@
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kLowPriorityPid, type, &client));
EXPECT_TRUE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
- // kTestPid1 is the lowest priority process with MediaResource::kGraphicMemory.
- // mTestClient1 has the largest MediaResource::kGraphicMemory within kTestPid1.
+ // kTestPid1 is the lowest priority process with MediaResource::Type::kGraphicMemory.
+ // mTestClient1 has the largest MediaResource::Type::kGraphicMemory within kTestPid1.
EXPECT_EQ(mTestClient1, client);
}
@@ -542,7 +685,7 @@
int priority;
TestProcessInfo processInfo;
- MediaResource::Type type = MediaResource::kGraphicMemory;
+ MediaResource::Type type = MediaResource::Type::kGraphicMemory;
EXPECT_FALSE(mService->getLowestPriorityPid_l(type, &pid, &priority));
addResource();
@@ -553,7 +696,7 @@
processInfo.getPriority(kTestPid1, &priority1);
EXPECT_EQ(priority1, priority);
- type = MediaResource::kNonSecureCodec;
+ type = MediaResource::Type::kNonSecureCodec;
EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
EXPECT_EQ(kTestPid2, pid);
int priority2;
@@ -562,7 +705,7 @@
}
void testGetBiggestClient() {
- MediaResource::Type type = MediaResource::kGraphicMemory;
+ MediaResource::Type type = MediaResource::Type::kGraphicMemory;
sp<IResourceManagerClient> client;
EXPECT_FALSE(mService->getBiggestClient_l(kTestPid2, type, &client));
@@ -578,6 +721,84 @@
EXPECT_TRUE(mService->isCallingPriorityHigher_l(99, 100));
}
+ void testBatteryStats() {
+ // reset should always be called when ResourceManagerService is created (restarted)
+ EXPECT_EQ(1u, mSystemCB->eventCount());
+ EXPECT_EQ(EventType::VIDEO_RESET, mSystemCB->lastEventType());
+
+ // new client request should cause VIDEO_ON
+ std::vector<MediaResourceParcel> resources1;
+ resources1.push_back(MediaResource(MediaResource::Type::kBattery, MediaResource::SubType::kVideoCodec, 1));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ EXPECT_EQ(2u, mSystemCB->eventCount());
+ EXPECT_EQ(EventEntry({EventType::VIDEO_ON, kTestUid1}), mSystemCB->lastEvent());
+
+ // each client should only cause 1 VIDEO_ON
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ EXPECT_EQ(2u, mSystemCB->eventCount());
+
+ // new client request should cause VIDEO_ON
+ std::vector<MediaResourceParcel> resources2;
+ resources2.push_back(MediaResource(MediaResource::Type::kBattery, MediaResource::SubType::kVideoCodec, 2));
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
+ EXPECT_EQ(3u, mSystemCB->eventCount());
+ EXPECT_EQ(EventEntry({EventType::VIDEO_ON, kTestUid2}), mSystemCB->lastEvent());
+
+ // partially remove mTestClient1's request, shouldn't be any VIDEO_OFF
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources1);
+ EXPECT_EQ(3u, mSystemCB->eventCount());
+
+ // remove mTestClient1's request, should be VIDEO_OFF for kTestUid1
+ // (use resource2 to test removing more instances than previously requested)
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources2);
+ EXPECT_EQ(4u, mSystemCB->eventCount());
+ EXPECT_EQ(EventEntry({EventType::VIDEO_OFF, kTestUid1}), mSystemCB->lastEvent());
+
+ // remove mTestClient2, should be VIDEO_OFF for kTestUid2
+ mService->removeClient(kTestPid2, getId(mTestClient2));
+ EXPECT_EQ(5u, mSystemCB->eventCount());
+ EXPECT_EQ(EventEntry({EventType::VIDEO_OFF, kTestUid2}), mSystemCB->lastEvent());
+ }
+
+ void testCpusetBoost() {
+ // reset should always be called when ResourceManagerService is created (restarted)
+ EXPECT_EQ(1u, mSystemCB->eventCount());
+ EXPECT_EQ(EventType::VIDEO_RESET, mSystemCB->lastEventType());
+
+ // new client request should cause CPUSET_ENABLE
+ std::vector<MediaResourceParcel> resources1;
+ resources1.push_back(MediaResource(MediaResource::Type::kCpuBoost, 1));
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ EXPECT_EQ(2u, mSystemCB->eventCount());
+ EXPECT_EQ(EventType::CPUSET_ENABLE, mSystemCB->lastEventType());
+
+ // each client should only cause 1 CPUSET_ENABLE
+ mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ EXPECT_EQ(2u, mSystemCB->eventCount());
+
+ // new client request should cause CPUSET_ENABLE
+ std::vector<MediaResourceParcel> resources2;
+ resources2.push_back(MediaResource(MediaResource::Type::kCpuBoost, 2));
+ mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
+ EXPECT_EQ(3u, mSystemCB->eventCount());
+ EXPECT_EQ(EventType::CPUSET_ENABLE, mSystemCB->lastEventType());
+
+ // remove mTestClient2 should not cause CPUSET_DISABLE, mTestClient1 still active
+ mService->removeClient(kTestPid2, getId(mTestClient2));
+ EXPECT_EQ(3u, mSystemCB->eventCount());
+
+ // remove 1 cpuboost from mTestClient1, should not be CPUSET_DISABLE (still 1 left)
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources1);
+ EXPECT_EQ(3u, mSystemCB->eventCount());
+
+ // remove 2 cpuboost from mTestClient1, should be CPUSET_DISABLE
+ // (use resources2 to test removing more than previously requested)
+ mService->removeResource(kTestPid1, getId(mTestClient1), resources2);
+ EXPECT_EQ(4u, mSystemCB->eventCount());
+ EXPECT_EQ(EventType::CPUSET_DISABLE, mSystemCB->lastEventType());
+ }
+
+ sp<TestSystemCallback> mSystemCB;
sp<ResourceManagerService> mService;
sp<IResourceManagerClient> mTestClient1;
sp<IResourceManagerClient> mTestClient2;
@@ -596,6 +817,10 @@
testCombineResource();
}
+TEST_F(ResourceManagerServiceTest, combineResourceNegative) {
+ testCombineResourceWithNegativeValues();
+}
+
TEST_F(ResourceManagerServiceTest, removeResource) {
testRemoveResource();
}
@@ -629,4 +854,13 @@
testIsCallingPriorityHigher();
}
+TEST_F(ResourceManagerServiceTest, testBatteryStats) {
+ testBatteryStats();
+}
+
+TEST_F(ResourceManagerServiceTest, testCpusetBoost) {
+ testCpusetBoost();
+}
+
+} // namespace media
} // namespace android
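testBatteryStats() and testCpusetBoost() observe ResourceManagerService purely through mSystemCB, so the assertions imply a small recorder exposing eventCount(), lastEventType(), lastEvent(), and an equality-comparable EventEntry. The real TestSystemCallback is not part of this excerpt (it also has to hook into the service's system-callback interface so addResource()/removeResource() reach it); the following is only a minimal sketch of such a recorder, and everything beyond the names already used in the tests is an assumption:

    #include <cstddef>
    #include <vector>

    // Illustrative event recorder matching the calls made by the new tests.
    enum class EventType { VIDEO_ON, VIDEO_OFF, VIDEO_RESET, CPUSET_ENABLE, CPUSET_DISABLE };

    struct EventEntry {
        EventType type;
        int uid = -1;
        bool operator==(const EventEntry& other) const {
            return type == other.type && uid == other.uid;
        }
    };

    class TestSystemCallback {
    public:
        // Called (in the sketch) whenever the service would note an event.
        void record(EventType type, int uid = -1) { mEvents.push_back({type, uid}); }

        // Accessors used by the tests; they assume at least one event was recorded,
        // which holds because construction always records VIDEO_RESET first.
        size_t eventCount() const { return mEvents.size(); }
        EventType lastEventType() const { return mEvents.back().type; }
        EventEntry lastEvent() const { return mEvents.back(); }

    private:
        std::vector<EventEntry> mEvents;
    };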
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 1b7a20c..ca1354d 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -46,6 +46,7 @@
"libaaudio_internal",
"libaudioclient",
"libaudioflinger",
+ "libaudioutils",
"libbase",
"libbinder",
"libcutils",
diff --git a/services/soundtrigger/Android.bp b/services/soundtrigger/Android.bp
index 3f02f48..1bbd591 100644
--- a/services/soundtrigger/Android.bp
+++ b/services/soundtrigger/Android.bp
@@ -31,10 +31,8 @@
"libaudioutils",
"libmediautils",
- "libhwbinder",
"libhidlbase",
"libhidlmemory",
- "libhidltransport",
"libbase",
"libaudiohal",
"libaudiohal_deathhandler",
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 377d30b..ccbeb77 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -40,6 +40,32 @@
#define HW_MODULE_PREFIX "primary"
namespace android {
+namespace {
+
+// Given an IMemory, returns a copy of its content along with its size.
+// Returns {nullptr, 0} on failure or if the input is nullptr.
+std::pair<std::unique_ptr<uint8_t[]>,
+ size_t> CopyToArray(const sp<IMemory>& mem) {
+ if (mem == nullptr) {
+ return std::make_pair(nullptr, 0);
+ }
+
+ const size_t size = mem->size();
+ if (size == 0) {
+ return std::make_pair(nullptr, 0);
+ }
+
+ std::unique_ptr<uint8_t[]> ar = std::make_unique<uint8_t[]>(size);
+ if (ar == nullptr) {
+ return std::make_pair(nullptr, 0);
+ }
+
+ memcpy(ar.get(), mem->unsecurePointer(), size);
+ return std::make_pair(std::move(ar), size);
+}
+
+} // namespace
+
SoundTriggerHwService::SoundTriggerHwService()
: BnSoundTriggerHwService(),
mNextUniqueId(1),
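CopyToArray() exists so the service validates a private snapshot of client-supplied shared memory rather than the live mapping: the loadSoundModel() and startRecognition() hunks below do every offset/size check against immutableMemory.second and only ever dereference immutableMemory.first, closing the window in which a client could rewrite the shared buffer between the check and the use. Condensed restatement of that call pattern, using only names from this diff:

    auto immutableMemory = CopyToArray(modelMemory);   // single read of the shared mapping
    if (immutableMemory.first == nullptr) {
        return NO_MEMORY;                              // empty or unmapped IMemory
    }
    struct sound_trigger_sound_model* sound_model =
            (struct sound_trigger_sound_model*) immutableMemory.first.get();
    // From here on, only immutableMemory.{first,second} are consulted; the client
    // can no longer influence what the bounds checks or the HAL call observe.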
@@ -234,11 +260,11 @@
size_t size = event->data_offset + event->data_size;
eventMemory = mMemoryDealer->allocate(size);
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
eventMemory.clear();
return eventMemory;
}
- memcpy(eventMemory->pointer(), event, size);
+ memcpy(eventMemory->unsecurePointer(), event, size);
return eventMemory;
}
@@ -283,11 +309,11 @@
size_t size = event->data_offset + event->data_size;
eventMemory = mMemoryDealer->allocate(size);
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
eventMemory.clear();
return eventMemory;
}
- memcpy(eventMemory->pointer(), event, size);
+ memcpy(eventMemory->unsecurePointer(), event, size);
return eventMemory;
}
@@ -313,11 +339,11 @@
size_t size = sizeof(sound_trigger_service_state_t);
eventMemory = mMemoryDealer->allocate(size);
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
eventMemory.clear();
return eventMemory;
}
- *((sound_trigger_service_state_t *)eventMemory->pointer()) = state;
+ *((sound_trigger_service_state_t *)eventMemory->unsecurePointer()) = state;
return eventMemory;
}
@@ -557,8 +583,13 @@
return NO_INIT;
}
- struct sound_trigger_sound_model *sound_model =
- (struct sound_trigger_sound_model *)modelMemory->pointer();
+ auto immutableMemory = CopyToArray(modelMemory);
+ if (immutableMemory.first == nullptr) {
+ return NO_MEMORY;
+ }
+
+ struct sound_trigger_sound_model* sound_model =
+ (struct sound_trigger_sound_model*) immutableMemory.first.get();
size_t structSize;
if (sound_model->type == SOUND_MODEL_TYPE_KEYPHRASE) {
@@ -568,9 +599,10 @@
}
if (sound_model->data_offset < structSize ||
- sound_model->data_size > (UINT_MAX - sound_model->data_offset) ||
- modelMemory->size() < sound_model->data_offset ||
- sound_model->data_size > (modelMemory->size() - sound_model->data_offset)) {
+ sound_model->data_size > (UINT_MAX - sound_model->data_offset) ||
+ immutableMemory.second < sound_model->data_offset ||
+ sound_model->data_size >
+ (immutableMemory.second - sound_model->data_offset)) {
android_errorWriteLog(0x534e4554, "30148546");
ALOGE("loadSoundModel() data_size is too big");
return BAD_VALUE;
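The rewritten condition keeps the overflow-safe shape of the original check while switching it to the copied size: data_offset + data_size is never formed directly, because that sum can wrap; each operand is instead compared against what remains after subtracting the other. The same idea as a small standalone helper (illustrative only, not part of this change):

    #include <cstddef>

    // True if [offset, offset + size) fits in a buffer of `total` bytes,
    // without ever computing offset + size (which could overflow and wrap).
    static bool rangeFits(size_t offset, size_t size, size_t total) {
        return offset <= total && size <= total - offset;
    }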
@@ -651,13 +683,19 @@
return NO_INIT;
}
- struct sound_trigger_recognition_config *config =
- (struct sound_trigger_recognition_config *)dataMemory->pointer();
+ auto immutableMemory = CopyToArray(dataMemory);
+ if (immutableMemory.first == nullptr) {
+ return NO_MEMORY;
+ }
+
+ struct sound_trigger_recognition_config* config =
+ (struct sound_trigger_recognition_config*) immutableMemory.first.get();
if (config->data_offset < sizeof(struct sound_trigger_recognition_config) ||
- config->data_size > (UINT_MAX - config->data_offset) ||
- dataMemory->size() < config->data_offset ||
- config->data_size > (dataMemory->size() - config->data_offset)) {
+ config->data_size > (UINT_MAX - config->data_offset) ||
+ immutableMemory.second < config->data_offset ||
+ config->data_size >
+ (immutableMemory.second - config->data_offset)) {
ALOGE("startRecognition() data_size is too big");
return BAD_VALUE;
}
@@ -734,9 +772,10 @@
{
ALOGV("onCallbackEvent type %d", event->mType);
+ // Memory is coming from a trusted process.
sp<IMemory> eventMemory = event->mMemory;
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
return;
}
if (mModuleClients.isEmpty()) {
@@ -749,7 +788,7 @@
switch (event->mType) {
case CallbackEvent::TYPE_RECOGNITION: {
struct sound_trigger_recognition_event *recognitionEvent =
- (struct sound_trigger_recognition_event *)eventMemory->pointer();
+ (struct sound_trigger_recognition_event *)eventMemory->unsecurePointer();
{
AutoMutex lock(mLock);
sp<Model> model = getModel(recognitionEvent->model);
@@ -769,7 +808,7 @@
} break;
case CallbackEvent::TYPE_SOUNDMODEL: {
struct sound_trigger_model_event *soundmodelEvent =
- (struct sound_trigger_model_event *)eventMemory->pointer();
+ (struct sound_trigger_model_event *)eventMemory->unsecurePointer();
{
AutoMutex lock(mLock);
sp<Model> model = getModel(soundmodelEvent->model);
@@ -1082,7 +1121,8 @@
sp<IMemory> eventMemory = event->mMemory;
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ // Memory is coming from a trusted process.
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
return;
}
diff --git a/soundtrigger/SoundTrigger.cpp b/soundtrigger/SoundTrigger.cpp
index 9708ea7..e297ee7 100644
--- a/soundtrigger/SoundTrigger.cpp
+++ b/soundtrigger/SoundTrigger.cpp
@@ -204,39 +204,42 @@
void SoundTrigger::onRecognitionEvent(const sp<IMemory>& eventMemory)
{
Mutex::Autolock _l(mLock);
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
return;
}
if (mCallback != 0) {
+ // Memory is coming from a trusted process.
mCallback->onRecognitionEvent(
- (struct sound_trigger_recognition_event *)eventMemory->pointer());
+ (struct sound_trigger_recognition_event *)eventMemory->unsecurePointer());
}
}
void SoundTrigger::onSoundModelEvent(const sp<IMemory>& eventMemory)
{
Mutex::Autolock _l(mLock);
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
return;
}
if (mCallback != 0) {
+ // Memory is coming from a trusted process.
mCallback->onSoundModelEvent(
- (struct sound_trigger_model_event *)eventMemory->pointer());
+ (struct sound_trigger_model_event *)eventMemory->unsecurePointer());
}
}
void SoundTrigger::onServiceStateChange(const sp<IMemory>& eventMemory)
{
Mutex::Autolock _l(mLock);
- if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+ if (eventMemory == 0 || eventMemory->unsecurePointer() == NULL) {
return;
}
if (mCallback != 0) {
+ // Memory is coming from a trusted process.
mCallback->onServiceStateChange(
- *((sound_trigger_service_state_t *)eventMemory->pointer()));
+ *((sound_trigger_service_state_t *)eventMemory->unsecurePointer()));
}
}
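Across this change every pointer() call becomes unsecurePointer(), whose name flags that an IMemory region may be shared with, and concurrently writable by, another process. Where the data originates from an untrusted client (loadSoundModel(), startRecognition()), the service now snapshots it first via CopyToArray(); where it was produced by the service itself or another trusted process, the call sites keep reading in place and document why. A hedged sketch of the two dispositions (parse(), process(), and clientMemory are illustrative placeholders, not APIs from this change):

    // Untrusted input: snapshot before validating or parsing.
    auto snapshot = CopyToArray(clientMemory);        // clientMemory: sp<IMemory> from a client
    if (snapshot.first == nullptr) return NO_MEMORY;
    parse(snapshot.first.get(), snapshot.second);     // hypothetical consumer of the copy

    // Trusted, service-produced buffer: in-place access is acceptable,
    // and the comment records why unsecurePointer() is safe here.
    // Memory is coming from a trusted process.
    process(eventMemory->unsecurePointer(), eventMemory->size());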